Merge branch 'develop' into zmq-targets

This commit is contained in:
Frank Dana
2019-10-30 05:01:24 -04:00
committed by GitHub
44 changed files with 1623 additions and 340 deletions

View File

@@ -24,7 +24,7 @@
# along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
################################################################################
cmake_minimum_required(VERSION 3.1...3.14 FATAL_ERROR)
cmake_minimum_required(VERSION 3.2...3.14 FATAL_ERROR)
message("\
-----------------------------------------------------------------
@@ -56,7 +56,7 @@ STRING(REGEX REPLACE "\-.*$" "" VERSION_NUM "${PROJECT_VERSION_FULL}")
PROJECT(libopenshot LANGUAGES C CXX VERSION ${VERSION_NUM})
message("
Generating build files for OpenShot
Generating build files for OpenShot with CMake ${CMAKE_VERSION}
Building ${PROJECT_NAME} (version ${PROJECT_VERSION})
SO/API/ABI Version: ${PROJECT_SO_VERSION}
")
@@ -115,5 +115,6 @@ install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/doc/html/
endif()
############# PROCESS tests/ DIRECTORY ##############
add_subdirectory(tests)
if(NOT DISABLE_TESTS)
add_subdirectory(tests)
endif()

View File

@@ -1,42 +1,82 @@
# vim: ts=2 sw=2
# - Try to find the required ffmpeg components(default: AVFORMAT, AVUTIL, AVCODEC)
#
# Once done this will define
# FFMPEG_FOUND - System has all the required components.
# FFMPEG_INCLUDE_DIRS - Include directory necessary for using the required components headers.
# FFMPEG_LIBRARIES - Link these to use the required ffmpeg components.
# FFMPEG_DEFINITIONS - Compiler switches required for using the required ffmpeg components.
#
# For each of the components it will additionally set.
# - AVCODEC
# - AVDEVICE
# - AVFORMAT
# - AVFILTER
# - AVUTIL
# - POSTPROC
# - SWSCALE
# - SWRESAMPLE
# - AVRESAMPLE
# the following variables will be defined
# <component>_FOUND - System has <component>
# <component>_INCLUDE_DIRS - Include directory necessary for using the <component> headers
# <component>_LIBRARIES - Link these to use <component>
# <component>_DEFINITIONS - Compiler switches required for using <component>
# <component>_VERSION - The component's version
#
# Copyright (c) 2006, Matthias Kretz, <kretz@kde.org>
# Copyright (c) 2008, Alexander Neundorf, <neundorf@kde.org>
# Copyright (c) 2011, Michael Jansen, <kde@michael-jansen.biz>
#
# Redistribution and use is allowed according to the terms of the BSD license.
# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
#[=======================================================================[.rst:
FindFFmpeg
----------
Try to find the requested ffmpeg components(default: avformat, avutil, avcodec)
IMPORTED targets
^^^^^^^^^^^^^^^^
This module defines :prop_tgt:`IMPORTED` targets ``FFmpeg:<component>`` for
each found component (see below).
Components
^^^^^^^^^^
The module recognizes the following components:
::
avcodec - target FFmpeg::avcodec
avdevice - target FFmpeg::avdevice
avformat - target FFmpeg::avformat
avfilter - target FFmpeg::avfilter
avutil - target FFmpeg::avutil
postproc - target FFmpeg::postproc
swscale - target FFmpeg::swscale
swresample - target FFmpeg::swresample
avresample - target FFmpeg::avresample
Result Variables
^^^^^^^^^^^^^^^^
This module defines the following variables:
::
FFMPEG_FOUND - System has all the required components.
FFMPEG_INCLUDE_DIRS - Include directory necessary for using the required components headers.
FFMPEG_LIBRARIES - Link these to use the required ffmpeg components.
FFMPEG_DEFINITIONS - Compiler switches required for using the required ffmpeg components.
For each component, ``<component>_FOUND`` will be set if the component is available.
For each ``<component>_FOUND``, the following variables will be defined:
::
<component>_INCLUDE_DIRS - Include directory necessary for using the <component> headers
<component>_LIBRARIES - Link these to use <component>
<component>_DEFINITIONS - Compiler switches required for using <component>
<component>_VERSION - The component's version
Backwards compatibility
^^^^^^^^^^^^^^^^^^^^^^^
For compatibility with previous versions of this module, uppercase names
for FFmpeg and for all components are also recognized, and all-uppercase
versions of the cache variables are also created.
Copyright (c) 2006, Matthias Kretz, <kretz@kde.org>
Copyright (c) 2008, Alexander Neundorf, <neundorf@kde.org>
Copyright (c) 2011, Michael Jansen, <kde@michael-jansen.biz>
Copyright (c) 2019, FeRD (Frank Dana) <ferdnyc@gmail.com>
Redistribution and use is allowed according to the terms of the BSD license.
For details see the accompanying COPYING-CMAKE-SCRIPTS file.
#]=======================================================================]
include(FindPackageHandleStandardArgs)
# The default components were taken from a survey over other FindFFMPEG.cmake files
if (NOT FFmpeg_FIND_COMPONENTS)
set(FFmpeg_FIND_COMPONENTS AVCODEC AVFORMAT AVUTIL)
set(FFmpeg_ALL_COMPONENTS avcodec avdevice avformat avfilter avutil postproc swscale swresample avresample)
# Default to all components, if not specified
if (FFMPEG_FIND_COMPONENTS AND NOT FFmpeg_FIND_COMPONENTS)
set(FFmpeg_FIND_COMPONENTS ${FFMPEG_FIND_COMPONENTS})
endif ()
if (NOT FFmpeg_FIND_COMPONENTS)
set(FFmpeg_FIND_COMPONENTS ${FFmpeg_ALL_COMPONENTS})
endif ()
#
### Macro: set_component_found
@@ -44,12 +84,14 @@ endif ()
# Marks the given component as found if both *_LIBRARIES AND *_INCLUDE_DIRS are present.
#
macro(set_component_found _component )
if (${_component}_LIBRARIES AND ${_component}_INCLUDE_DIRS)
# message(STATUS " - ${_component} found.")
set(${_component}_FOUND TRUE)
else ()
# message(STATUS " - ${_component} not found.")
endif ()
if (${_component}_LIBRARIES AND ${_component}_INCLUDE_DIRS)
# message(STATUS "FFmpeg - ${_component} found.")
set(${_component}_FOUND TRUE)
else ()
if (NOT FFmpeg_FIND_QUIETLY AND NOT FFMPEG_FIND_QUIETLY)
message(STATUS "FFmpeg - ${_component} not found.")
endif ()
endif ()
endmacro()
#
@@ -60,102 +102,146 @@ endmacro()
#
macro(find_component _component _pkgconfig _library _header)
if (NOT WIN32)
# use pkg-config to get the directories and then use these values
# in the FIND_PATH() and FIND_LIBRARY() calls
find_package(PkgConfig)
if (PKG_CONFIG_FOUND)
pkg_check_modules(PC_${_component} ${_pkgconfig})
endif ()
endif (NOT WIN32)
if (NOT WIN32)
# use pkg-config to get the directories and then use these values
# in the FIND_PATH() and FIND_LIBRARY() calls
find_package(PkgConfig)
if (PKG_CONFIG_FOUND)
pkg_check_modules(PC_${_component} ${_pkgconfig})
endif ()
endif (NOT WIN32)
find_path(${_component}_INCLUDE_DIRS ${_header}
HINTS
/opt/
/opt/include/
${PC_LIB${_component}_INCLUDEDIR}
${PC_LIB${_component}_INCLUDE_DIRS}
$ENV{FFMPEGDIR}/include/
$ENV{FFMPEGDIR}/include/ffmpeg/
PATH_SUFFIXES
ffmpeg
)
find_path(${_component}_INCLUDE_DIRS ${_header}
HINTS
/opt/
/opt/include/
${PC_${_component}_INCLUDEDIR}
${PC_${_component}_INCLUDE_DIRS}
$ENV{FFMPEGDIR}/include/
$ENV{FFMPEGDIR}/include/ffmpeg/
PATH_SUFFIXES
ffmpeg
)
find_library(${_component}_LIBRARIES NAMES ${_library}
HINTS
${PC_LIB${_component}_LIBDIR}
${PC_LIB${_component}_LIBRARY_DIRS}
$ENV{FFMPEGDIR}/lib/
$ENV{FFMPEGDIR}/lib/ffmpeg/
$ENV{FFMPEGDIR}/bin/
)
find_library(${_component}_LIBRARIES NAMES ${_library}
HINTS
${PC_${_component}_LIBDIR}
${PC_${_component}_LIBRARY_DIRS}
$ENV{FFMPEGDIR}/lib/
$ENV{FFMPEGDIR}/lib/ffmpeg/
$ENV{FFMPEGDIR}/bin/
)
set(${_component}_DEFINITIONS ${PC_${_component}_CFLAGS_OTHER} CACHE STRING "The ${_component} CFLAGS.")
set(${_component}_VERSION ${PC_${_component}_VERSION} CACHE STRING "The ${_component} version number.")
set(${_component}_DEFINITIONS ${PC_${_component}_CFLAGS_OTHER} CACHE STRING "The ${_component} CFLAGS.")
set(${_component}_VERSION ${PC_${_component}_VERSION} CACHE STRING "The ${_component} version number.")
set_component_found(${_component})
set_component_found(${_component})
mark_as_advanced(
${_component}_INCLUDE_DIRS
${_component}_LIBRARIES
${_component}_DEFINITIONS
${_component}_VERSION)
mark_as_advanced(
${_component}_INCLUDE_DIRS
${_component}_LIBRARIES
${_component}_DEFINITIONS
${_component}_VERSION
)
endmacro()
# Check for cached results. If there are any, skip the costly part.
if (NOT FFMPEG_LIBRARIES)
if (NOT FFmpeg_LIBRARIES)
# Check for all possible components.
find_component(AVCODEC libavcodec avcodec libavcodec/avcodec.h)
find_component(AVFORMAT libavformat avformat libavformat/avformat.h)
find_component(AVDEVICE libavdevice avdevice libavdevice/avdevice.h)
find_component(AVUTIL libavutil avutil libavutil/avutil.h)
find_component(AVFILTER libavfilter avfilter libavfilter/avfilter.h)
find_component(SWSCALE libswscale swscale libswscale/swscale.h)
find_component(POSTPROC libpostproc postproc libpostproc/postprocess.h)
find_component(SWRESAMPLE libswresample swresample libswresample/swresample.h)
find_component(AVRESAMPLE libavresample avresample libavresample/avresample.h)
# Check for all possible components.
find_component(avcodec libavcodec avcodec libavcodec/avcodec.h)
find_component(avdevice libavdevice avdevice libavdevice/avdevice.h)
find_component(avformat libavformat avformat libavformat/avformat.h)
find_component(avfilter libavfilter avfilter libavfilter/avfilter.h)
find_component(avutil libavutil avutil libavutil/avutil.h)
find_component(postproc libpostproc postproc libpostproc/postprocess.h)
find_component(swscale libswscale swscale libswscale/swscale.h)
find_component(swresample libswresample swresample libswresample/swresample.h)
find_component(avresample libavresample avresample libavresample/avresample.h)
else()
# Just set the noncached _FOUND vars for the components.
foreach(_component ${FFmpeg_ALL_COMPONENTS})
set_component_found(${_component})
endforeach ()
endif()
# Check if the required components were found and add their stuff to the FFMPEG_* vars.
foreach (_component ${FFmpeg_FIND_COMPONENTS})
if (${_component}_FOUND)
# message(STATUS "Required component ${_component} present.")
set(FFMPEG_LIBRARIES ${FFMPEG_LIBRARIES} ${${_component}_LIBRARIES})
set(FFMPEG_DEFINITIONS ${FFMPEG_DEFINITIONS} ${${_component}_DEFINITIONS})
list(APPEND FFMPEG_INCLUDE_DIRS ${${_component}_INCLUDE_DIRS})
else ()
# message(STATUS "Required component ${_component} missing.")
endif ()
endforeach ()
# Build the include path with duplicates removed.
if (FFMPEG_INCLUDE_DIRS)
list(REMOVE_DUPLICATES FFMPEG_INCLUDE_DIRS)
endif ()
# cache the vars.
set(FFMPEG_INCLUDE_DIRS ${FFMPEG_INCLUDE_DIRS} CACHE STRING "The FFmpeg include directories." FORCE)
set(FFMPEG_LIBRARIES ${FFMPEG_LIBRARIES} CACHE STRING "The FFmpeg libraries." FORCE)
set(FFMPEG_DEFINITIONS ${FFMPEG_DEFINITIONS} CACHE STRING "The FFmpeg cflags." FORCE)
mark_as_advanced(FFMPEG_INCLUDE_DIRS
FFMPEG_LIBRARIES
FFMPEG_DEFINITIONS)
endif ()
# Now set the noncached _FOUND vars for the components.
foreach (_component AVCODEC AVDEVICE AVFORMAT AVUTIL POSTPROCESS SWSCALE SWRESAMPLE AVRESAMPLE)
set_component_found(${_component})
# Check if the requested components were found and add their stuff to the FFmpeg_* vars.
foreach (_component ${FFmpeg_FIND_COMPONENTS})
string(TOLOWER "${_component}" _component)
if (${_component}_FOUND)
# message(STATUS "Requested component ${_component} present.")
set(FFmpeg_LIBRARIES ${FFmpeg_LIBRARIES} ${${_component}_LIBRARIES})
set(FFmpeg_DEFINITIONS ${FFmpeg_DEFINITIONS} ${${_component}_DEFINITIONS})
list(APPEND FFmpeg_INCLUDE_DIRS ${${_component}_INCLUDE_DIRS})
else ()
# message(STATUS "Requested component ${_component} missing.")
endif ()
endforeach ()
# Build the include path with duplicates removed.
if (FFmpeg_INCLUDE_DIRS)
list(REMOVE_DUPLICATES FFmpeg_INCLUDE_DIRS)
endif ()
# cache the vars.
set(FFmpeg_INCLUDE_DIRS ${FFmpeg_INCLUDE_DIRS} CACHE STRING "The FFmpeg include directories." FORCE)
set(FFmpeg_LIBRARIES ${FFmpeg_LIBRARIES} CACHE STRING "The FFmpeg libraries." FORCE)
set(FFmpeg_DEFINITIONS ${FFmpeg_DEFINITIONS} CACHE STRING "The FFmpeg cflags." FORCE)
mark_as_advanced(FFmpeg_INCLUDE_DIRS
FFmpeg_LIBRARIES
FFmpeg_DEFINITIONS)
# Backwards compatibility
foreach(_suffix INCLUDE_DIRS LIBRARIES DEFINITIONS)
get_property(_help CACHE FFmpeg_${_suffix} PROPERTY HELPSTRING)
set(FFMPEG_${_suffix} ${FFmpeg_${_suffix}} CACHE STRING "${_help}" FORCE)
mark_as_advanced(FFMPEG_${_suffix})
endforeach()
foreach(_component ${FFmpeg_ALL_COMPONENTS})
if(${_component}_FOUND)
string(TOUPPER "${_component}" _uc_component)
set(${_uc_component}_FOUND TRUE)
foreach(_suffix INCLUDE_DIRS LIBRARIES DEFINITIONS VERSION)
get_property(_help CACHE ${_component}_${_suffix} PROPERTY HELPSTRING)
set(${_uc_component}_${_suffix} ${${_component}_${_suffix}} CACHE STRING "${_help}" FORCE)
mark_as_advanced(${_uc_component}_${_suffix})
endforeach()
endif()
endforeach()
# Compile the list of required vars
set(_FFmpeg_REQUIRED_VARS FFMPEG_LIBRARIES FFMPEG_INCLUDE_DIRS)
set(_FFmpeg_REQUIRED_VARS FFmpeg_LIBRARIES FFmpeg_INCLUDE_DIRS)
foreach (_component ${FFmpeg_FIND_COMPONENTS})
list(APPEND _FFmpeg_REQUIRED_VARS ${_component}_LIBRARIES ${_component}_INCLUDE_DIRS)
list(APPEND _FFmpeg_REQUIRED_VARS
${_component}_LIBRARIES
${_component}_INCLUDE_DIRS)
endforeach ()
# Give a nice error message if some of the required vars are missing.
find_package_handle_standard_args(FFmpeg DEFAULT_MSG ${_FFmpeg_REQUIRED_VARS})
# Export targets for each found component
foreach (_component ${FFmpeg_ALL_COMPONENTS})
if(${_component}_FOUND)
# message(STATUS "Creating IMPORTED target FFmpeg::${_component}")
if(NOT TARGET FFmpeg::${_component})
add_library(FFmpeg::${_component} UNKNOWN IMPORTED)
set_target_properties(FFmpeg::${_component} PROPERTIES
INTERFACE_INCLUDE_DIRECTORIES "${${_component}_INCLUDE_DIRS}")
set_property(TARGET FFmpeg::${_component} APPEND PROPERTY
INTERFACE_COMPILE_DEFINITIONS "${${_component}_DEFINITIONS}")
set_property(TARGET FFmpeg::${_component} APPEND PROPERTY
IMPORTED_LOCATION "${${_component}_LIBRARIES}")
endif()
endif()
endforeach()

View File

@@ -54,25 +54,25 @@ namespace openshot
* The <a href="http://www.juce.com/">JUCE</a> library cannot play audio directly from an AudioSampleBuffer, so this class exposes
* an AudioSampleBuffer as a AudioSource, so that JUCE can play the audio.
*/
class AudioBufferSource : public PositionableAudioSource
class AudioBufferSource : public juce::PositionableAudioSource
{
private:
int position;
int start;
bool repeat;
AudioSampleBuffer *buffer;
juce::AudioSampleBuffer *buffer;
public:
/// @brief Default constructor
/// @param audio_buffer This buffer contains the samples you want to play through JUCE.
AudioBufferSource(AudioSampleBuffer *audio_buffer);
AudioBufferSource(juce::AudioSampleBuffer *audio_buffer);
/// Destructor
~AudioBufferSource();
/// @brief Get the next block of audio samples
/// @param info This struct informs us of which samples are needed next.
void getNextAudioBlock (const AudioSourceChannelInfo& info);
void getNextAudioBlock (const juce::AudioSourceChannelInfo& info);
/// Prepare to play this audio source
void prepareToPlay(int, double);
@@ -82,13 +82,13 @@ namespace openshot
/// @brief Set the next read position of this source
/// @param newPosition The sample # to start reading from
void setNextReadPosition (int64 newPosition);
void setNextReadPosition (juce::int64 newPosition);
/// Get the next read position of this source
int64 getNextReadPosition() const;
juce::int64 getNextReadPosition() const;
/// Get the total length (in samples) of this audio source
int64 getTotalLength() const;
juce::int64 getTotalLength() const;
/// Determines if this audio source should repeat when it reaches the end
bool isLooping() const;
@@ -98,7 +98,7 @@ namespace openshot
void setLooping (bool shouldLoop);
/// Update the internal buffer used by this source
void setBuffer (AudioSampleBuffer *audio_buffer);
void setBuffer (juce::AudioSampleBuffer *audio_buffer);
};
}

View File

@@ -54,13 +54,13 @@ namespace openshot
*
* This allows any reader to play audio through JUCE (our audio framework).
*/
class AudioReaderSource : public PositionableAudioSource
class AudioReaderSource : public juce::PositionableAudioSource
{
private:
int position; /// The position of the audio source (index of buffer)
bool repeat; /// Repeat the audio source when finished
int size; /// The size of the internal buffer
AudioSampleBuffer *buffer; /// The audio sample buffer
juce::AudioSampleBuffer *buffer; /// The audio sample buffer
int speed; /// The speed and direction to playback a reader (1=normal, 2=fast, 3=faster, -1=rewind, etc...)
ReaderBase *reader; /// The reader to pull samples from
@@ -90,7 +90,7 @@ namespace openshot
/// @brief Get the next block of audio samples
/// @param info This struct informs us of which samples are needed next.
void getNextAudioBlock (const AudioSourceChannelInfo& info);
void getNextAudioBlock (const juce::AudioSourceChannelInfo& info);
/// Prepare to play this audio source
void prepareToPlay(int, double);
@@ -100,13 +100,13 @@ namespace openshot
/// @brief Set the next read position of this source
/// @param newPosition The sample # to start reading from
void setNextReadPosition (int64 newPosition);
void setNextReadPosition (juce::int64 newPosition);
/// Get the next read position of this source
int64 getNextReadPosition() const;
juce::int64 getNextReadPosition() const;
/// Get the total length (in samples) of this audio source
int64 getTotalLength() const;
juce::int64 getTotalLength() const;
/// Determines if this audio source should repeat when it reaches the end
bool isLooping() const;
@@ -116,7 +116,7 @@ namespace openshot
void setLooping (bool shouldLoop);
/// Update the internal buffer used by this source
void setBuffer (AudioSampleBuffer *audio_buffer);
void setBuffer (juce::AudioSampleBuffer *audio_buffer);
const ReaderInfo & getReaderInfo() const { return reader->info; }

View File

@@ -55,11 +55,11 @@ namespace openshot {
*/
class AudioResampler {
private:
AudioSampleBuffer *buffer;
AudioSampleBuffer *resampled_buffer;
juce::AudioSampleBuffer *buffer;
juce::AudioSampleBuffer *resampled_buffer;
AudioBufferSource *buffer_source;
ResamplingAudioSource *resample_source;
AudioSourceChannelInfo resample_callback_buffer;
juce::ResamplingAudioSource *resample_source;
juce::AudioSourceChannelInfo resample_callback_buffer;
int num_of_samples;
int new_num_of_samples;
@@ -78,15 +78,15 @@ namespace openshot {
/// @param new_buffer The buffer of audio samples needing to be resampled
/// @param sample_rate The original sample rate of the buffered samples
/// @param new_sample_rate The requested sample rate you need
void SetBuffer(AudioSampleBuffer *new_buffer, double sample_rate, double new_sample_rate);
void SetBuffer(juce::AudioSampleBuffer *new_buffer, double sample_rate, double new_sample_rate);
/// @brief Sets the audio buffer and key settings
/// @param new_buffer The buffer of audio samples needing to be resampled
/// @param ratio The multiplier that needs to be applied to the sample rate (this is how resampling happens)
void SetBuffer(AudioSampleBuffer *new_buffer, double ratio);
void SetBuffer(juce::AudioSampleBuffer *new_buffer, double ratio);
/// Get the resampled audio buffer
AudioSampleBuffer* GetResampledBuffer();
juce::AudioSampleBuffer* GetResampledBuffer();
};
}

View File

@@ -53,7 +53,7 @@ namespace openshot {
int64_t max_bytes; ///< This is the max number of bytes to cache (0 = no limit)
/// Section lock for multiple threads
CriticalSection *cacheCriticalSection;
juce::CriticalSection *cacheCriticalSection;
public:

View File

@@ -103,15 +103,15 @@ namespace openshot {
class Clip : public ClipBase {
protected:
/// Section lock for multiple threads
CriticalSection getFrameCriticalSection;
juce::CriticalSection getFrameCriticalSection;
private:
bool waveform; ///< Should a waveform be used instead of the clip's image
list<EffectBase*> effects; ///<List of clips on this timeline
std::list<EffectBase*> effects; ///<List of clips on this timeline
// Audio resampler (if time mapping)
AudioResampler *resampler;
AudioSampleBuffer *audio_cache;
juce::AudioSampleBuffer *audio_cache;
// File Reader object
ReaderBase* reader;
@@ -127,7 +127,7 @@ namespace openshot {
std::shared_ptr<Frame> apply_effects(std::shared_ptr<Frame> frame);
/// Get file extension
string get_file_extension(string path);
std::string get_file_extension(std::string path);
/// Get a frame object or create a blank one
std::shared_ptr<Frame> GetOrCreateFrame(int64_t number);

View File

@@ -119,8 +119,8 @@ namespace openshot
std::shared_ptr<QImage> wave_image;
std::shared_ptr<juce::AudioSampleBuffer> audio;
std::shared_ptr<QApplication> previewApp;
CriticalSection addingImageSection;
CriticalSection addingAudioSection;
juce::CriticalSection addingImageSection;
juce::CriticalSection addingAudioSection;
const unsigned char *qbuffer;
Fraction pixel_ratio;
int channels;

View File

@@ -135,7 +135,9 @@
#include "PlayerBase.h"
#include "Point.h"
#include "Profiles.h"
#include "QtHtmlReader.h"
#include "QtImageReader.h"
#include "QtTextReader.h"
#include "Timeline.h"
#include "Settings.h"

145
include/QtHtmlReader.h Normal file
View File

@@ -0,0 +1,145 @@
/**
* @file
* @brief Header file for QtHtmlReader class
* @author Jonathan Thomas <jonathan@openshot.org>
* @author Sergei Kolesov (jediserg)
* @author Jeff Shillitto (jeffski)
*
* @ref License
*/
/* LICENSE
*
* Copyright (c) 2008-2019 OpenShot Studios, LLC
* <http://www.openshotstudios.com/>. This file is part of
* OpenShot Library (libopenshot), an open-source project dedicated to
* delivering high quality video editing and animation solutions to the
* world. For more information visit <http://www.openshot.org/>.
*
* OpenShot Library (libopenshot) is free software: you can redistribute it
* and/or modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* OpenShot Library (libopenshot) is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef OPENSHOT_QT_HTML_READER_H
#define OPENSHOT_QT_HTML_READER_H
#include "ReaderBase.h"
#include <cmath>
#include <ctime>
#include <iostream>
#include <omp.h>
#include <stdio.h>
#include <memory>
#include "CacheMemory.h"
#include "Enums.h"
#include "Exceptions.h"
class QImage;
namespace openshot
{
/**
* @brief This class uses Qt libraries, to create frames with rendered HTML, and return
* openshot::Frame objects.
*
* Supports HTML/CSS subset available via Qt libraries, see: https://doc.qt.io/qt-5/richtext-html-subset.html
*
* @code
* // Any application using this class must instantiate either QGuiApplication or QApplication
* QApplication a(argc, argv);
*
* // Create a reader to generate an openshot::Frame containing text
* QtHtmlReader r(720, // width
* 480, // height
* 5, // x_offset
* 5, // y_offset
* GRAVITY_CENTER, // gravity
* "<b>Check out</b> this Text!", // html
* "b { color: #ff0000 }", // css
* "#000000" // background_color
* );
* r.Open(); // Open the reader
*
* // Get frame number 1 from the video (in fact, any frame # you request will return the same frame)
* std::shared_ptr<Frame> f = r.GetFrame(1);
*
* // Now that we have an openshot::Frame object, lets have some fun!
* f->Display(); // Display the frame on the screen
*
* // Close the reader
* r.Close();
* @endcode
*/
class QtHtmlReader : public ReaderBase
{
private:
	int width;                     ///< Width (in pixels) of the frames this reader produces
	int height;                    ///< Height (in pixels) of the frames this reader produces
	int x_offset;                  ///< Horizontal pixel offset applied to the rendered HTML
	int y_offset;                  ///< Vertical pixel offset applied to the rendered HTML
	std::string html;              ///< HTML markup to render
	std::string css;               ///< CSS applied to style the HTML
	std::string background_color;  ///< Frame background color (#RRGGBB, #AARRGGBB, CSS name, or 'transparent')
	std::shared_ptr<QImage> image; ///< Rendered image, shared by every frame returned from GetFrame()
	bool is_open;                  ///< True between Open() and Close()
	openshot::GravityType gravity; ///< Alignment / gravity of the rendered HTML within the frame

public:
	/// Default constructor (blank text)
	QtHtmlReader();

	/// @brief Constructor for QtHtmlReader with all parameters.
	/// @param width The width of the requested openshot::Frame (not the size of the text)
	/// @param height The height of the requested openshot::Frame (not the size of the text)
	/// @param x_offset The number of pixels to offset the text on the X axis (horizontal)
	/// @param y_offset The number of pixels to offset the text on the Y axis (vertical)
	/// @param gravity The alignment / gravity of the text
	/// @param html The HTML you want to render / display
	/// @param css The CSS you want to apply to style the HTML
	/// @param background_color The background color of the frame image (valid values are a color string in #RRGGBB or #AARRGGBB notation, a CSS color name, or 'transparent')
	QtHtmlReader(int width, int height, int x_offset, int y_offset, GravityType gravity, std::string html, std::string css, std::string background_color);

	/// Close Reader
	void Close();

	/// Get the cache object used by this reader (always returns NULL for this object)
	openshot::CacheMemory* GetCache() { return NULL; };

	/// Get an openshot::Frame object for a specific frame number of this reader. All numbers
	/// return the same Frame, since they all share the same image data.
	///
	/// @returns The requested frame (containing the image)
	/// @param requested_frame The frame number that is requested.
	std::shared_ptr<openshot::Frame> GetFrame(int64_t requested_frame);

	/// Determine if reader is open or closed
	bool IsOpen() { return is_open; };

	/// Return the type name of the class
	std::string Name() { return "QtHtmlReader"; };

	/// Get and Set JSON methods
	std::string Json();                 ///< Generate JSON string of this object
	void SetJson(std::string value);    ///< Load JSON string into this object
	Json::Value JsonValue();            ///< Generate Json::JsonValue for this object
	void SetJsonValue(Json::Value root); ///< Load Json::JsonValue into this object

	/// Open Reader - which is called by the constructor automatically
	void Open();
};
}
#endif

159
include/QtTextReader.h Normal file
View File

@@ -0,0 +1,159 @@
/**
* @file
* @brief Header file for QtTextReader class
* @author Jonathan Thomas <jonathan@openshot.org>
* @author Sergei Kolesov (jediserg)
* @author Jeff Shillitto (jeffski)
*
* @ref License
*/
/* LICENSE
*
* Copyright (c) 2008-2019 OpenShot Studios, LLC
* <http://www.openshotstudios.com/>. This file is part of
* OpenShot Library (libopenshot), an open-source project dedicated to
* delivering high quality video editing and animation solutions to the
* world. For more information visit <http://www.openshot.org/>.
*
* OpenShot Library (libopenshot) is free software: you can redistribute it
* and/or modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* OpenShot Library (libopenshot) is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef OPENSHOT_QT_TEXT_READER_H
#define OPENSHOT_QT_TEXT_READER_H
#include "ReaderBase.h"
#include <cmath>
#include <ctime>
#include <iostream>
#include <omp.h>
#include <stdio.h>
#include <memory>
#include "CacheMemory.h"
#include "Enums.h"
#include "Exceptions.h"
class QImage;
namespace openshot
{
/**
* @brief This class uses Qt libraries, to create frames with "Text", and return
* openshot::Frame objects.
*
* All system fonts are supported, including many different font properties, such as size, color,
* alignment, padding, etc...
*
* @code
* // Any application using this class must instantiate either QGuiApplication or QApplication
* QApplication a(argc, argv);
*
* // Create a reader to generate an openshot::Frame containing text
* QtTextReader r(720, // width
* 480, // height
* 5, // x_offset
* 5, // y_offset
* GRAVITY_CENTER, // gravity
* "Check out this Text!", // text
* "Arial", // font
* 15.0, // font size
* "#fff000", // text_color
* "#000000" // background_color
* );
* r.Open(); // Open the reader
*
* // Get frame number 1 from the video (in fact, any frame # you request will return the same frame)
* std::shared_ptr<Frame> f = r.GetFrame(1);
*
* // Now that we have an openshot::Frame object, lets have some fun!
* f->Display(); // Display the frame on the screen
*
* // Close the reader
* r.Close();
* @endcode
*/
class QtTextReader : public ReaderBase
{
private:
	int width;                         ///< Width (in pixels) of the frames this reader produces
	int height;                        ///< Height (in pixels) of the frames this reader produces
	int x_offset;                      ///< Horizontal pixel offset applied to the rendered text
	int y_offset;                      ///< Vertical pixel offset applied to the rendered text
	std::string text;                  ///< Text to render
	QFont font;                        ///< Font used to render the text
	std::string text_color;            ///< Text color (#RRGGBB, #AARRGGBB, or CSS name)
	std::string background_color;      ///< Frame background color (#RRGGBB, #AARRGGBB, CSS name, or 'transparent')
	std::string text_background_color; ///< Optional box color drawn behind the text (set via SetTextBackgroundColor)
	std::shared_ptr<QImage> image;     ///< Rendered image, shared by every frame returned from GetFrame()
	bool is_open;                      ///< True between Open() and Close()
	openshot::GravityType gravity;     ///< Alignment / gravity of the rendered text within the frame

public:
	/// Default constructor (blank text)
	QtTextReader();

	/// @brief Constructor for QtTextReader with all parameters.
	/// @param width The width of the requested openshot::Frame (not the size of the text)
	/// @param height The height of the requested openshot::Frame (not the size of the text)
	/// @param x_offset The number of pixels to offset the text on the X axis (horizontal)
	/// @param y_offset The number of pixels to offset the text on the Y axis (vertical)
	/// @param gravity The alignment / gravity of the text
	/// @param text The text you want to generate / display
	/// @param font The font of the text
	/// @param text_color The color of the text (valid values are a color string in #RRGGBB or #AARRGGBB notation or a CSS color name)
	/// @param background_color The background color of the frame image (valid values are a color string in #RRGGBB or #AARRGGBB notation, a CSS color name, or 'transparent')
	QtTextReader(int width, int height, int x_offset, int y_offset, GravityType gravity, std::string text, QFont font, std::string text_color, std::string background_color);

	/// Draw a box under rendered text using the specified color.
	/// @param color The background color behind the text (valid values are a color string in #RRGGBB or #AARRGGBB notation or a CSS color name)
	void SetTextBackgroundColor(std::string color);

	/// Close Reader
	void Close();

	/// Get the cache object used by this reader (always returns NULL for this object)
	openshot::CacheMemory* GetCache() { return NULL; };

	/// Get an openshot::Frame object for a specific frame number of this reader. All numbers
	/// return the same Frame, since they all share the same image data.
	///
	/// @returns The requested frame (containing the image)
	/// @param requested_frame The frame number that is requested.
	std::shared_ptr<openshot::Frame> GetFrame(int64_t requested_frame);

	/// Determine if reader is open or closed
	bool IsOpen() { return is_open; };

	/// Return the type name of the class
	std::string Name() { return "QtTextReader"; };

	/// Get and Set JSON methods
	std::string Json();                 ///< Generate JSON string of this object
	void SetJson(std::string value);    ///< Load JSON string into this object (was bare `string`, inconsistent with the rest of this header and fails to compile without a using-directive)
	Json::Value JsonValue();            ///< Generate Json::JsonValue for this object
	void SetJsonValue(Json::Value root); ///< Load Json::JsonValue into this object

	/// Open Reader - which is called by the constructor automatically
	void Open();
};
}
#endif

View File

@@ -100,8 +100,8 @@ namespace openshot
{
protected:
/// Section lock for multiple threads
CriticalSection getFrameCriticalSection;
CriticalSection processingCriticalSection;
juce::CriticalSection getFrameCriticalSection;
juce::CriticalSection processingCriticalSection;
ClipBase* parent;
public:

View File

@@ -121,7 +121,7 @@ namespace openshot
TextReader(int width, int height, int x_offset, int y_offset, GravityType gravity, string text, string font, double size, string text_color, string background_color);
/// Draw a box under rendered text using the specified color.
/// @param text_background_color The background color behind the text
/// @param color The background color behind the text
void SetTextBackgroundColor(string color);
/// Close Reader

View File

@@ -55,7 +55,7 @@ namespace openshot {
*/
class ZmqLogger {
private:
CriticalSection loggerCriticalSection;
juce::CriticalSection loggerCriticalSection;
std::string connection;
// Logfile related vars

View File

@@ -34,7 +34,7 @@ using namespace std;
using namespace openshot;
// Default constructor
AudioBufferSource::AudioBufferSource(AudioSampleBuffer *audio_buffer)
AudioBufferSource::AudioBufferSource(juce::AudioSampleBuffer *audio_buffer)
: position(0), start(0), repeat(false), buffer(audio_buffer)
{ }
@@ -46,7 +46,7 @@ AudioBufferSource::~AudioBufferSource()
};
// Get the next block of audio samples
void AudioBufferSource::getNextAudioBlock (const AudioSourceChannelInfo& info)
void AudioBufferSource::getNextAudioBlock (const juce::AudioSourceChannelInfo& info)
{
int buffer_samples = buffer->getNumSamples();
int buffer_channels = buffer->getNumChannels();
@@ -98,7 +98,7 @@ void AudioBufferSource::prepareToPlay(int, double) { }
void AudioBufferSource::releaseResources() { }
// Set the next read position of this source
void AudioBufferSource::setNextReadPosition (int64 newPosition)
void AudioBufferSource::setNextReadPosition (juce::int64 newPosition)
{
// set position (if the new position is in range)
if (newPosition >= 0 && newPosition < buffer->getNumSamples())
@@ -106,14 +106,14 @@ void AudioBufferSource::setNextReadPosition (int64 newPosition)
}
// Get the next read position of this source
int64 AudioBufferSource::getNextReadPosition() const
juce::int64 AudioBufferSource::getNextReadPosition() const
{
// return the next read position
return position;
}
// Get the total length (in samples) of this audio source
int64 AudioBufferSource::getTotalLength() const
juce::int64 AudioBufferSource::getTotalLength() const
{
// Get the length
return buffer->getNumSamples();
@@ -134,7 +134,7 @@ void AudioBufferSource::setLooping (bool shouldLoop)
}
// Use a different AudioSampleBuffer for this source
void AudioBufferSource::setBuffer (AudioSampleBuffer *audio_buffer)
void AudioBufferSource::setBuffer (juce::AudioSampleBuffer *audio_buffer)
{
buffer = audio_buffer;
setNextReadPosition(0);

View File

@@ -152,7 +152,7 @@ juce::AudioSampleBuffer* AudioReaderSource::reverse_buffer(juce::AudioSampleBuff
ZmqLogger::Instance()->AppendDebugMethod("AudioReaderSource::reverse_buffer", "number_of_samples", number_of_samples, "channels", channels);
// Reverse array (create new buffer to hold the reversed version)
AudioSampleBuffer *reversed = new juce::AudioSampleBuffer(channels, number_of_samples);
juce::AudioSampleBuffer *reversed = new juce::AudioSampleBuffer(channels, number_of_samples);
reversed->clear();
for (int channel = 0; channel < channels; channel++)
@@ -177,7 +177,7 @@ juce::AudioSampleBuffer* AudioReaderSource::reverse_buffer(juce::AudioSampleBuff
}
// Get the next block of audio samples
void AudioReaderSource::getNextAudioBlock(const AudioSourceChannelInfo& info)
void AudioReaderSource::getNextAudioBlock(const juce::AudioSourceChannelInfo& info)
{
int buffer_samples = buffer->getNumSamples();
int buffer_channels = buffer->getNumChannels();
@@ -248,7 +248,7 @@ void AudioReaderSource::prepareToPlay(int, double) { }
void AudioReaderSource::releaseResources() { }
// Set the next read position of this source
void AudioReaderSource::setNextReadPosition (int64 newPosition)
void AudioReaderSource::setNextReadPosition (juce::int64 newPosition)
{
// set position (if the new position is in range)
if (newPosition >= 0 && newPosition < buffer->getNumSamples())
@@ -256,14 +256,14 @@ void AudioReaderSource::setNextReadPosition (int64 newPosition)
}
// Get the next read position of this source
int64 AudioReaderSource::getNextReadPosition() const
juce::int64 AudioReaderSource::getNextReadPosition() const
{
// return the next read position
return position;
}
// Get the total length (in samples) of this audio source
int64 AudioReaderSource::getTotalLength() const
juce::int64 AudioReaderSource::getTotalLength() const
{
// Get the length
if (reader)
@@ -287,7 +287,7 @@ void AudioReaderSource::setLooping (bool shouldLoop)
}
// Update the internal buffer used by this source
void AudioReaderSource::setBuffer (AudioSampleBuffer *audio_buffer)
void AudioReaderSource::setBuffer (juce::AudioSampleBuffer *audio_buffer)
{
buffer = audio_buffer;
setNextReadPosition(0);

View File

@@ -49,10 +49,10 @@ AudioResampler::AudioResampler()
buffer_source = new AudioBufferSource(buffer);
// Init resampling source
resample_source = new ResamplingAudioSource(buffer_source, false, 2);
resample_source = new juce::ResamplingAudioSource(buffer_source, false, 2);
// Init resampled buffer
resampled_buffer = new AudioSampleBuffer(2, 1);
resampled_buffer = new juce::AudioSampleBuffer(2, 1);
resampled_buffer->clear();
// Init callback buffer
@@ -74,7 +74,7 @@ AudioResampler::~AudioResampler()
}
// Sets the audio buffer and updates the key settings
void AudioResampler::SetBuffer(AudioSampleBuffer *new_buffer, double sample_rate, double new_sample_rate)
void AudioResampler::SetBuffer(juce::AudioSampleBuffer *new_buffer, double sample_rate, double new_sample_rate)
{
if (sample_rate <= 0)
sample_rate = 44100;
@@ -89,7 +89,7 @@ void AudioResampler::SetBuffer(AudioSampleBuffer *new_buffer, double sample_rate
}
// Sets the audio buffer and key settings
void AudioResampler::SetBuffer(AudioSampleBuffer *new_buffer, double ratio)
void AudioResampler::SetBuffer(juce::AudioSampleBuffer *new_buffer, double ratio)
{
// Update buffer & buffer source
buffer = new_buffer;
@@ -120,7 +120,7 @@ void AudioResampler::SetBuffer(AudioSampleBuffer *new_buffer, double ratio)
}
// Get the resampled audio buffer
AudioSampleBuffer* AudioResampler::GetResampledBuffer()
juce::AudioSampleBuffer* AudioResampler::GetResampledBuffer()
{
// Resample the current frame's audio buffer (into the temp callback buffer)
resample_source->getNextAudioBlock(resample_callback_buffer);

View File

@@ -29,13 +29,11 @@ include_directories(${OPENSHOT_INCLUDE_DIRS})
####### Display summary of options/dependencies ######
include(FeatureSummary)
#set_property(GLOBAL APPEND PROPERTY FeatureSummary_PKG_TYPES BUILD)
#find_package(FOO)
#set_package_properties(FOO PROPERTIES TYPE BUILD)
################ OPTIONS ##################
# Optional build settings for libopenshot
OPTION(USE_SYSTEM_JSONCPP "Use system installed JsonCpp" OFF)
option(ENABLE_IWYU "Enable 'Include What You Use' scanner (CMake 3.3+)" OFF)
################ WINDOWS ##################
# Set some compiler options for Windows
@@ -84,20 +82,6 @@ IF (ImageMagick_FOUND)
ENDIF (ImageMagick_FOUND)
################### FFMPEG #####################
# Find FFmpeg libraries (used for video encoding / decoding)
FIND_PACKAGE(FFmpeg REQUIRED)
foreach(ffmpeg_comp AVCODEC AVDEVICE AVFORMAT AVFILTER AVUTIL POSTPROC SWSCALE SWRESAMPLE AVRESAMPLE)
if(${ffmpeg_comp}_FOUND)
list(APPEND FF_INCLUDES ${${ffmpeg_comp}_INCLUDE_DIRS})
add_definitions(${${ffmpeg_comp}_DEFINITIONS})
list(APPEND FF_LIBRARIES ${${ffmpeg_comp}_LIBRARIES})
endif()
endforeach()
list(REMOVE_DUPLICATES FF_INCLUDES)
include_directories(${FF_INCLUDES})
################# LIBOPENSHOT-AUDIO ###################
# Find JUCE-based openshot Audio libraries
FIND_PACKAGE(OpenShotAudio 0.1.8 REQUIRED)
@@ -168,6 +152,26 @@ endif(USE_SYSTEM_JSONCPP)
#set(PROFILER "/usr/lib/libprofiler.so.0.3.2")
#set(PROFILER "/usr/lib/libtcmalloc.so.4")
if(CMAKE_VERSION VERSION_LESS 3.3)
# IWYU wasn't supported internally in 3.2
set(ENABLE_IWYU FALSE)
endif()
if(ENABLE_IWYU)
find_program(IWYU_PATH NAMES "iwyu"
DOC "include-what-you-use source code scanner executable")
if(IWYU_PATH)
if(IWYU_OPTS)
separate_arguments(IWYU_OPTS)
list(APPEND _iwyu_opts "-Xiwyu" ${IWYU_OPTS})
endif()
set(CMAKE_CXX_INCLUDE_WHAT_YOU_USE ${IWYU_PATH} ${_iwyu_opts})
else()
set(ENABLE_IWYU FALSE)
endif()
endif()
add_feature_info("IWYU (include-what-you-use)" ENABLE_IWYU "Scan all source files with 'iwyu'")
#### GET LIST OF EFFECT FILES ####
FILE(GLOB EFFECT_FILES "${CMAKE_CURRENT_SOURCE_DIR}/effects/*.cpp")
@@ -211,6 +215,9 @@ SET ( OPENSHOT_SOURCE_FILES
QtPlayer.cpp
Settings.cpp
Timeline.cpp
QtTextReader.cpp
QtHtmlReader.cpp
# Qt Video Player
${QT_PLAYER_FILES}
@@ -260,6 +267,17 @@ set_target_properties(openshot
INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib"
)
################### FFMPEG #####################
# Find FFmpeg libraries (used for video encoding / decoding)
FIND_PACKAGE(FFmpeg REQUIRED COMPONENTS avcodec avdevice avformat avutil swscale)
foreach(ff_comp avcodec avdevice avformat avfilter avutil postproc swscale swresample avresample)
if(TARGET FFmpeg::${ff_comp})
target_link_libraries(openshot PUBLIC FFmpeg::${ff_comp})
endif()
endforeach()
################### OPENMP #####################
# Check for OpenMP (used for multi-core processing)
@@ -296,7 +314,6 @@ endif()
############### LINK LIBRARY #################
SET ( REQUIRED_LIBRARIES
${LIBOPENSHOT_AUDIO_LIBRARIES}
${FF_LIBRARIES}
${QT_LIBRARIES}
${PROFILER}
${JSONCPP_LIBRARY}
@@ -326,7 +343,7 @@ target_link_libraries(openshot PUBLIC ${REQUIRED_LIBRARIES})
# Pick up parameters from OpenMP target and propagate
target_link_libraries(openshot PUBLIC OpenMP::OpenMP_CXX)
############### CLI EXECUTABLE ################
############### CLI EXECUTABLES ################
# Create test executable
add_executable(openshot-example examples/Example.cpp)
@@ -341,6 +358,9 @@ target_compile_definitions(openshot-example PRIVATE
# Link test executable to the new library
target_link_libraries(openshot-example openshot)
add_executable(openshot-html-test examples/ExampleHtml.cpp)
target_link_libraries(openshot-html-test openshot Qt5::Gui)
############### PLAYER EXECUTABLE ################
# Create test executable
add_executable(openshot-player Qt/demo/main.cpp)

View File

@@ -367,7 +367,7 @@ void Clip::reverse_buffer(juce::AudioSampleBuffer* buffer)
int channels = buffer->getNumChannels();
// Reverse array (create new buffer to hold the reversed version)
AudioSampleBuffer *reversed = new juce::AudioSampleBuffer(channels, number_of_samples);
juce::AudioSampleBuffer *reversed = new juce::AudioSampleBuffer(channels, number_of_samples);
reversed->clear();
for (int channel = 0; channel < channels; channel++)
@@ -399,7 +399,7 @@ void Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, int64_t frame_num
// Check for a valid time map curve
if (time.Values.size() > 1)
{
const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);
const GenericScopedLock<juce::CriticalSection> lock(getFrameCriticalSection);
// create buffer and resampler
juce::AudioSampleBuffer *samples = NULL;
@@ -423,7 +423,7 @@ void Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, int64_t frame_num
if (time.GetRepeatFraction(frame_number).den > 1) {
// SLOWING DOWN AUDIO
// Resample data, and return new buffer pointer
AudioSampleBuffer *resampled_buffer = NULL;
juce::AudioSampleBuffer *resampled_buffer = NULL;
int resampled_buffer_size = 0;
// SLOW DOWN audio (split audio)
@@ -482,7 +482,7 @@ void Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, int64_t frame_num
delta_frame <= new_frame_number; delta_frame++) {
// buffer to hold detal samples
int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels,
juce::AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels,
number_of_delta_samples);
delta_samples->clear();
@@ -526,7 +526,7 @@ void Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, int64_t frame_num
delta_frame >= new_frame_number; delta_frame--) {
// buffer to hold delta samples
int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels,
juce::AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels,
number_of_delta_samples);
delta_samples->clear();
@@ -557,7 +557,7 @@ void Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, int64_t frame_num
resampler->SetBuffer(samples, float(start) / float(number_of_samples));
// Resample data, and return new buffer pointer
AudioSampleBuffer *buffer = resampler->GetResampledBuffer();
juce::AudioSampleBuffer *buffer = resampler->GetResampledBuffer();
int resampled_buffer_size = buffer->getNumSamples();
// Add the newly resized audio samples to the current frame

View File

@@ -172,59 +172,50 @@ void FFmpegWriter::SetVideoOptions(bool has_video, string codec, Fraction fps, i
AVCodec *new_codec;
// Check if the codec selected is a hardware accelerated codec
#if IS_FFMPEG_3_2
#if defined(__linux__)
if ( (strcmp(codec.c_str(),"h264_vaapi") == 0)) {
#if defined(__linux__)
if (strstr(codec.c_str(), "_vaapi") != NULL) {
new_codec = avcodec_find_encoder_by_name(codec.c_str());
hw_en_on = 1;
hw_en_supported = 1;
hw_en_av_pix_fmt = AV_PIX_FMT_VAAPI;
hw_en_av_device_type = AV_HWDEVICE_TYPE_VAAPI;
} else if (strstr(codec.c_str(), "_nvenc") != NULL) {
new_codec = avcodec_find_encoder_by_name(codec.c_str());
hw_en_on = 1;
hw_en_supported = 1;
hw_en_av_pix_fmt = AV_PIX_FMT_CUDA;
hw_en_av_device_type = AV_HWDEVICE_TYPE_CUDA;
} else {
new_codec = avcodec_find_encoder_by_name(codec.c_str());
hw_en_on = 0;
hw_en_supported = 0;
}
else {
if ( (strcmp(codec.c_str(),"h264_nvenc") == 0)) {
new_codec = avcodec_find_encoder_by_name(codec.c_str());
hw_en_on = 1;
hw_en_supported = 1;
hw_en_av_pix_fmt = AV_PIX_FMT_CUDA;
hw_en_av_device_type = AV_HWDEVICE_TYPE_CUDA;
}
else {
new_codec = avcodec_find_encoder_by_name(codec.c_str());
hw_en_on = 0;
hw_en_supported = 0;
}
}
#elif defined(_WIN32)
if ( (strcmp(codec.c_str(),"h264_dxva2") == 0)) {
#elif defined(_WIN32)
if (strstr(codec.c_str(), "_dxva2") != NULL) {
new_codec = avcodec_find_encoder_by_name(codec.c_str());
hw_en_on = 1;
hw_en_supported = 1;
hw_en_av_pix_fmt = AV_PIX_FMT_DXVA2_VLD;
hw_en_av_device_type = AV_HWDEVICE_TYPE_DXVA2;
} else if (strstr(codec.c_str(), "_nvenc") != NULL) {
new_codec = avcodec_find_encoder_by_name(codec.c_str());
hw_en_on = 1;
hw_en_supported = 1;
hw_en_av_pix_fmt = AV_PIX_FMT_CUDA;
hw_en_av_device_type = AV_HWDEVICE_TYPE_CUDA;
} else {
new_codec = avcodec_find_encoder_by_name(codec.c_str());
hw_en_on = 0;
hw_en_supported = 0;
}
else {
if ( (strcmp(codec.c_str(),"h264_nvenc") == 0)) {
new_codec = avcodec_find_encoder_by_name(codec.c_str());
hw_en_on = 1;
hw_en_supported = 1;
hw_en_av_pix_fmt = AV_PIX_FMT_CUDA;
hw_en_av_device_type = AV_HWDEVICE_TYPE_CUDA;
}
else {
new_codec = avcodec_find_encoder_by_name(codec.c_str());
hw_en_on = 0;
hw_en_supported = 0;
}
}
#elif defined(__APPLE__)
if ( (strcmp(codec.c_str(),"h264_videotoolbox") == 0)) {
#elif defined(__APPLE__)
if (strstr(codec.c_str(), "_videotoolbox") != NULL) {
new_codec = avcodec_find_encoder_by_name(codec.c_str());
hw_en_on = 1;
hw_en_supported = 1;
hw_en_av_pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX;
hw_en_av_device_type = AV_HWDEVICE_TYPE_VIDEOTOOLBOX;
}
else {
} else {
new_codec = avcodec_find_encoder_by_name(codec.c_str());
hw_en_on = 0;
hw_en_supported = 0;
@@ -350,7 +341,7 @@ void FFmpegWriter::SetOption(StreamType stream, string name, string value) {
// Was option found?
if (option || (name == "g" || name == "qmin" || name == "qmax" || name == "max_b_frames" || name == "mb_decision" ||
name == "level" || name == "profile" || name == "slices" || name == "rc_min_rate" || name == "rc_max_rate" ||
name == "crf")) {
name == "crf" || name == "cqp")) {
// Check for specific named options
if (name == "g")
// Set gop_size
@@ -396,7 +387,57 @@ void FFmpegWriter::SetOption(StreamType stream, string name, string value) {
// Buffer size
convert >> c->rc_buffer_size;
else if (name == "crf") {
else if (name == "cqp") {
// encode quality and special settings like lossless
// This might be better in an extra methods as more options
// and way to set quality are possible
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55, 39, 101)
#if IS_FFMPEG_3_2
if (hw_en_on) {
av_opt_set_int(c->priv_data, "qp", min(stoi(value),63), 0); // 0-63
} else
#endif
{
switch (c->codec_id) {
#if (LIBAVCODEC_VERSION_MAJOR >= 58)
case AV_CODEC_ID_AV1 :
c->bit_rate = 0;
av_opt_set_int(c->priv_data, "qp", min(stoi(value),63), 0); // 0-63
break;
#endif
case AV_CODEC_ID_VP8 :
c->bit_rate = 10000000;
av_opt_set_int(c->priv_data, "qp", max(min(stoi(value), 63), 4), 0); // 4-63
break;
case AV_CODEC_ID_VP9 :
c->bit_rate = 0; // Must be zero!
av_opt_set_int(c->priv_data, "qp", min(stoi(value), 63), 0); // 0-63
if (stoi(value) == 0) {
av_opt_set(c->priv_data, "preset", "veryslow", 0);
av_opt_set_int(c->priv_data, "lossless", 1, 0);
}
break;
case AV_CODEC_ID_H264 :
av_opt_set_int(c->priv_data, "qp", min(stoi(value), 51), 0); // 0-51
if (stoi(value) == 0) {
av_opt_set(c->priv_data, "preset", "veryslow", 0);
}
break;
case AV_CODEC_ID_H265 :
av_opt_set_int(c->priv_data, "qp", min(stoi(value), 51), 0); // 0-51
if (stoi(value) == 0) {
av_opt_set(c->priv_data, "preset", "veryslow", 0);
av_opt_set_int(c->priv_data, "lossless", 1, 0);
}
break;
default:
// For all other codecs assume a range of 0-63
av_opt_set_int(c->priv_data, "qp", min(stoi(value), 63), 0); // 0-63
c->bit_rate = 0;
}
}
#endif
} else if (name == "crf") {
// encode quality and special settings like lossless
// This might be better in an extra methods as more options
// and way to set quality are possible
@@ -480,7 +521,7 @@ void FFmpegWriter::SetOption(StreamType stream, string name, string value) {
// write selfcontained fragmented file, minimum length of the fragment 8 sec; only for MOV, MP4
av_dict_set(&mux_dict, "movflags", "frag_keyframe", 0);
av_dict_set(&mux_dict, "min_frag_duration", "8000000", 0);
}
}
} else {
throw InvalidOptions("The option is not valid for this codec.", path);
}
@@ -543,6 +584,7 @@ void FFmpegWriter::WriteHeader() {
// Write the stream header
if (avformat_write_header(oc, &dict) != 0) {
ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::WriteHeader (avformat_write_header)");
throw InvalidFile("Could not write header to file.", path);
};
@@ -1241,7 +1283,7 @@ void FFmpegWriter::open_audio(AVFormatContext *oc, AVStream *st) {
// Open the codec
if (avcodec_open2(audio_codec, codec, &opts) < 0)
throw InvalidCodec("Could not open codec", path);
throw InvalidCodec("Could not open audio codec", path);
AV_COPY_PARAMS_FROM_CONTEXT(st, audio_codec);
// Free options
@@ -1330,7 +1372,7 @@ void FFmpegWriter::open_video(AVFormatContext *oc, AVStream *st) {
#elif defined(__APPLE__)
if( adapter_ptr != NULL ) {
#endif
ZmqLogger::Instance()->AppendDebugMethod("Encode Device present using device");
ZmqLogger::Instance()->AppendDebugMethod("Encode Device present using device", "adapter", adapter_num);
}
else {
adapter_ptr = NULL; // use default
@@ -1361,23 +1403,58 @@ void FFmpegWriter::open_video(AVFormatContext *oc, AVStream *st) {
#if IS_FFMPEG_3_2
if (hw_en_on && hw_en_supported) {
video_codec->max_b_frames = 0; // At least this GPU doesn't support b-frames
video_codec->pix_fmt = hw_en_av_pix_fmt;
video_codec->profile = FF_PROFILE_H264_BASELINE | FF_PROFILE_H264_CONSTRAINED;
av_opt_set(video_codec->priv_data,"preset","slow",0);
av_opt_set(video_codec->priv_data,"tune","zerolatency",0);
av_opt_set(video_codec->priv_data, "vprofile", "baseline", AV_OPT_SEARCH_CHILDREN);
// for the list of possible options, see the list of codec-specific options:
// e.g. ffmpeg -h encoder=h264_vaapi or ffmpeg -h encoder=hevc_vaapi
// and "man ffmpeg-codecs"
// For VAAPI, it is safer to explicitly set rc_mode instead of relying on auto-selection
// which is ffmpeg version-specific.
if (hw_en_av_pix_fmt == AV_PIX_FMT_VAAPI) {
int64_t qp;
if (av_opt_get_int(video_codec->priv_data, "qp", 0, &qp) != 0 || qp == 0) {
// unless "qp" was set for CQP, switch to VBR RC mode
av_opt_set(video_codec->priv_data, "rc_mode", "VBR", 0);
// In the current state (ffmpeg-4.2-4 libva-mesa-driver-19.1.5-1) to use VBR,
// one has to specify both bit_rate and maxrate, otherwise a small low quality file is generated on Intel iGPU).
video_codec->rc_max_rate = video_codec->bit_rate;
}
}
switch (video_codec->codec_id) {
case AV_CODEC_ID_H264:
video_codec->max_b_frames = 0; // At least this GPU doesn't support b-frames
video_codec->profile = FF_PROFILE_H264_BASELINE | FF_PROFILE_H264_CONSTRAINED;
av_opt_set(video_codec->priv_data, "preset", "slow", 0);
av_opt_set(video_codec->priv_data, "tune", "zerolatency", 0);
av_opt_set(video_codec->priv_data, "vprofile", "baseline", AV_OPT_SEARCH_CHILDREN);
break;
case AV_CODEC_ID_HEVC:
// tested to work with defaults
break;
case AV_CODEC_ID_VP9:
// tested to work with defaults
break;
default:
ZmqLogger::Instance()->AppendDebugMethod("No codec-specific options defined for this codec. HW encoding may fail",
"codec_id", video_codec->codec_id);
break;
}
// set hw_frames_ctx for encoder's AVCodecContext
int err;
if ((err = set_hwframe_ctx(video_codec, hw_device_ctx, info.width, info.height)) < 0) {
fprintf(stderr, "Failed to set hwframe context.\n");
ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::open_video (set_hwframe_ctx) ERROR faled to set hwframe context",
"width", info.width, "height", info.height, av_err2str(err), -1);
}
}
#endif
/* open the codec */
if (avcodec_open2(video_codec, codec, &opts) < 0)
throw InvalidCodec("Could not open codec", path);
throw InvalidCodec("Could not open video codec", path);
AV_COPY_PARAMS_FROM_CONTEXT(st, video_codec);
// Free options

Some files were not shown because too many files have changed in this diff Show More