diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 58881d81..ea7c3ff1 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -6,35 +6,43 @@ jobs:
strategy:
matrix:
os: [ubuntu-18.04, ubuntu-20.04]
- compiler: [gcc, clang]
+ compiler:
+ - { cc: gcc, cpp: g++ }
+ - { cc: clang, cpp: clang++ }
env:
- CC: ${{ matrix.compiler }}
+ CC: ${{ matrix.compiler.cc }}
+ CXX: ${{ matrix.compiler.cpp }}
CODECOV_TOKEN: 'dc94d508-39d3-4369-b1c6-321749f96f7c'
steps:
- uses: actions/checkout@v2
- # Work around a codecov issue detecting commit SHAs
- # see: https://community.codecov.io/t/issue-detecting-commit-sha-please-run-actions-checkout-with-fetch-depth-1-or-set-to-0/2571
with:
+ # Work around a codecov issue detecting commit SHAs
+ # see: https://community.codecov.io/t/issue-detecting-commit-sha-please-run-actions-checkout-with-fetch-depth-1-or-set-to-0/2571
fetch-depth: 0
+ - name: Checkout OpenShotAudio
+ uses: actions/checkout@v2
+ with:
+ repository: OpenShot/libopenshot-audio
+ path: audio
+
- uses: haya14busa/action-cond@v1
id: coverage
with:
- cond: ${{ matrix.compiler == 'clang' }}
+ cond: ${{ matrix.compiler.cc == 'gcc' }}
if_true: "-DENABLE_COVERAGE:BOOL=1"
if_false: "-DENABLE_COVERAGE:BOOL=0"
- name: Install dependencies
shell: bash
run: |
- sudo add-apt-repository ppa:openshot.developers/libopenshot-daily
sudo apt update
sudo apt remove libzmq5 # See actions/virtual-environments#3317
sudo apt install \
cmake swig doxygen graphviz curl lcov \
- libopenshot-audio-dev libasound2-dev \
- qtbase5-dev qtbase5-dev-tools \
+ libasound2-dev \
+ qtbase5-dev qtbase5-dev-tools libqt5svg5-dev \
libfdk-aac-dev libavcodec-dev libavformat-dev libavdevice-dev libavutil-dev libavfilter-dev libswscale-dev libpostproc-dev libswresample-dev \
libzmq3-dev libmagick++-dev \
libopencv-dev libprotobuf-dev protobuf-compiler
@@ -43,13 +51,30 @@ jobs:
wget https://launchpad.net/ubuntu/+archive/primary/+files/catch2_2.13.0-1_all.deb
sudo dpkg -i catch2_2.13.0-1_all.deb
+ - uses: actions/cache@v2
+ id: cache
+ with:
+ path: audio/build
+ key: audio-${{ matrix.os }}-${{ matrix.compiler.cpp }}-${{ hashFiles('audio/CMakeLists.txt') }}
+
+ - name: Build OpenShotAudio (if not cached)
+ if: steps.cache.outputs.cache-hit != 'true'
+ shell: bash
+ run: |
+ pushd audio
+ if [ ! -d build ]; then
+ mkdir build
+ cmake -B build -S .
+ fi
+ cmake --build build
+ popd
- name: Build libopenshot
shell: bash
run: |
mkdir build
pushd build
- cmake -B . -S .. -DCMAKE_INSTALL_PREFIX:PATH="dist" -DCMAKE_BUILD_TYPE="Debug" "${{ steps.coverage.outputs.value }}"
+ cmake -B . -S .. -DCMAKE_INSTALL_PREFIX:PATH="dist" -DCMAKE_BUILD_TYPE="Debug" -DOpenShotAudio_ROOT="../audio/build" "${{ steps.coverage.outputs.value }}"
cmake --build . -- VERBOSE=1
popd
@@ -67,7 +92,7 @@ jobs:
cmake --build . --target install -- VERBOSE=1
popd
- - uses: codecov/codecov-action@v1
- if: ${{ matrix.compiler == 'clang' }}
+ - uses: codecov/codecov-action@v2.1.0
+ if: ${{ matrix.compiler.cc == 'gcc' }}
with:
file: build/coverage.info
diff --git a/CMakeLists.txt b/CMakeLists.txt
index a3535ec9..9ed9de68 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -40,8 +40,8 @@ For more information, please visit .
set(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/Modules")
################ PROJECT VERSION ####################
-set(PROJECT_VERSION_FULL "0.2.5-dev3")
-set(PROJECT_SO_VERSION 19)
+set(PROJECT_VERSION_FULL "0.2.7-dev")
+set(PROJECT_SO_VERSION 21)
# Remove the dash and anything following, to get the #.#.# version for project()
STRING(REGEX REPLACE "\-.*$" "" VERSION_NUM "${PROJECT_VERSION_FULL}")
@@ -99,6 +99,12 @@ if(ENABLE_TESTS)
set(BUILD_TESTING ${ENABLE_TESTS})
endif()
+### JUCE requires one of -DDEBUG or -DNDEBUG set on the
+### compile command line. CMake automatically sets -DNDEBUG
+### on all non-debug configs, so we'll just add -DDEBUG to
+### the debug build flags
+set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DDEBUG")
+
#### Work around a GCC < 9 bug with handling of _Pragma() in macros
#### See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=55578
if ((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") AND
@@ -200,6 +206,7 @@ if(BUILD_TESTING)
if(VERBOSE_TESTS)
list(APPEND CTEST_OPTIONS "-VV")
endif()
+ list(APPEND CTEST_OPTIONS "--output-on-failure")
add_subdirectory(tests)
endif()
add_feature_info("Unit tests" ${BUILD_TESTING} "Compile unit tests for library functions")
@@ -211,6 +218,7 @@ if (ENABLE_COVERAGE AND DEFINED UNIT_TEST_TARGETS)
"examples/*"
"${CMAKE_CURRENT_BINARY_DIR}/bindings/*"
"${CMAKE_CURRENT_BINARY_DIR}/src/*_autogen/*"
+ "audio/*"
)
setup_target_for_coverage_lcov(
NAME coverage
diff --git a/bindings/python/CMakeLists.txt b/bindings/python/CMakeLists.txt
index 7d588921..8116ccfd 100644
--- a/bindings/python/CMakeLists.txt
+++ b/bindings/python/CMakeLists.txt
@@ -95,6 +95,12 @@ if (DEFINED _inc)
set_property(SOURCE openshot.i PROPERTY INCLUDE_DIRECTORIES ${_inc})
endif()
+### (FINALLY!)
+### Properly manage dependencies (regenerate bindings after changes)
+if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.20)
+ set_property(SOURCE openshot.i PROPERTY USE_SWIG_DEPENDENCIES TRUE)
+endif()
+
### Add the SWIG interface file (which defines all the SWIG methods)
if (CMAKE_VERSION VERSION_LESS 3.8.0)
swig_add_module(pyopenshot python openshot.i)
diff --git a/bindings/ruby/CMakeLists.txt b/bindings/ruby/CMakeLists.txt
index b145494b..36f4a17d 100644
--- a/bindings/ruby/CMakeLists.txt
+++ b/bindings/ruby/CMakeLists.txt
@@ -111,6 +111,12 @@ if (DEFINED _inc)
set_property(SOURCE openshot.i PROPERTY INCLUDE_DIRECTORIES ${_inc})
endif()
+### (FINALLY!)
+### Properly manage dependencies (regenerate bindings after changes)
+if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.20)
+ set_property(SOURCE openshot.i PROPERTY USE_SWIG_DEPENDENCIES TRUE)
+endif()
+
### Add the SWIG interface file (which defines all the SWIG methods)
if (CMAKE_VERSION VERSION_LESS 3.8.0)
swig_add_module(rbopenshot ruby openshot.i)
diff --git a/cmake/Modules/FindFFmpeg.cmake b/cmake/Modules/FindFFmpeg.cmake
index b6da9244..4f5093d7 100644
--- a/cmake/Modules/FindFFmpeg.cmake
+++ b/cmake/Modules/FindFFmpeg.cmake
@@ -34,21 +34,23 @@ This module defines the following variables:
::
- FFMPEG_FOUND - System has the all required components.
- FFMPEG_INCLUDE_DIRS - Include directory necessary for using the required components headers.
- FFMPEG_LIBRARIES - Link these to use the required ffmpeg components.
- FFMPEG_DEFINITIONS - Compiler switches required for using the required ffmpeg components.
+ FFmpeg_FOUND - System has the all required components.
+ FFmpeg_INCLUDE_DIRS - Include directory necessary for using the required components headers.
+ FFmpeg_LIBRARIES - Link these to use the required ffmpeg components.
+ FFmpeg_DEFINITIONS - Compiler switches required for using the required ffmpeg components.
+ FFmpeg_VERSION - The FFmpeg package version found.
-For each component, ``<component>_FOUND`` will be set if the component is available.
-
-For each ``<component>_FOUND``, the following variables will be defined:
+For each component, ``FFmpeg_<component>_FOUND`` will be set if the component is available.
+
+For each ``FFmpeg_<component>_FOUND``, the following variables will be defined:
::
-  <component>_INCLUDE_DIRS - Include directory necessary for using the <component> headers
-  <component>_LIBRARIES - Link these to use <component>
-  <component>_DEFINITIONS - Compiler switches required for using <component>
-  <component>_VERSION - The components version
+  FFmpeg_<component>_INCLUDE_DIRS - Include directory necessary for using the
+    <component> headers
+  FFmpeg_<component>_LIBRARIES - Link these to use <component>
+  FFmpeg_<component>_DEFINITIONS - Compiler switches required for <component>
+  FFmpeg_<component>_VERSION - The components version
Backwards compatibility
^^^^^^^^^^^^^^^^^^^^^^^
@@ -57,10 +59,20 @@ For compatibility with previous versions of this module, uppercase names
for FFmpeg and for all components are also recognized, and all-uppercase
versions of the cache variables are also created.
+Revision history
+^^^^^^^^^^^^^^^^
+ca. 2019 - Create CMake targets for discovered components
+2019-06-25 - No longer probe for non-requested components
+ - Added fallback version.h parsing for components, when
+ pkgconfig is missing
+ - Added parsing of libavutil/ffversion.h for FFmpeg_VERSION
+ - Adopt standard FFmpeg__ variable names
+ - Switch to full signature for FPHSA, use HANDLE_COMPONENTS
+
Copyright (c) 2006, Matthias Kretz,
Copyright (c) 2008, Alexander Neundorf,
Copyright (c) 2011, Michael Jansen,
-Copyright (c) 2019, FeRD (Frank Dana)
+Copyright (c) 2019-2021, FeRD (Frank Dana)
Redistribution and use is allowed according to the terms of the BSD license.
For details see the accompanying COPYING-CMAKE-SCRIPTS file.
@@ -84,16 +96,44 @@ endif ()
# Marks the given component as found if both *_LIBRARIES AND *_INCLUDE_DIRS is present.
#
macro(set_component_found _component )
- if (${_component}_LIBRARIES AND ${_component}_INCLUDE_DIRS)
+ if (FFmpeg_${_component}_LIBRARIES AND FFmpeg_${_component}_INCLUDE_DIRS)
# message(STATUS "FFmpeg - ${_component} found.")
- set(${_component}_FOUND TRUE)
- else ()
- if (NOT FFmpeg_FIND_QUIETLY AND NOT FFMPEG_FIND_QUIETLY)
- message(STATUS "FFmpeg - ${_component} not found.")
- endif ()
+ set(FFmpeg_${_component}_FOUND TRUE)
+ #else ()
+ # if (NOT FFmpeg_FIND_QUIETLY AND NOT FFMPEG_FIND_QUIETLY)
+ # message(STATUS "FFmpeg - ${_component} not found.")
+ # endif ()
endif ()
endmacro()
+#
+### Macro: parse_lib_version
+#
+# Reads the file '${_libname}/version.h' in the component's _INCLUDE_DIR,
+# and parses #define statements for ${_libname}'s VERSION_(MAJOR|MINOR|MICRO)
+# into a dotted string FFmpeg_${_component}_VERSION.
+#
+# Needed if the version is not supplied via pkgconfig's PC_${_component}_VERSION
+macro(parse_lib_version _component _libname )
+ set(_version_h "${FFmpeg_${_component}_INCLUDE_DIRS}/${_libname}/version.h")
+ if(EXISTS "${_version_h}")
+ #message(STATUS "Parsing ${_component} version from ${_version_h}")
+ string(TOUPPER "${_libname}" _prefix)
+ set(_parts)
+ foreach(_lvl MAJOR MINOR MICRO)
+ file(STRINGS "${_version_h}" _lvl_version
+ REGEX "^[ \t]*#define[ \t]+${_prefix}_VERSION_${_lvl}[ \t]+[0-9]+[ \t]*$")
+ string(REGEX REPLACE
+ "^.*${_prefix}_VERSION_${_lvl}[ \t]+([0-9]+)[ \t]*$"
+ "\\1"
+ _lvl_match "${_lvl_version}")
+ list(APPEND _parts "${_lvl_match}")
+ endforeach()
+ list(JOIN _parts "." FFmpeg_${_component}_VERSION)
+ message(STATUS "Found ${_component} version: ${FFmpeg_${_component}_VERSION}")
+ endif()
+endmacro()
+
#
### Macro: find_component
#
@@ -109,9 +149,9 @@ macro(find_component _component _pkgconfig _library _header)
if (PKG_CONFIG_FOUND)
pkg_check_modules(PC_${_component} ${_pkgconfig})
endif ()
- endif (NOT WIN32)
+ endif()
- find_path(${_component}_INCLUDE_DIRS ${_header}
+ find_path(FFmpeg_${_component}_INCLUDE_DIRS ${_header}
HINTS
/opt/
/opt/include/
@@ -123,7 +163,7 @@ macro(find_component _component _pkgconfig _library _header)
ffmpeg
)
- find_library(${_component}_LIBRARIES NAMES ${_library}
+ find_library(FFmpeg_${_component}_LIBRARIES NAMES ${_library}
HINTS
${PC_${_component}_LIBDIR}
${PC_${_component}_LIBRARY_DIRS}
@@ -132,56 +172,86 @@ macro(find_component _component _pkgconfig _library _header)
$ENV{FFMPEGDIR}/bin/
)
- set(${_component}_DEFINITIONS ${PC_${_component}_CFLAGS_OTHER} CACHE STRING "The ${_component} CFLAGS.")
- set(${_component}_VERSION ${PC_${_component}_VERSION} CACHE STRING "The ${_component} version number.")
+ set(FFmpeg_${_component}_DEFINITIONS ${PC_${_component}_CFLAGS_OTHER} CACHE STRING "The ${_component} CFLAGS.")
+
+ # Take version from PkgConfig, or parse from its version.h header
+ if (PC_${_component}_VERSION)
+ set(FFmpeg_${_component}_VERSION ${PC_${_component}_VERSION})
+ else()
+ parse_lib_version(${_component} ${_pkgconfig})
+ endif()
+
+ set(FFmpeg_${_component}_VERSION ${FFmpeg_${_component}_VERSION} CACHE STRING "The ${_component} version number.")
set_component_found(${_component})
mark_as_advanced(
- ${_component}_INCLUDE_DIRS
- ${_component}_LIBRARIES
- ${_component}_DEFINITIONS
- ${_component}_VERSION
+ FFmpeg_${_component}_INCLUDE_DIRS
+ FFmpeg_${_component}_LIBRARIES
+ FFmpeg_${_component}_DEFINITIONS
+ FFmpeg_${_component}_VERSION
)
endmacro()
+#
+### Macro: parse_ff_version
+#
+# Read the libavutil/ffversion.h file and extract the definition
+# for FFMPEG_VERSION, to use as our version string.
+macro (parse_ff_version)
+ set(_header "${FFmpeg_avutil_INCLUDE_DIRS}/libavutil/ffversion.h")
+ if(EXISTS "${_header}")
+ #message(STATUS "Parsing ffmpeg version from ${_header}")
+ file(STRINGS "${_header}" _version_def
+ REGEX "^#define[ \t]+FFMPEG_VERSION[ \t]+\".*\"[ \t]*$")
+ string(REGEX REPLACE
+ "^.*FFMPEG_VERSION[ \t]+\"(.*)\".*$"
+ "\\1"
+ FFmpeg_VERSION "${_version_def}")
+ #message(STATUS "Found FFmpeg version: ${FFmpeg_VERSION}")
+ endif()
+endmacro()
-# Check for cached results. If there are skip the costly part.
-if (NOT FFmpeg_LIBRARIES)
+# Configs for all possible component.
+set(avcodec_params libavcodec avcodec libavcodec/avcodec.h)
+set(avdevice_params libavdevice avdevice libavdevice/avdevice.h)
+set(avformat_params libavformat avformat libavformat/avformat.h)
+set(avfilter_params libavfilter avfilter libavfilter/avfilter.h)
+set(avutil_params libavutil avutil libavutil/avutil.h)
+set(postproc_params libpostproc postproc libpostproc/postprocess.h)
+set(swscale_params libswscale swscale libswscale/swscale.h)
+set(swresample_params libswresample swresample libswresample/swresample.h)
+set(avresample_params libavresample avresample libavresample/avresample.h)
- # Check for all possible component.
- find_component(avcodec libavcodec avcodec libavcodec/avcodec.h)
- find_component(avdevice libavdevice avdevice libavdevice/avdevice.h)
- find_component(avformat libavformat avformat libavformat/avformat.h)
- find_component(avfilter libavfilter avfilter libavfilter/avfilter.h)
- find_component(avutil libavutil avutil libavutil/avutil.h)
- find_component(postproc libpostproc postproc libpostproc/postprocess.h)
- find_component(swscale libswscale swscale libswscale/swscale.h)
- find_component(swresample libswresample swresample libswresample/swresample.h)
- find_component(avresample libavresample avresample libavresample/avresample.h)
-else()
- # Just set the noncached _FOUND vars for the components.
- foreach(_component ${FFmpeg_ALL_COMPONENTS})
- set_component_found(${_component})
- endforeach ()
-endif()
-
-# Check if the requested components were found and add their stuff to the FFmpeg_* vars.
-foreach (_component ${FFmpeg_FIND_COMPONENTS})
+# Gather configs for each requested component
+foreach(_component ${FFmpeg_FIND_COMPONENTS})
string(TOLOWER "${_component}" _component)
- if (${_component}_FOUND)
+ # Only probe if not already _FOUND (expensive)
+ if (NOT FFmpeg_${_component}_FOUND)
+ find_component(${_component} ${${_component}_params})
+ endif()
+
+ # Add the component's configs to the FFmpeg_* variables
+ if (FFmpeg_${_component}_FOUND)
# message(STATUS "Requested component ${_component} present.")
- set(FFmpeg_LIBRARIES ${FFmpeg_LIBRARIES} ${${_component}_LIBRARIES})
- set(FFmpeg_DEFINITIONS ${FFmpeg_DEFINITIONS} ${${_component}_DEFINITIONS})
- list(APPEND FFmpeg_INCLUDE_DIRS ${${_component}_INCLUDE_DIRS})
+ set(FFmpeg_LIBRARIES ${FFmpeg_LIBRARIES} ${FFmpeg_${_component}_LIBRARIES})
+ set(FFmpeg_DEFINITIONS ${FFmpeg_DEFINITIONS} ${FFmpeg_${_component}_DEFINITIONS})
+ list(APPEND FFmpeg_INCLUDE_DIRS ${FFmpeg_${_component}_INCLUDE_DIRS})
else ()
# message(STATUS "Requested component ${_component} missing.")
endif ()
-endforeach ()
+endforeach()
+
+# Make sure we've probed for avutil
+if (NOT FFmpeg_avutil_FOUND)
+ find_component(avutil libavutil avutil libavutil/avutil.h)
+endif()
+# Get the overall FFmpeg version from libavutil/ffversion.h
+parse_ff_version()
# Build the result lists with duplicates removed, in case of repeated
-# invocations.
+# invocations or component redundancy.
if (FFmpeg_INCLUDE_DIRS)
list(REMOVE_DUPLICATES FFmpeg_INCLUDE_DIRS)
endif()
@@ -196,57 +266,68 @@ endif ()
set(FFmpeg_INCLUDE_DIRS ${FFmpeg_INCLUDE_DIRS} CACHE STRING "The FFmpeg include directories." FORCE)
set(FFmpeg_LIBRARIES ${FFmpeg_LIBRARIES} CACHE STRING "The FFmpeg libraries." FORCE)
set(FFmpeg_DEFINITIONS ${FFmpeg_DEFINITIONS} CACHE STRING "The FFmpeg cflags." FORCE)
+set(FFmpeg_VERSION ${FFmpeg_VERSION} CACHE STRING "The overall FFmpeg version.")
-mark_as_advanced(FFmpeg_INCLUDE_DIRS
+mark_as_advanced(
+ FFmpeg_INCLUDE_DIRS
FFmpeg_LIBRARIES
- FFmpeg_DEFINITIONS)
+ FFmpeg_DEFINITIONS
+ FFmpeg_VERSION
+)
# Backwards compatibility
-foreach(_suffix INCLUDE_DIRS LIBRARIES DEFINITIONS)
+foreach(_suffix INCLUDE_DIRS LIBRARIES DEFINITIONS VERSION)
get_property(_help CACHE FFmpeg_${_suffix} PROPERTY HELPSTRING)
set(FFMPEG_${_suffix} ${FFmpeg_${_suffix}} CACHE STRING "${_help}" FORCE)
mark_as_advanced(FFMPEG_${_suffix})
endforeach()
foreach(_component ${FFmpeg_ALL_COMPONENTS})
- if(${_component}_FOUND)
+ if(FFmpeg_${_component}_FOUND)
string(TOUPPER "${_component}" _uc_component)
- set(${_uc_component}_FOUND TRUE)
+ set(FFMPEG_${_uc_component}_FOUND TRUE)
foreach(_suffix INCLUDE_DIRS LIBRARIES DEFINITIONS VERSION)
- get_property(_help CACHE ${_component}_${_suffix} PROPERTY HELPSTRING)
- set(${_uc_component}_${_suffix} ${${_component}_${_suffix}} CACHE STRING "${_help}" FORCE)
- mark_as_advanced(${_uc_component}_${_suffix})
+ get_property(_help CACHE FFmpeg_${_component}_${_suffix} PROPERTY HELPSTRING)
+ set(FFMPEG_${_uc_component}_${_suffix} ${FFmpeg_${_component}_${_suffix}} CACHE STRING "${_help}" FORCE)
+ mark_as_advanced(FFMPEG_${_uc_component}_${_suffix})
endforeach()
endif()
endforeach()
# Compile the list of required vars
set(_FFmpeg_REQUIRED_VARS FFmpeg_LIBRARIES FFmpeg_INCLUDE_DIRS)
-foreach (_component ${FFmpeg_FIND_COMPONENTS})
- list(APPEND _FFmpeg_REQUIRED_VARS
- ${_component}_LIBRARIES
- ${_component}_INCLUDE_DIRS)
-endforeach ()
+# XXX: HANDLE_COMPONENTS should take care of this, maybe? -FeRD
+# foreach (_component ${FFmpeg_FIND_COMPONENTS})
+# list(APPEND _FFmpeg_REQUIRED_VARS
+# FFmpeg_${_component}_LIBRARIES
+# FFmpeg_${_component}_INCLUDE_DIRS)
+# endforeach ()
# Give a nice error message if some of the required vars are missing.
-find_package_handle_standard_args(FFmpeg DEFAULT_MSG ${_FFmpeg_REQUIRED_VARS})
+find_package_handle_standard_args(FFmpeg
+ REQUIRED_VARS ${_FFmpeg_REQUIRED_VARS}
+ VERSION_VAR FFmpeg_VERSION
+ HANDLE_COMPONENTS
+)
# Export targets for each found component
-foreach (_component ${FFmpeg_ALL_COMPONENTS})
+foreach (_component ${FFmpeg_FIND_COMPONENTS})
- if(${_component}_FOUND)
- # message(STATUS "Creating IMPORTED target FFmpeg::${_component}")
+ if(FFmpeg_${_component}_FOUND)
+ #message(STATUS "Creating IMPORTED target FFmpeg::${_component}")
if(NOT TARGET FFmpeg::${_component})
add_library(FFmpeg::${_component} UNKNOWN IMPORTED)
set_target_properties(FFmpeg::${_component} PROPERTIES
- INTERFACE_INCLUDE_DIRECTORIES "${${_component}_INCLUDE_DIRS}")
+ INTERFACE_INCLUDE_DIRECTORIES
+ "${FFmpeg_${_component}_INCLUDE_DIRS}")
set_property(TARGET FFmpeg::${_component} APPEND PROPERTY
- INTERFACE_COMPILE_DEFINITIONS "${${_component}_DEFINITIONS}")
+ INTERFACE_COMPILE_DEFINITIONS
+ "${FFmpeg_${_component}_DEFINITIONS}")
set_property(TARGET FFmpeg::${_component} APPEND PROPERTY
- IMPORTED_LOCATION "${${_component}_LIBRARIES}")
+ IMPORTED_LOCATION "${FFmpeg_${_component}_LIBRARIES}")
endif()
endif()
diff --git a/src/AudioReaderSource.cpp b/src/AudioReaderSource.cpp
index acd69deb..19fbb6ca 100644
--- a/src/AudioReaderSource.cpp
+++ b/src/AudioReaderSource.cpp
@@ -35,21 +35,25 @@ using namespace std;
using namespace openshot;
// Constructor that reads samples from a reader
-AudioReaderSource::AudioReaderSource(ReaderBase *audio_reader, int64_t starting_frame_number, int buffer_size)
- : reader(audio_reader), frame_number(starting_frame_number),
- size(buffer_size), position(0), frame_position(0), estimated_frame(0), speed(1) {
-
- // Initialize an audio buffer (based on reader)
- buffer = new juce::AudioSampleBuffer(reader->info.channels, size);
-
- // initialize the audio samples to zero (silence)
+AudioReaderSource::AudioReaderSource(
+ ReaderBase *audio_reader, int64_t starting_frame_number, int buffer_size
+) :
+ position(0),
+ size(buffer_size),
+ buffer(new juce::AudioSampleBuffer(audio_reader->info.channels, buffer_size)),
+ speed(1),
+ reader(audio_reader),
+ frame_number(starting_frame_number),
+ frame_position(0),
+ estimated_frame(0)
+{
+ // Zero the buffer contents
buffer->clear();
}
// Destructor
AudioReaderSource::~AudioReaderSource()
{
- // Clear and delete the buffer
delete buffer;
buffer = NULL;
}
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 46663664..e43c1c3a 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -130,7 +130,17 @@ set(EFFECTS_SOURCES
effects/Pixelate.cpp
effects/Saturation.cpp
effects/Shift.cpp
- effects/Wave.cpp)
+ effects/Wave.cpp
+ audio_effects/STFT.cpp
+ audio_effects/Noise.cpp
+ audio_effects/Delay.cpp
+ audio_effects/Echo.cpp
+ audio_effects/Distortion.cpp
+ audio_effects/ParametricEQ.cpp
+ audio_effects/Compressor.cpp
+ audio_effects/Expander.cpp
+ audio_effects/Robotization.cpp
+ audio_effects/Whisperization.cpp)
# Qt video player components
set(QT_PLAYER_SOURCES
@@ -186,56 +196,57 @@ target_link_libraries(openshot PUBLIC OpenShot::Audio)
###
# Find the ImageMagick++ library
-find_package(ImageMagick COMPONENTS Magick++ MagickCore)
+if (ENABLE_MAGICK)
+ find_package(ImageMagick COMPONENTS Magick++ MagickCore)
-if(ImageMagick_FOUND)
- if(NOT TARGET ImageMagick::Magick++ AND NOT TARGET Magick++_TARGET)
- add_library(Magick++_TARGET INTERFACE)
+ if(ImageMagick_FOUND)
+ if(NOT TARGET ImageMagick::Magick++ AND NOT TARGET Magick++_TARGET)
+ add_library(Magick++_TARGET INTERFACE)
- # Include ImageMagick++ headers (needed for compile)
- set_property(TARGET Magick++_TARGET APPEND PROPERTY
- INTERFACE_INCLUDE_DIRECTORIES ${ImageMagick_INCLUDE_DIRS})
+ # Include ImageMagick++ headers (needed for compile)
+ set_property(TARGET Magick++_TARGET APPEND PROPERTY
+ INTERFACE_INCLUDE_DIRECTORIES ${ImageMagick_INCLUDE_DIRS})
+
+ # Set the Quantum Depth that ImageMagick was built with (default to 16 bits)
+ if(NOT DEFINED MAGICKCORE_QUANTUM_DEPTH)
+ set(MAGICKCORE_QUANTUM_DEPTH 16)
+ endif()
+ if(NOT DEFINED MAGICKCORE_HDRI_ENABLE)
+ set(MAGICKCORE_HDRI_ENABLE 0)
+ endif()
+
+ set_property(TARGET Magick++_TARGET APPEND PROPERTY
+ INTERFACE_COMPILE_DEFINITIONS
+ MAGICKCORE_QUANTUM_DEPTH=${MAGICKCORE_QUANTUM_DEPTH})
+ set_property(TARGET Magick++_TARGET APPEND PROPERTY
+ INTERFACE_COMPILE_DEFINITIONS
+ MAGICKCORE_HDRI_ENABLE=${MAGICKCORE_HDRI_ENABLE})
+
+ target_link_libraries(Magick++_TARGET INTERFACE
+ ${ImageMagick_LIBRARIES})
+
+ # Alias to our namespaced name
+ add_library(ImageMagick::Magick++ ALIAS Magick++_TARGET)
- # Set the Quantum Depth that ImageMagick was built with (default to 16 bits)
- if(NOT DEFINED MAGICKCORE_QUANTUM_DEPTH)
- set(MAGICKCORE_QUANTUM_DEPTH 16)
- endif()
- if(NOT DEFINED MAGICKCORE_HDRI_ENABLE)
- set(MAGICKCORE_HDRI_ENABLE 0)
endif()
- set_property(TARGET Magick++_TARGET APPEND PROPERTY
- INTERFACE_COMPILE_DEFINITIONS
- MAGICKCORE_QUANTUM_DEPTH=${MAGICKCORE_QUANTUM_DEPTH})
- set_property(TARGET Magick++_TARGET APPEND PROPERTY
- INTERFACE_COMPILE_DEFINITIONS
- MAGICKCORE_HDRI_ENABLE=${MAGICKCORE_HDRI_ENABLE})
+ # Add optional ImageMagic-dependent sources
+ target_sources(openshot PRIVATE
+ ImageReader.cpp
+ ImageWriter.cpp
+ TextReader.cpp)
- target_link_libraries(Magick++_TARGET INTERFACE
- ${ImageMagick_LIBRARIES})
+ # define a preprocessor macro (used in the C++ source)
+ target_compile_definitions(openshot PUBLIC USE_IMAGEMAGICK=1)
- # Alias to our namespaced name
- add_library(ImageMagick::Magick++ ALIAS Magick++_TARGET)
+ # Link with ImageMagick library
+ target_link_libraries(openshot PUBLIC ImageMagick::Magick++)
+ set(HAVE_IMAGEMAGICK TRUE CACHE BOOL "Building with ImageMagick support" FORCE)
+ mark_as_advanced(HAVE_IMAGEMAGICK)
endif()
-
- # Add optional ImageMagic-dependent sources
- target_sources(openshot PRIVATE
- ImageReader.cpp
- ImageWriter.cpp
- TextReader.cpp)
-
- # define a preprocessor macro (used in the C++ source)
- target_compile_definitions(openshot PUBLIC USE_IMAGEMAGICK=1)
-
- # Link with ImageMagick library
- target_link_libraries(openshot PUBLIC ImageMagick::Magick++)
-
- set(HAVE_IMAGEMAGICK TRUE CACHE BOOL "Building with ImageMagick support" FORCE)
- mark_as_advanced(HAVE_IMAGEMAGICK)
endif()
-
################### JSONCPP #####################
# Include jsoncpp headers (needed for JSON parsing)
if (USE_SYSTEM_JSONCPP)
@@ -273,7 +284,7 @@ endif ()
################# QT5 ###################
# Find QT5 libraries
-set(_qt_components Core Gui Widgets)
+set(_qt_components Core Gui Widgets Svg)
find_package(Qt5 COMPONENTS ${_qt_components} REQUIRED)
foreach(_qt_comp IN LISTS _qt_components)
@@ -288,31 +299,45 @@ mark_as_advanced(QT_VERSION_STR)
################### FFMPEG #####################
# Find FFmpeg libraries (used for video encoding / decoding)
-find_package(FFmpeg REQUIRED COMPONENTS avcodec avformat avutil swscale)
+find_package(FFmpeg REQUIRED
+ COMPONENTS avcodec avformat avutil swscale
+ OPTIONAL_COMPONENTS swresample avresample
+)
set(all_comps avcodec avformat avutil swscale)
-if(TARGET FFmpeg::swresample)
- list(APPEND all_comps swresample)
-else()
- list(APPEND all_comps avresample)
-endif()
set(version_comps avcodec avformat avutil)
+# Pick a resampler. Prefer swresample if possible
+if(TARGET FFmpeg::swresample AND ${FFmpeg_avformat_VERSION} VERSION_GREATER "57.0.0")
+ set(resample_lib swresample)
+ set(USE_SW TRUE)
+else()
+ set(resample_lib avresample)
+ set(USE_SW FALSE)
+endif()
+list(APPEND all_comps ${resample_lib})
+
foreach(ff_comp IN LISTS all_comps)
if(TARGET FFmpeg::${ff_comp})
target_link_libraries(openshot PUBLIC FFmpeg::${ff_comp})
# Keep track of some FFmpeg lib versions, to embed in our version header
- if(${ff_comp} IN_LIST version_comps AND ${ff_comp}_VERSION)
+ if(${ff_comp} IN_LIST version_comps AND FFmpeg_${ff_comp}_VERSION)
string(TOUPPER ${ff_comp} v_name)
- set(${v_name}_VERSION_STR ${${ff_comp}_VERSION} CACHE STRING "${ff_comp} version used" FORCE)
+ set(${v_name}_VERSION_STR ${FFmpeg_${ff_comp}_VERSION} CACHE STRING "${ff_comp} version used" FORCE)
mark_as_advanced(${v_name}_VERSION_STR)
endif()
endif()
endforeach()
+# Indicate which resampler we linked with, and set a config header flag
+add_feature_info("FFmpeg ${resample_lib}" TRUE "Audio resampling uses ${resample_lib}")
+# Set the appropriate flag in OpenShotVersion.h
+set(FFMPEG_USE_SWRESAMPLE ${USE_SW} CACHE BOOL "libswresample used for audio resampling" FORCE)
+mark_as_advanced(FFMPEG_USE_SWRESAMPLE)
+
# Version check for hardware-acceleration code
-if(USE_HW_ACCEL AND avcodec_VERSION)
- if(${avcodec_VERSION} VERSION_GREATER 57.107.100)
+if(USE_HW_ACCEL AND FFmpeg_avcodec_VERSION)
+ if(${FFmpeg_avcodec_VERSION} VERSION_GREATER "57.106")
set(HAVE_HW_ACCEL TRUE)
endif()
endif()
@@ -537,4 +562,3 @@ endif()
set(CPACK_DEBIAN_PACKAGE_MAINTAINER "Jonathan Thomas") #required
include(CPack)
-
diff --git a/src/CVTracker.cpp b/src/CVTracker.cpp
index b4fb85ea..b3891674 100644
--- a/src/CVTracker.cpp
+++ b/src/CVTracker.cpp
@@ -207,7 +207,7 @@ cv::Rect2d CVTracker::filter_box_jitter(size_t frameId){
float curr_box_height = bbox.height;
// keep the last width and height if the difference is less than 1%
float threshold = 0.01;
-
+
cv::Rect2d filtered_box = bbox;
if(std::abs(1-(curr_box_width/last_box_width)) <= threshold){
filtered_box.width = last_box_width;
@@ -299,13 +299,13 @@ void CVTracker::SetJson(const std::string value) {
// Load Json::Value into this object
void CVTracker::SetJsonValue(const Json::Value root) {
- // Set data from Json (if key is found)
- if (!root["protobuf_data_path"].isNull()){
- protobuf_data_path = (root["protobuf_data_path"].asString());
- }
+ // Set data from Json (if key is found)
+ if (!root["protobuf_data_path"].isNull()){
+ protobuf_data_path = (root["protobuf_data_path"].asString());
+ }
if (!root["tracker-type"].isNull()){
- trackerType = (root["tracker-type"].asString());
- }
+ trackerType = (root["tracker-type"].asString());
+ }
if (!root["region"].isNull()){
double x = root["region"]["normalized_x"].asDouble();
@@ -314,20 +314,22 @@ void CVTracker::SetJsonValue(const Json::Value root) {
double h = root["region"]["normalized_height"].asDouble();
cv::Rect2d prev_bbox(x,y,w,h);
bbox = prev_bbox;
+
+ if (!root["region"]["first-frame"].isNull()){
+ start = root["region"]["first-frame"].asInt64();
+ json_interval = true;
+ }
+ else{
+ processingController->SetError(true, "No first-frame");
+ error = true;
+ }
+
}
else{
processingController->SetError(true, "No initial bounding box selected");
error = true;
}
- if (!root["region"]["first-frame"].isNull()){
- start = root["region"]["first-frame"].asInt64();
- json_interval = true;
- }
- else{
- processingController->SetError(true, "No first-frame");
- error = true;
- }
}
/*
diff --git a/src/Clip.cpp b/src/Clip.cpp
index 8caaf836..35d0fba1 100644
--- a/src/Clip.cpp
+++ b/src/Clip.cpp
@@ -121,12 +121,8 @@ void Clip::init_reader_settings() {
// Init reader's rotation (if any)
void Clip::init_reader_rotation() {
- // Only init rotation from reader when needed
- if (rotation.GetCount() > 1)
- // Do nothing if more than 1 rotation Point
- return;
- else if (rotation.GetCount() == 1 && rotation.GetValue(1) != 0.0)
- // Do nothing if 1 Point, and it's not the default value
+ // Dont init rotation if clip has keyframes
+ if (rotation.GetCount() > 0)
return;
// Init rotation
@@ -1384,13 +1380,9 @@ QTransform Clip::get_transform(std::shared_ptr<Frame> frame, int width, int heig
break;
}
case (SCALE_NONE): {
- // Calculate ratio of source size to project size
- // Even with no scaling, previews need to be adjusted correctly
- // (otherwise NONE scaling draws the frame image outside of the preview)
- float source_width_ratio = source_size.width() / float(width);
- float source_height_ratio = source_size.height() / float(height);
- source_size.scale(width * source_width_ratio, height * source_height_ratio, Qt::KeepAspectRatio);
-
+ // Image is already the original size (i.e. no scaling mode) relative
+ // to the preview window size (i.e. timeline / preview ratio). No further
+ // scaling is needed here.
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_NONE)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
break;
diff --git a/src/ClipBase.h b/src/ClipBase.h
index c38b9790..2f138a5c 100644
--- a/src/ClipBase.h
+++ b/src/ClipBase.h
@@ -68,15 +68,13 @@ namespace openshot {
CacheMemory cache;
/// Constructor for the base clip
- ClipBase() {
- // Initialize values
- position = 0.0;
- layer = 0;
- start = 0.0;
- end = 0.0;
- previous_properties = "";
- timeline = NULL;
- };
+ ClipBase() :
+ position(0.0),
+ layer(0),
+ start(0.0),
+ end(0.0),
+ previous_properties(""),
+ timeline(nullptr) {}
// Compare a clip using the Position() property
bool operator< ( ClipBase& a) { return (Position() < a.Position()); }
diff --git a/src/Color.cpp b/src/Color.cpp
index e848f1f7..c877cbd4 100644
--- a/src/Color.cpp
+++ b/src/Color.cpp
@@ -28,6 +28,8 @@
* along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <cmath>
+
#include "Color.h"
#include "Exceptions.h"
diff --git a/src/Coordinate.cpp b/src/Coordinate.cpp
index 6a4abb21..d2a2ed3c 100644
--- a/src/Coordinate.cpp
+++ b/src/Coordinate.cpp
@@ -34,14 +34,14 @@
using namespace openshot;
// Default constructor for a coordinate, delegating to the full signature
-Coordinate::Coordinate() : Coordinate::Coordinate(0, 0) {};
+Coordinate::Coordinate() : Coordinate::Coordinate(0, 0) {}
// Constructor which also allows the user to set the X and Y
-Coordinate::Coordinate(double x, double y) : X(x), Y(y) {};
+Coordinate::Coordinate(double x, double y) : X(x), Y(y) {}
// Constructor which accepts a std::pair for (X, Y)
Coordinate::Coordinate(const std::pair& co)
- : X(co.first), Y(co.second) {};
+ : X(co.first), Y(co.second) {}
// Generate JSON string of this object
std::string Coordinate::Json() const {
diff --git a/src/Coordinate.h b/src/Coordinate.h
index 0a3ba978..d4576cd2 100644
--- a/src/Coordinate.h
+++ b/src/Coordinate.h
@@ -37,43 +37,57 @@
namespace openshot {
- /**
- * @brief This class represents a Cartesian coordinate (X, Y) used in the Keyframe animation system.
- *
- * Animation involves the changing (i.e. interpolation) of numbers over time. A series of Coordinate
- * objects allows us to plot a specific curve or line used during interpolation. In other words, it helps us
- * control how a number changes over time (quickly or slowly).
- *
- * Please see the following Example Code:
- * \code
- * Coordinate c1(2,4);
- * assert(c1.X == 2.0f);
- * assert(c1.Y == 4.0f);
- * \endcode
- */
- class Coordinate {
- public:
- double X; ///< The X value of the coordinate (usually representing the frame #)
- double Y; ///< The Y value of the coordinate (usually representing the value of the property being animated)
+/**
+ * @brief A Cartesian coordinate (X, Y) used in the Keyframe animation system.
+ *
+ * Animation involves the changing (i.e. interpolation) of numbers over time.
+ * A series of Coordinate objects allows us to plot a specific curve or line
+ * used during interpolation. In other words, it helps us control how a
+ * value changes over time — whether it's increasing or decreasing
+ * (the direction of the slope) and how quickly (the steepness of the curve).
+ *
+ * Please see the following Example Code:
+ * \code
+ * Coordinate c1(2,4);
+ * assert(c1.X == 2.0f);
+ * assert(c1.Y == 4.0f);
+ * \endcode
+ */
+class Coordinate {
+public:
+ double X; ///< The X value of the coordinate (usually representing the frame #)
+ double Y; ///< The Y value of the coordinate (usually representing the value of the property being animated)
- /// The default constructor, which defaults to (0,0)
- Coordinate();
+ /// The default constructor, which defaults to (0,0)
+ Coordinate();
- /// @brief Constructor which also sets the X and Y
- /// @param x The X coordinate (usually representing the frame #)
- /// @param y The Y coordinate (usually representing the value of the property being animated)
- Coordinate(double x, double y);
+ /// @brief Constructor which also sets the X and Y
+ /// @param x The X coordinate (usually representing the frame #)
+ /// @param y The Y coordinate (usually representing the value of the property being animated)
+ Coordinate(double x, double y);
- /// @brief Constructor which accepts a std::pair tuple for {X, Y}
- /// @param co A std::pair tuple containing (X, Y)
- Coordinate(const std::pair<double, double>& co);
+ /// @brief Constructor which accepts a std::pair tuple for {X, Y}
+ /// @param co A std::pair tuple containing (X, Y)
+ Coordinate(const std::pair<double, double>& co);
- // Get and Set JSON methods
- std::string Json() const; ///< Generate JSON string of this object
- Json::Value JsonValue() const; ///< Generate Json::Value for this object
- void SetJson(const std::string value); ///< Load JSON string into this object
- void SetJsonValue(const Json::Value root); ///< Load Json::Value into this object
- };
+ // Get and Set JSON methods
+ std::string Json() const; ///< Generate JSON string of this object
+ Json::Value JsonValue() const; ///< Generate Json::Value for this object
+ void SetJson(const std::string value); ///< Load JSON string into this object
+ void SetJsonValue(const Json::Value root); ///< Load Json::Value into this object
+};
+
+/// Stream output operator for openshot::Coordinate
+template<class charT, class traits>
+std::basic_ostream<charT, traits>&
+operator<<(std::basic_ostream<charT, traits>& o, const openshot::Coordinate& co) {
+ std::basic_ostringstream<charT, traits> s;
+ s.flags(o.flags());
+ s.imbue(o.getloc());
+ s.precision(o.precision());
+ s << "(" << co.X << ", " << co.Y << ")";
+ return o << s.str();
+}
}
diff --git a/src/EffectBase.cpp b/src/EffectBase.cpp
index b75a0820..a475c7eb 100644
--- a/src/EffectBase.cpp
+++ b/src/EffectBase.cpp
@@ -28,6 +28,9 @@
 * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <iomanip>
+#include <iostream>
+
#include "EffectBase.h"
#include "Exceptions.h"
@@ -57,16 +60,16 @@ void EffectBase::InitEffectInfo()
}
// Display file information
-void EffectBase::DisplayInfo() {
- std::cout << std::fixed << std::setprecision(2) << std::boolalpha;
- std::cout << "----------------------------" << std::endl;
- std::cout << "----- Effect Information -----" << std::endl;
- std::cout << "----------------------------" << std::endl;
- std::cout << "--> Name: " << info.name << std::endl;
- std::cout << "--> Description: " << info.description << std::endl;
- std::cout << "--> Has Video: " << info.has_video << std::endl;
- std::cout << "--> Has Audio: " << info.has_audio << std::endl;
- std::cout << "----------------------------" << std::endl;
+void EffectBase::DisplayInfo(std::ostream* out) {
+ *out << std::fixed << std::setprecision(2) << std::boolalpha;
+ *out << "----------------------------" << std::endl;
+ *out << "----- Effect Information -----" << std::endl;
+ *out << "----------------------------" << std::endl;
+ *out << "--> Name: " << info.name << std::endl;
+ *out << "--> Description: " << info.description << std::endl;
+ *out << "--> Has Video: " << info.has_video << std::endl;
+ *out << "--> Has Audio: " << info.has_audio << std::endl;
+ *out << "----------------------------" << std::endl;
}
// Constrain a color value from 0 to 255
diff --git a/src/EffectBase.h b/src/EffectBase.h
index dc78a7c9..05936500 100644
--- a/src/EffectBase.h
+++ b/src/EffectBase.h
@@ -87,7 +87,7 @@ namespace openshot
EffectInfoStruct info;
/// Display effect information in the standard output stream (stdout)
- void DisplayInfo();
+ void DisplayInfo(std::ostream* out=&std::cout);
/// Constrain a color value from 0 to 255
int constrain(int color_value);
diff --git a/src/EffectInfo.cpp b/src/EffectInfo.cpp
index fb292dde..c6011fbc 100644
--- a/src/EffectInfo.cpp
+++ b/src/EffectInfo.cpp
@@ -88,6 +88,33 @@ EffectBase* EffectInfo::CreateEffect(std::string effect_type) {
else if (effect_type == "Wave")
return new Wave();
+ else if(effect_type == "Noise")
+ return new Noise();
+
+ else if(effect_type == "Delay")
+ return new Delay();
+
+ else if(effect_type == "Echo")
+ return new Echo();
+
+ else if(effect_type == "Distortion")
+ return new Distortion();
+
+ else if(effect_type == "ParametricEQ")
+ return new ParametricEQ();
+
+ else if(effect_type == "Compressor")
+ return new Compressor();
+
+ else if(effect_type == "Expander")
+ return new Expander();
+
+ else if(effect_type == "Robotization")
+ return new Robotization();
+
+ else if(effect_type == "Whisperization")
+ return new Whisperization();
+
#ifdef USE_OPENCV
else if(effect_type == "Stabilizer")
return new Stabilizer();
@@ -124,6 +151,16 @@ Json::Value EffectInfo::JsonValue() {
root.append(Saturation().JsonInfo());
root.append(Shift().JsonInfo());
root.append(Wave().JsonInfo());
+ /* Audio */
+ root.append(Noise().JsonInfo());
+ root.append(Delay().JsonInfo());
+ root.append(Echo().JsonInfo());
+ root.append(Distortion().JsonInfo());
+ root.append(ParametricEQ().JsonInfo());
+ root.append(Compressor().JsonInfo());
+ root.append(Expander().JsonInfo());
+ root.append(Robotization().JsonInfo());
+ root.append(Whisperization().JsonInfo());
#ifdef USE_OPENCV
root.append(Stabilizer().JsonInfo());
diff --git a/src/Effects.h b/src/Effects.h
index e4abc958..c0da0741 100644
--- a/src/Effects.h
+++ b/src/Effects.h
@@ -48,6 +48,18 @@
#include "effects/Shift.h"
#include "effects/Wave.h"
+/* Audio Effects */
+#include "audio_effects/Noise.h"
+#include "audio_effects/Delay.h"
+#include "audio_effects/Echo.h"
+#include "audio_effects/Distortion.h"
+#include "audio_effects/ParametricEQ.h"
+#include "audio_effects/Compressor.h"
+#include "audio_effects/Expander.h"
+#include "audio_effects/Robotization.h"
+#include "audio_effects/Whisperization.h"
+
+/* OpenCV Effects */
#ifdef USE_OPENCV
#include "effects/ObjectDetection.h"
#include "effects/Tracker.h"
diff --git a/src/Enums.h b/src/Enums.h
index 387191ea..601b2de9 100644
--- a/src/Enums.h
+++ b/src/Enums.h
@@ -80,5 +80,58 @@ namespace openshot
VOLUME_MIX_AVERAGE, ///< Evenly divide the overlapping clips volume keyframes, so that the sum does not exceed 100%
VOLUME_MIX_REDUCE ///< Reduce volume by about %25, and then mix (louder, but could cause pops if the sum exceeds 100%)
};
+
+
+ /// This enumeration determines the distortion type of Distortion Effect.
+ enum DistortionType
+ {
+ HARD_CLIPPING,
+ SOFT_CLIPPING,
+ EXPONENTIAL,
+ FULL_WAVE_RECTIFIER,
+ HALF_WAVE_RECTIFIER,
+ };
+
+ /// This enumeration determines the filter type of ParametricEQ Effect.
+ enum FilterType
+ {
+ LOW_PASS,
+ HIGH_PASS,
+ LOW_SHELF,
+ HIGH_SHELF,
+ BAND_PASS,
+ BAND_STOP,
+ PEAKING_NOTCH,
+ };
+
+ /// This enumeration determines the FFT size.
+ enum FFTSize
+ {
+ FFT_SIZE_32,
+ FFT_SIZE_64,
+ FFT_SIZE_128,
+ FFT_SIZE_256,
+ FFT_SIZE_512,
+ FFT_SIZE_1024,
+ FFT_SIZE_2048,
+ FFT_SIZE_4096,
+ FFT_SIZE_8192,
+ };
+
+ /// This enumeration determines the hop size.
+ enum HopSize {
+ HOP_SIZE_2,
+ HOP_SIZE_4,
+ HOP_SIZE_8,
+ };
+
+ /// This enumeration determines the window type.
+ enum WindowType {
+ RECTANGULAR,
+ BART_LETT,
+ HANN,
+ HAMMING,
+ };
+
}
#endif
diff --git a/src/FFmpegReader.cpp b/src/FFmpegReader.cpp
index b97d7345..c1eaa748 100644
--- a/src/FFmpegReader.cpp
+++ b/src/FFmpegReader.cpp
@@ -33,6 +33,7 @@
#include "FFmpegReader.h"
#include "Exceptions.h"
+#include "Timeline.h"
#include <thread> // for std::this_thread::sleep_for
#include <chrono> // for std::chrono::milliseconds
@@ -768,12 +769,12 @@ void FFmpegReader::UpdateVideoInfo() {
// Check for valid duration (if found)
if (info.duration <= 0.0f && pFormatCtx->duration >= 0)
// Use the format's duration
- info.duration = pFormatCtx->duration / AV_TIME_BASE;
+ info.duration = float(pFormatCtx->duration) / AV_TIME_BASE;
// Calculate duration from filesize and bitrate (if any)
if (info.duration <= 0.0f && info.video_bit_rate > 0 && info.file_size > 0)
// Estimate from bitrate, total bytes, and framerate
- info.duration = (info.file_size / info.video_bit_rate);
+ info.duration = float(info.file_size) / info.video_bit_rate;
// No duration found in stream of file
if (info.duration <= 0.0f) {
@@ -1285,9 +1286,18 @@ void FFmpegReader::ProcessVideoPacket(int64_t requested_frame) {
}
} else {
- // No scaling, use original image size (slower)
- max_width = info.width;
- max_height = info.height;
+ // Scale video to equivalent unscaled size
+ // Since the preview window can change sizes, we want to always
+ // scale against the ratio of original video size to timeline size
+ float preview_ratio = 1.0;
+ if (parent->ParentTimeline()) {
+ Timeline *t = (Timeline *) parent->ParentTimeline();
+ preview_ratio = t->preview_width / float(t->info.width);
+ }
+ float max_scale_x = parent->scale_x.GetMaxPoint().co.Y;
+ float max_scale_y = parent->scale_y.GetMaxPoint().co.Y;
+ max_width = info.width * max_scale_x * preview_ratio;
+ max_height = info.height * max_scale_y * preview_ratio;
}
}
diff --git a/src/FFmpegUtilities.h b/src/FFmpegUtilities.h
index fe46a212..42358eed 100644
--- a/src/FFmpegUtilities.h
+++ b/src/FFmpegUtilities.h
@@ -31,269 +31,302 @@
#ifndef OPENSHOT_FFMPEG_UTILITIES_H
#define OPENSHOT_FFMPEG_UTILITIES_H
- // Required for libavformat to build on Windows
- #ifndef INT64_C
- #define INT64_C(c) (c ## LL)
- #define UINT64_C(c) (c ## ULL)
- #endif
-
- #ifndef IS_FFMPEG_3_2
- #define IS_FFMPEG_3_2 (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 64, 101))
- #endif
-
- #ifndef USE_HW_ACCEL
- #define USE_HW_ACCEL (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 107, 100))
- #endif
-
- // Include the FFmpeg headers
- extern "C" {
- #include <libavcodec/avcodec.h>
- #include <libavformat/avformat.h>
- #if (LIBAVFORMAT_VERSION_MAJOR >= 57)
- #include <libavutil/hwcontext.h> //PM
- #endif
- #include <libswscale/swscale.h>
- // Change this to the first version swrescale works
- #if (LIBAVFORMAT_VERSION_MAJOR >= 57)
- #define USE_SW
- #endif
- #ifdef USE_SW
- #include <libswresample/swresample.h>
- #else
- #include <libavresample/avresample.h>
- #endif
- #include <libavutil/mathematics.h>
- #include <libavutil/pixfmt.h>
- #include <libavutil/pixdesc.h>
-
- // libavutil changed folders at some point
- #if LIBAVFORMAT_VERSION_MAJOR >= 53
- #include <libavutil/opt.h>
- #else
- #include <libavcodec/opt.h>
- #endif
-
- // channel header refactored
- #if LIBAVFORMAT_VERSION_MAJOR >= 54
- #include <libavutil/channel_layout.h>
- #endif
-
- #if IS_FFMPEG_3_2
- #include "libavutil/imgutils.h"
- #endif
- }
-
- // This was removed from newer versions of FFmpeg (but still used in libopenshot)
- #ifndef AVCODEC_MAX_AUDIO_FRAME_SIZE
- #define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio
- #endif
- #ifndef AV_ERROR_MAX_STRING_SIZE
- #define AV_ERROR_MAX_STRING_SIZE 64
- #endif
- #ifndef AUDIO_PACKET_ENCODING_SIZE
- #define AUDIO_PACKET_ENCODING_SIZE 768000 // 48khz * S16 (2 bytes) * max channels (8)
- #endif
-
- // This wraps an unsafe C macro to be C++ compatible function
- inline static const std::string av_make_error_string(int errnum)
- {
- char errbuf[AV_ERROR_MAX_STRING_SIZE];
- av_strerror(errnum, errbuf, AV_ERROR_MAX_STRING_SIZE);
- return (std::string)errbuf;
- }
-
- // Redefine the C macro to use our new C++ function
- #undef av_err2str
- #define av_err2str(errnum) av_make_error_string(errnum).c_str()
-
- // Define this for compatibility
- #ifndef PixelFormat
- #define PixelFormat AVPixelFormat
- #endif
- #ifndef PIX_FMT_RGBA
- #define PIX_FMT_RGBA AV_PIX_FMT_RGBA
- #endif
- #ifndef PIX_FMT_NONE
- #define PIX_FMT_NONE AV_PIX_FMT_NONE
- #endif
- #ifndef PIX_FMT_RGB24
- #define PIX_FMT_RGB24 AV_PIX_FMT_RGB24
- #endif
- #ifndef PIX_FMT_YUV420P
- #define PIX_FMT_YUV420P AV_PIX_FMT_YUV420P
- #endif
- #ifndef PIX_FMT_YUV444P
- #define PIX_FMT_YUV444P AV_PIX_FMT_YUV444P
- #endif
-
- // Does ffmpeg pixel format contain an alpha channel?
- inline static const bool ffmpeg_has_alpha(PixelFormat pix_fmt) {
- const AVPixFmtDescriptor *fmt_desc = av_pix_fmt_desc_get(pix_fmt);
- return bool(fmt_desc->flags & AV_PIX_FMT_FLAG_ALPHA);
- }
-
- // FFmpeg's libavutil/common.h defines an RSHIFT incompatible with Ruby's
- // definition in ruby/config.h, so we move it to FF_RSHIFT
- #ifdef RSHIFT
- #define FF_RSHIFT(a, b) RSHIFT(a, b)
- #undef RSHIFT
- #endif
-
- #ifdef USE_SW
- #define SWR_CONVERT(ctx, out, linesize, out_count, in, linesize2, in_count) \
- swr_convert(ctx, out, out_count, (const uint8_t **)in, in_count)
- #define SWR_ALLOC() swr_alloc()
- #define SWR_CLOSE(ctx) {}
- #define SWR_FREE(ctx) swr_free(ctx)
- #define SWR_INIT(ctx) swr_init(ctx)
- #define SWRCONTEXT SwrContext
- #else
- #define SWR_CONVERT(ctx, out, linesize, out_count, in, linesize2, in_count) \
- avresample_convert(ctx, out, linesize, out_count, (uint8_t **)in, linesize2, in_count)
- #define SWR_ALLOC() avresample_alloc_context()
- #define SWR_CLOSE(ctx) avresample_close(ctx)
- #define SWR_FREE(ctx) avresample_free(ctx)
- #define SWR_INIT(ctx) avresample_open(ctx)
- #define SWRCONTEXT AVAudioResampleContext
- #endif
-
-
- #if (LIBAVFORMAT_VERSION_MAJOR >= 58)
- #define AV_REGISTER_ALL
- #define AVCODEC_REGISTER_ALL
- #define AV_FILENAME url
- #define AV_SET_FILENAME(oc, f) oc->AV_FILENAME = av_strdup(f)
- #define MY_INPUT_BUFFER_PADDING_SIZE AV_INPUT_BUFFER_PADDING_SIZE
- #define AV_ALLOCATE_FRAME() av_frame_alloc()
- #define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) av_image_alloc(av_frame->data, av_frame->linesize, width, height, pix_fmt, 1)
- #define AV_RESET_FRAME(av_frame) av_frame_unref(av_frame)
- #define AV_FREE_FRAME(av_frame) av_frame_free(av_frame)
- #define AV_FREE_PACKET(av_packet) av_packet_unref(av_packet)
- #define AV_FREE_CONTEXT(av_context) avcodec_free_context(&av_context)
- #define AV_GET_CODEC_TYPE(av_stream) av_stream->codecpar->codec_type
- #define AV_FIND_DECODER_CODEC_ID(av_stream) av_stream->codecpar->codec_id
- #define AV_GET_CODEC_CONTEXT(av_stream, av_codec) \
- ({ AVCodecContext *context = avcodec_alloc_context3(av_codec); \
- avcodec_parameters_to_context(context, av_stream->codecpar); \
- context; })
- #define AV_GET_CODEC_PAR_CONTEXT(av_stream, av_codec) av_codec;
- #define AV_GET_CODEC_FROM_STREAM(av_stream,codec_in)
- #define AV_GET_CODEC_ATTRIBUTES(av_stream, av_context) av_stream->codecpar
- #define AV_GET_CODEC_PIXEL_FORMAT(av_stream, av_context) (AVPixelFormat) av_stream->codecpar->format
- #define AV_GET_SAMPLE_FORMAT(av_stream, av_context) av_stream->codecpar->format
- #define AV_GET_IMAGE_SIZE(pix_fmt, width, height) av_image_get_buffer_size(pix_fmt, width, height, 1)
- #define AV_COPY_PICTURE_DATA(av_frame, buffer, pix_fmt, width, height) av_image_fill_arrays(av_frame->data, av_frame->linesize, buffer, pix_fmt, width, height, 1)
- #define AV_OUTPUT_CONTEXT(output_context, path) avformat_alloc_output_context2( output_context, NULL, NULL, path)
- #define AV_OPTION_FIND(priv_data, name) av_opt_find(priv_data, name, NULL, 0, 0)
- #define AV_OPTION_SET( av_stream, priv_data, name, value, avcodec) av_opt_set(priv_data, name, value, 0); avcodec_parameters_from_context(av_stream->codecpar, avcodec);
- #define AV_FORMAT_NEW_STREAM(oc, st_codec_ctx, av_codec, av_st) av_st = avformat_new_stream(oc, NULL);\
- if (!av_st) \
- throw OutOfMemory("Could not allocate memory for the video stream.", path); \
- c = avcodec_alloc_context3(av_codec); \
- st_codec_ctx = c; \
- av_st->codecpar->codec_id = av_codec->id;
- #define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec_ctx) avcodec_parameters_from_context(av_stream->codecpar, av_codec_ctx);
- #elif IS_FFMPEG_3_2
- #define AV_REGISTER_ALL av_register_all();
- #define AVCODEC_REGISTER_ALL avcodec_register_all();
- #define AV_FILENAME filename
- #define AV_SET_FILENAME(oc, f) snprintf(oc->AV_FILENAME, sizeof(oc->AV_FILENAME), "%s", f)
- #define MY_INPUT_BUFFER_PADDING_SIZE FF_INPUT_BUFFER_PADDING_SIZE
- #define AV_ALLOCATE_FRAME() av_frame_alloc()
- #define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) av_image_alloc(av_frame->data, av_frame->linesize, width, height, pix_fmt, 1)
- #define AV_RESET_FRAME(av_frame) av_frame_unref(av_frame)
- #define AV_FREE_FRAME(av_frame) av_frame_free(av_frame)
- #define AV_FREE_PACKET(av_packet) av_packet_unref(av_packet)
- #define AV_FREE_CONTEXT(av_context) avcodec_free_context(&av_context)
- #define AV_GET_CODEC_TYPE(av_stream) av_stream->codecpar->codec_type
- #define AV_FIND_DECODER_CODEC_ID(av_stream) av_stream->codecpar->codec_id
- #define AV_GET_CODEC_CONTEXT(av_stream, av_codec) \
- ({ AVCodecContext *context = avcodec_alloc_context3(av_codec); \
- avcodec_parameters_to_context(context, av_stream->codecpar); \
- context; })
- #define AV_GET_CODEC_PAR_CONTEXT(av_stream, av_codec) av_codec;
- #define AV_GET_CODEC_FROM_STREAM(av_stream,codec_in)
- #define AV_GET_CODEC_ATTRIBUTES(av_stream, av_context) av_stream->codecpar
- #define AV_GET_CODEC_PIXEL_FORMAT(av_stream, av_context) (AVPixelFormat) av_stream->codecpar->format
- #define AV_GET_SAMPLE_FORMAT(av_stream, av_context) av_stream->codecpar->format
- #define AV_GET_IMAGE_SIZE(pix_fmt, width, height) av_image_get_buffer_size(pix_fmt, width, height, 1)
- #define AV_COPY_PICTURE_DATA(av_frame, buffer, pix_fmt, width, height) av_image_fill_arrays(av_frame->data, av_frame->linesize, buffer, pix_fmt, width, height, 1)
- #define AV_OUTPUT_CONTEXT(output_context, path) avformat_alloc_output_context2( output_context, NULL, NULL, path)
- #define AV_OPTION_FIND(priv_data, name) av_opt_find(priv_data, name, NULL, 0, 0)
- #define AV_OPTION_SET( av_stream, priv_data, name, value, avcodec) av_opt_set(priv_data, name, value, 0); avcodec_parameters_from_context(av_stream->codecpar, avcodec);
- #define AV_FORMAT_NEW_STREAM(oc, st_codec, av_codec, av_st) av_st = avformat_new_stream(oc, NULL);\
- if (!av_st) \
- throw OutOfMemory("Could not allocate memory for the video stream.", path); \
- _Pragma ("GCC diagnostic push"); \
- _Pragma ("GCC diagnostic ignored \"-Wdeprecated-declarations\""); \
- avcodec_get_context_defaults3(av_st->codec, av_codec); \
- c = av_st->codec; \
- _Pragma ("GCC diagnostic pop"); \
- st_codec = c;
- #define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec) avcodec_parameters_from_context(av_stream->codecpar, av_codec);
- #elif LIBAVFORMAT_VERSION_MAJOR >= 55
- #define AV_REGISTER_ALL av_register_all();
- #define AVCODEC_REGISTER_ALL avcodec_register_all();
- #define AV_FILENAME filename
- #define AV_SET_FILENAME(oc, f) snprintf(oc->AV_FILENAME, sizeof(oc->AV_FILENAME), "%s", f)
- #define MY_INPUT_BUFFER_PADDING_SIZE FF_INPUT_BUFFER_PADDING_SIZE
- #define AV_ALLOCATE_FRAME() av_frame_alloc()
- #define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) avpicture_alloc((AVPicture *) av_frame, pix_fmt, width, height)
- #define AV_RESET_FRAME(av_frame) av_frame_unref(av_frame)
- #define AV_FREE_FRAME(av_frame) av_frame_free(av_frame)
- #define AV_FREE_PACKET(av_packet) av_packet_unref(av_packet)
- #define AV_FREE_CONTEXT(av_context) avcodec_close(av_context)
- #define AV_GET_CODEC_TYPE(av_stream) av_stream->codec->codec_type
- #define AV_FIND_DECODER_CODEC_ID(av_stream) av_stream->codec->codec_id
- #define AV_GET_CODEC_CONTEXT(av_stream, av_codec) av_stream->codec
- #define AV_GET_CODEC_PAR_CONTEXT(av_stream, av_codec) av_stream->codec
- #define AV_GET_CODEC_FROM_STREAM(av_stream, codec_in) codec_in = av_stream->codec;
- #define AV_GET_CODEC_ATTRIBUTES(av_stream, av_context) av_context
- #define AV_GET_CODEC_PIXEL_FORMAT(av_stream, av_context) av_context->pix_fmt
- #define AV_GET_SAMPLE_FORMAT(av_stream, av_context) av_context->sample_fmt
- #define AV_GET_IMAGE_SIZE(pix_fmt, width, height) avpicture_get_size(pix_fmt, width, height)
- #define AV_COPY_PICTURE_DATA(av_frame, buffer, pix_fmt, width, height) avpicture_fill((AVPicture *) av_frame, buffer, pix_fmt, width, height)
- #define AV_OUTPUT_CONTEXT(output_context, path) oc = avformat_alloc_context()
- #define AV_OPTION_FIND(priv_data, name) av_opt_find(priv_data, name, NULL, 0, 0)
- #define AV_OPTION_SET(av_stream, priv_data, name, value, avcodec) av_opt_set (priv_data, name, value, 0)
- #define AV_FORMAT_NEW_STREAM( oc, av_context, av_codec, av_st) av_st = avformat_new_stream(oc, av_codec); \
- if (!av_st) \
- throw OutOfMemory("Could not allocate memory for the video stream.", path); \
- avcodec_get_context_defaults3(av_st->codec, av_codec); \
- c = av_st->codec;
- #define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec)
- #else
- #define AV_REGISTER_ALL av_register_all();
- #define AVCODEC_REGISTER_ALL avcodec_register_all();
- #define AV_FILENAME filename
- #define AV_SET_FILENAME(oc, f) snprintf(oc->AV_FILENAME, sizeof(oc->AV_FILENAME), "%s", f)
- #define MY_INPUT_BUFFER_PADDING_SIZE FF_INPUT_BUFFER_PADDING_SIZE
- #define AV_ALLOCATE_FRAME() avcodec_alloc_frame()
- #define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) avpicture_alloc((AVPicture *) av_frame, pix_fmt, width, height)
- #define AV_RESET_FRAME(av_frame) avcodec_get_frame_defaults(av_frame)
- #define AV_FREE_FRAME(av_frame) avcodec_free_frame(av_frame)
- #define AV_FREE_PACKET(av_packet) av_free_packet(av_packet)
- #define AV_FREE_CONTEXT(av_context) avcodec_close(av_context)
- #define AV_GET_CODEC_TYPE(av_stream) av_stream->codec->codec_type
- #define AV_FIND_DECODER_CODEC_ID(av_stream) av_stream->codec->codec_id
- #define AV_GET_CODEC_CONTEXT(av_stream, av_codec) av_stream->codec
- #define AV_GET_CODEC_PAR_CONTEXT(av_stream, av_codec) av_stream->codec
- #define AV_GET_CODEC_FROM_STREAM(av_stream, codec_in ) codec_in = av_stream->codec;
- #define AV_GET_CODEC_ATTRIBUTES(av_stream, av_context) av_context
- #define AV_GET_CODEC_PIXEL_FORMAT(av_stream, av_context) av_context->pix_fmt
- #define AV_GET_SAMPLE_FORMAT(av_stream, av_context) av_context->sample_fmt
- #define AV_GET_IMAGE_SIZE(pix_fmt, width, height) avpicture_get_size(pix_fmt, width, height)
- #define AV_COPY_PICTURE_DATA(av_frame, buffer, pix_fmt, width, height) avpicture_fill((AVPicture *) av_frame, buffer, pix_fmt, width, height)
- #define AV_OUTPUT_CONTEXT(output_context, path) oc = avformat_alloc_context()
- #define AV_OPTION_FIND(priv_data, name) av_opt_find(priv_data, name, NULL, 0, 0)
- #define AV_OPTION_SET(av_stream, priv_data, name, value, avcodec) av_opt_set (priv_data, name, value, 0)
- #define AV_FORMAT_NEW_STREAM( oc, av_context, av_codec, av_st) av_st = avformat_new_stream(oc, av_codec); \
- if (!av_st) \
- throw OutOfMemory("Could not allocate memory for the video stream.", path); \
- avcodec_get_context_defaults3(av_st->codec, av_codec); \
- c = av_st->codec;
- #define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec)
- #endif
-
+#include "OpenShotVersion.h" // For FFMPEG_USE_SWRESAMPLE
+// Required for libavformat to build on Windows
+#ifndef INT64_C
+#define INT64_C(c) (c ## LL)
+#define UINT64_C(c) (c ## ULL)
#endif
+
+#ifndef IS_FFMPEG_3_2
+#define IS_FFMPEG_3_2 (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 64, 101))
+#endif
+
+#ifndef USE_HW_ACCEL
+#define USE_HW_ACCEL (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 107, 100))
+#endif
+
+#ifndef USE_SW
+#define USE_SW FFMPEG_USE_SWRESAMPLE
+#endif
+
+// Include the FFmpeg headers
+extern "C" {
+ #include <libavcodec/avcodec.h>
+ #include <libavformat/avformat.h>
+
+#if (LIBAVFORMAT_VERSION_MAJOR >= 57)
+ #include <libavutil/hwcontext.h> //PM
+#endif
+ #include <libswscale/swscale.h>
+
+#if USE_SW
+ #include <libswresample/swresample.h>
+#else
+ #include <libavresample/avresample.h>
+#endif
+
+ #include <libavutil/mathematics.h>
+ #include <libavutil/pixfmt.h>
+ #include <libavutil/pixdesc.h>
+
+ // libavutil changed folders at some point
+#if LIBAVFORMAT_VERSION_MAJOR >= 53
+ #include <libavutil/opt.h>
+#else
+ #include <libavcodec/opt.h>
+#endif
+
+ // channel header refactored
+#if LIBAVFORMAT_VERSION_MAJOR >= 54
+ #include <libavutil/channel_layout.h>
+#endif
+
+#if IS_FFMPEG_3_2
+ #include "libavutil/imgutils.h"
+#endif
+}
+
+// This was removed from newer versions of FFmpeg (but still used in libopenshot)
+#ifndef AVCODEC_MAX_AUDIO_FRAME_SIZE
+ // 1 second of 48khz 32bit audio
+ #define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000
+#endif
+#ifndef AV_ERROR_MAX_STRING_SIZE
+ #define AV_ERROR_MAX_STRING_SIZE 64
+#endif
+#ifndef AUDIO_PACKET_ENCODING_SIZE
+ // 48khz * S16 (2 bytes) * max channels (8)
+ #define AUDIO_PACKET_ENCODING_SIZE 768000
+#endif
+
+// This wraps an unsafe C macro to be C++ compatible function
+inline static const std::string av_err2string(int errnum)
+{
+ char errbuf[AV_ERROR_MAX_STRING_SIZE];
+ av_strerror(errnum, errbuf, AV_ERROR_MAX_STRING_SIZE);
+ return static_cast<std::string>(errbuf);
+}
+
+// Redefine the C macro to use our new C++ function
+#undef av_err2str
+#define av_err2str(errnum) av_err2string(errnum).c_str()
+
+// Define this for compatibility
+#ifndef PixelFormat
+ #define PixelFormat AVPixelFormat
+#endif
+#ifndef PIX_FMT_RGBA
+ #define PIX_FMT_RGBA AV_PIX_FMT_RGBA
+#endif
+#ifndef PIX_FMT_NONE
+ #define PIX_FMT_NONE AV_PIX_FMT_NONE
+#endif
+#ifndef PIX_FMT_RGB24
+ #define PIX_FMT_RGB24 AV_PIX_FMT_RGB24
+#endif
+#ifndef PIX_FMT_YUV420P
+ #define PIX_FMT_YUV420P AV_PIX_FMT_YUV420P
+#endif
+#ifndef PIX_FMT_YUV444P
+ #define PIX_FMT_YUV444P AV_PIX_FMT_YUV444P
+#endif
+
+// Does ffmpeg pixel format contain an alpha channel?
+inline static bool ffmpeg_has_alpha(PixelFormat pix_fmt) {
+ const AVPixFmtDescriptor *fmt_desc = av_pix_fmt_desc_get(pix_fmt);
+ return bool(fmt_desc->flags & AV_PIX_FMT_FLAG_ALPHA);
+}
+
+// FFmpeg's libavutil/common.h defines an RSHIFT incompatible with Ruby's
+// definition in ruby/config.h, so we move it to FF_RSHIFT
+#ifdef RSHIFT
+ #define FF_RSHIFT(a, b) RSHIFT(a, b)
+ #undef RSHIFT
+#endif
+
+// libswresample/libavresample API switching
+#if USE_SW
+ #define SWR_CONVERT(ctx, out, linesize, out_count, in, linesize2, in_count) \
+ swr_convert(ctx, out, out_count, (const uint8_t **)in, in_count)
+ #define SWR_ALLOC() swr_alloc()
+ #define SWR_CLOSE(ctx) {}
+ #define SWR_FREE(ctx) swr_free(ctx)
+ #define SWR_INIT(ctx) swr_init(ctx)
+ #define SWRCONTEXT SwrContext
+
+#else
+ #define SWR_CONVERT(ctx, out, linesize, out_count, in, linesize2, in_count) \
+ avresample_convert(ctx, out, linesize, out_count, (uint8_t **)in, linesize2, in_count)
+ #define SWR_ALLOC() avresample_alloc_context()
+ #define SWR_CLOSE(ctx) avresample_close(ctx)
+ #define SWR_FREE(ctx) avresample_free(ctx)
+ #define SWR_INIT(ctx) avresample_open(ctx)
+ #define SWRCONTEXT AVAudioResampleContext
+#endif
+
+
+#if (LIBAVFORMAT_VERSION_MAJOR >= 58)
+ #define AV_REGISTER_ALL
+ #define AVCODEC_REGISTER_ALL
+ #define AV_FILENAME url
+ #define AV_SET_FILENAME(oc, f) oc->AV_FILENAME = av_strdup(f)
+ #define MY_INPUT_BUFFER_PADDING_SIZE AV_INPUT_BUFFER_PADDING_SIZE
+ #define AV_ALLOCATE_FRAME() av_frame_alloc()
+ #define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) \
+ av_image_alloc(av_frame->data, av_frame->linesize, width, height, pix_fmt, 1)
+ #define AV_RESET_FRAME(av_frame) av_frame_unref(av_frame)
+ #define AV_FREE_FRAME(av_frame) av_frame_free(av_frame)
+ #define AV_FREE_PACKET(av_packet) av_packet_unref(av_packet)
+ #define AV_FREE_CONTEXT(av_context) avcodec_free_context(&av_context)
+ #define AV_GET_CODEC_TYPE(av_stream) av_stream->codecpar->codec_type
+ #define AV_FIND_DECODER_CODEC_ID(av_stream) av_stream->codecpar->codec_id
+ #define AV_GET_CODEC_CONTEXT(av_stream, av_codec) \
+ ({ AVCodecContext *context = avcodec_alloc_context3(av_codec); \
+ avcodec_parameters_to_context(context, av_stream->codecpar); \
+ context; })
+ #define AV_GET_CODEC_PAR_CONTEXT(av_stream, av_codec) av_codec;
+ #define AV_GET_CODEC_FROM_STREAM(av_stream,codec_in)
+ #define AV_GET_CODEC_ATTRIBUTES(av_stream, av_context) av_stream->codecpar
+ #define AV_GET_CODEC_PIXEL_FORMAT(av_stream, av_context) (AVPixelFormat) av_stream->codecpar->format
+ #define AV_GET_SAMPLE_FORMAT(av_stream, av_context) av_stream->codecpar->format
+ #define AV_GET_IMAGE_SIZE(pix_fmt, width, height) \
+ av_image_get_buffer_size(pix_fmt, width, height, 1)
+ #define AV_COPY_PICTURE_DATA(av_frame, buffer, pix_fmt, width, height) \
+ av_image_fill_arrays(av_frame->data, av_frame->linesize, buffer, pix_fmt, width, height, 1)
+ #define AV_OUTPUT_CONTEXT(output_context, path) avformat_alloc_output_context2( output_context, NULL, NULL, path)
+ #define AV_OPTION_FIND(priv_data, name) av_opt_find(priv_data, name, NULL, 0, 0)
+ #define AV_OPTION_SET( av_stream, priv_data, name, value, avcodec) \
+ av_opt_set(priv_data, name, value, 0); \
+ avcodec_parameters_from_context(av_stream->codecpar, avcodec);
+ #define AV_FORMAT_NEW_STREAM(oc, st_codec_ctx, av_codec, av_st) \
+ av_st = avformat_new_stream(oc, NULL);\
+ if (!av_st) \
+ throw OutOfMemory("Could not allocate memory for the video stream.", path); \
+ c = avcodec_alloc_context3(av_codec); \
+ st_codec_ctx = c; \
+ av_st->codecpar->codec_id = av_codec->id;
+ #define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec_ctx) \
+ avcodec_parameters_from_context(av_stream->codecpar, av_codec_ctx);
+
+#elif IS_FFMPEG_3_2
+ #define AV_REGISTER_ALL av_register_all();
+ #define AVCODEC_REGISTER_ALL avcodec_register_all();
+ #define AV_FILENAME filename
+ #define AV_SET_FILENAME(oc, f) snprintf(oc->AV_FILENAME, sizeof(oc->AV_FILENAME), "%s", f)
+ #define MY_INPUT_BUFFER_PADDING_SIZE FF_INPUT_BUFFER_PADDING_SIZE
+ #define AV_ALLOCATE_FRAME() av_frame_alloc()
+ #define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) \
+ av_image_alloc(av_frame->data, av_frame->linesize, width, height, pix_fmt, 1)
+ #define AV_RESET_FRAME(av_frame) av_frame_unref(av_frame)
+ #define AV_FREE_FRAME(av_frame) av_frame_free(av_frame)
+ #define AV_FREE_PACKET(av_packet) av_packet_unref(av_packet)
+ #define AV_FREE_CONTEXT(av_context) avcodec_free_context(&av_context)
+ #define AV_GET_CODEC_TYPE(av_stream) av_stream->codecpar->codec_type
+ #define AV_FIND_DECODER_CODEC_ID(av_stream) av_stream->codecpar->codec_id
+ #define AV_GET_CODEC_CONTEXT(av_stream, av_codec) \
+ ({ AVCodecContext *context = avcodec_alloc_context3(av_codec); \
+ avcodec_parameters_to_context(context, av_stream->codecpar); \
+ context; })
+ #define AV_GET_CODEC_PAR_CONTEXT(av_stream, av_codec) av_codec;
+ #define AV_GET_CODEC_FROM_STREAM(av_stream,codec_in)
+ #define AV_GET_CODEC_ATTRIBUTES(av_stream, av_context) av_stream->codecpar
+ #define AV_GET_CODEC_PIXEL_FORMAT(av_stream, av_context) \
+ (AVPixelFormat) av_stream->codecpar->format
+ #define AV_GET_SAMPLE_FORMAT(av_stream, av_context) av_stream->codecpar->format
+ #define AV_GET_IMAGE_SIZE(pix_fmt, width, height) av_image_get_buffer_size(pix_fmt, width, height, 1)
+ #define AV_COPY_PICTURE_DATA(av_frame, buffer, pix_fmt, width, height) \
+ av_image_fill_arrays(av_frame->data, av_frame->linesize, buffer, pix_fmt, width, height, 1)
+ #define AV_OUTPUT_CONTEXT(output_context, path) \
+ avformat_alloc_output_context2( output_context, NULL, NULL, path)
+ #define AV_OPTION_FIND(priv_data, name) av_opt_find(priv_data, name, NULL, 0, 0)
+ #define AV_OPTION_SET( av_stream, priv_data, name, value, avcodec) \
+ av_opt_set(priv_data, name, value, 0); \
+ avcodec_parameters_from_context(av_stream->codecpar, avcodec);
+ #define AV_FORMAT_NEW_STREAM(oc, st_codec, av_codec, av_st) \
+ av_st = avformat_new_stream(oc, NULL);\
+ if (!av_st) \
+ throw OutOfMemory("Could not allocate memory for the video stream.", path); \
+ _Pragma ("GCC diagnostic push"); \
+ _Pragma ("GCC diagnostic ignored \"-Wdeprecated-declarations\""); \
+ avcodec_get_context_defaults3(av_st->codec, av_codec); \
+ c = av_st->codec; \
+ _Pragma ("GCC diagnostic pop"); \
+ st_codec = c;
+ #define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec) \
+ avcodec_parameters_from_context(av_stream->codecpar, av_codec);
+
+#elif LIBAVFORMAT_VERSION_MAJOR >= 55
+ #define AV_REGISTER_ALL av_register_all();
+ #define AVCODEC_REGISTER_ALL avcodec_register_all();
+ #define AV_FILENAME filename
+ #define AV_SET_FILENAME(oc, f) snprintf(oc->AV_FILENAME, sizeof(oc->AV_FILENAME), "%s", f)
+ #define MY_INPUT_BUFFER_PADDING_SIZE FF_INPUT_BUFFER_PADDING_SIZE
+ #define AV_ALLOCATE_FRAME() av_frame_alloc()
+ #define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) \
+ avpicture_alloc((AVPicture *) av_frame, pix_fmt, width, height)
+ #define AV_RESET_FRAME(av_frame) av_frame_unref(av_frame)
+ #define AV_FREE_FRAME(av_frame) av_frame_free(av_frame)
+ #define AV_FREE_PACKET(av_packet) av_packet_unref(av_packet)
+ #define AV_FREE_CONTEXT(av_context) avcodec_close(av_context)
+ #define AV_GET_CODEC_TYPE(av_stream) av_stream->codec->codec_type
+ #define AV_FIND_DECODER_CODEC_ID(av_stream) av_stream->codec->codec_id
+ #define AV_GET_CODEC_CONTEXT(av_stream, av_codec) av_stream->codec
+ #define AV_GET_CODEC_PAR_CONTEXT(av_stream, av_codec) av_stream->codec
+ #define AV_GET_CODEC_FROM_STREAM(av_stream, codec_in) codec_in = av_stream->codec;
+ #define AV_GET_CODEC_ATTRIBUTES(av_stream, av_context) av_context
+ #define AV_GET_CODEC_PIXEL_FORMAT(av_stream, av_context) av_context->pix_fmt
+ #define AV_GET_SAMPLE_FORMAT(av_stream, av_context) av_context->sample_fmt
+ #define AV_GET_IMAGE_SIZE(pix_fmt, width, height) avpicture_get_size(pix_fmt, width, height)
+ #define AV_COPY_PICTURE_DATA(av_frame, buffer, pix_fmt, width, height) \
+ avpicture_fill((AVPicture *) av_frame, buffer, pix_fmt, width, height)
+ #define AV_OUTPUT_CONTEXT(output_context, path) oc = avformat_alloc_context()
+ #define AV_OPTION_FIND(priv_data, name) av_opt_find(priv_data, name, NULL, 0, 0)
+ #define AV_OPTION_SET(av_stream, priv_data, name, value, avcodec) av_opt_set (priv_data, name, value, 0)
+ #define AV_FORMAT_NEW_STREAM( oc, av_context, av_codec, av_st) \
+ av_st = avformat_new_stream(oc, av_codec); \
+ if (!av_st) \
+ throw OutOfMemory("Could not allocate memory for the video stream.", path); \
+ avcodec_get_context_defaults3(av_st->codec, av_codec); \
+ c = av_st->codec;
+ #define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec)
+
+#else
+ #define AV_REGISTER_ALL av_register_all();
+ #define AVCODEC_REGISTER_ALL avcodec_register_all();
+ #define AV_FILENAME filename
+ #define AV_SET_FILENAME(oc, f) snprintf(oc->AV_FILENAME, sizeof(oc->AV_FILENAME), "%s", f)
+ #define MY_INPUT_BUFFER_PADDING_SIZE FF_INPUT_BUFFER_PADDING_SIZE
+ #define AV_ALLOCATE_FRAME() avcodec_alloc_frame()
+ #define AV_ALLOCATE_IMAGE(av_frame, pix_fmt, width, height) \
+ avpicture_alloc((AVPicture *) av_frame, pix_fmt, width, height)
+ #define AV_RESET_FRAME(av_frame) avcodec_get_frame_defaults(av_frame)
+ #define AV_FREE_FRAME(av_frame) avcodec_free_frame(av_frame)
+ #define AV_FREE_PACKET(av_packet) av_free_packet(av_packet)
+ #define AV_FREE_CONTEXT(av_context) avcodec_close(av_context)
+ #define AV_GET_CODEC_TYPE(av_stream) av_stream->codec->codec_type
+ #define AV_FIND_DECODER_CODEC_ID(av_stream) av_stream->codec->codec_id
+ #define AV_GET_CODEC_CONTEXT(av_stream, av_codec) av_stream->codec
+ #define AV_GET_CODEC_PAR_CONTEXT(av_stream, av_codec) av_stream->codec
+ #define AV_GET_CODEC_FROM_STREAM(av_stream, codec_in ) codec_in = av_stream->codec;
+ #define AV_GET_CODEC_ATTRIBUTES(av_stream, av_context) av_context
+ #define AV_GET_CODEC_PIXEL_FORMAT(av_stream, av_context) av_context->pix_fmt
+ #define AV_GET_SAMPLE_FORMAT(av_stream, av_context) av_context->sample_fmt
+ #define AV_GET_IMAGE_SIZE(pix_fmt, width, height) avpicture_get_size(pix_fmt, width, height)
+ #define AV_COPY_PICTURE_DATA(av_frame, buffer, pix_fmt, width, height) \
+ avpicture_fill((AVPicture *) av_frame, buffer, pix_fmt, width, height)
+ #define AV_OUTPUT_CONTEXT(output_context, path) oc = avformat_alloc_context()
+ #define AV_OPTION_FIND(priv_data, name) av_opt_find(priv_data, name, NULL, 0, 0)
+ #define AV_OPTION_SET(av_stream, priv_data, name, value, avcodec) av_opt_set (priv_data, name, value, 0)
+ #define AV_FORMAT_NEW_STREAM( oc, av_context, av_codec, av_st) \
+ av_st = avformat_new_stream(oc, av_codec); \
+ if (!av_st) \
+ throw OutOfMemory("Could not allocate memory for the video stream.", path); \
+ avcodec_get_context_defaults3(av_st->codec, av_codec); \
+ c = av_st->codec;
+ #define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec)
+#endif
+
+
+#endif // OPENSHOT_FFMPEG_UTILITIES_H
diff --git a/src/FFmpegWriter.cpp b/src/FFmpegWriter.cpp
index 4cfc4633..8707756c 100644
--- a/src/FFmpegWriter.cpp
+++ b/src/FFmpegWriter.cpp
@@ -67,7 +67,7 @@ static int set_hwframe_ctx(AVCodecContext *ctx, AVBufferRef *hw_device_ctx, int6
frames_ctx->initial_pool_size = 20;
if ((err = av_hwframe_ctx_init(hw_frames_ref)) < 0) {
std::clog << "Failed to initialize HW frame context. " <<
- "Error code: " << av_err2str(err) << "\n";
+ "Error code: " << av_err2string(err) << "\n";
av_buffer_unref(&hw_frames_ref);
return err;
}
@@ -84,7 +84,7 @@ FFmpegWriter::FFmpegWriter(const std::string& path) :
path(path), fmt(NULL), oc(NULL), audio_st(NULL), video_st(NULL), samples(NULL),
audio_outbuf(NULL), audio_outbuf_size(0), audio_input_frame_size(0), audio_input_position(0),
initial_audio_input_frame_size(0), img_convert_ctx(NULL), cache_size(8), num_of_rescalers(32),
- rescaler_position(0), video_codec_ctx(NULL), audio_codec_ctx(NULL), is_writing(false), write_video_count(0), write_audio_count(0),
+ rescaler_position(0), video_codec_ctx(NULL), audio_codec_ctx(NULL), is_writing(false), video_timestamp(0), audio_timestamp(0),
original_sample_rate(0), original_channels(0), avr(NULL), avr_planar(NULL), is_open(false), prepare_streams(false),
write_header(false), write_trailer(false), audio_encoder_buffer_size(0), audio_encoder_buffer(NULL) {
@@ -847,7 +847,7 @@ void FFmpegWriter::flush_encoders() {
for (;;) {
// Increment PTS (in frames and scaled to the codec's timebase)
- write_video_count += av_rescale_q(1, av_make_q(info.fps.den, info.fps.num), video_codec_ctx->time_base);
+ video_timestamp += av_rescale_q(1, av_make_q(info.fps.den, info.fps.num), video_codec_ctx->time_base);
AVPacket pkt;
av_init_packet(&pkt);
@@ -870,12 +870,7 @@ void FFmpegWriter::flush_encoders() {
avcodec_flush_buffers(video_codec_ctx);
break;
}
- if (pkt.pts != AV_NOPTS_VALUE)
- pkt.pts = av_rescale_q(pkt.pts, video_codec_ctx->time_base, video_st->time_base);
- if (pkt.dts != AV_NOPTS_VALUE)
- pkt.dts = av_rescale_q(pkt.dts, video_codec_ctx->time_base, video_st->time_base);
- if (pkt.duration > 0)
- pkt.duration = av_rescale_q(pkt.duration, video_codec_ctx->time_base, video_st->time_base);
+ av_packet_rescale_ts(&pkt, video_codec_ctx->time_base, video_st->time_base);
pkt.stream_index = video_st->index;
error_code = av_interleaved_write_frame(oc, &pkt);
}
@@ -887,83 +882,75 @@ void FFmpegWriter::flush_encoders() {
#endif // IS_FFMPEG_3_2
if (error_code < 0) {
- ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::flush_encoders ERROR [" + (std::string) av_err2str(error_code) + "]", "error_code", error_code);
+ ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::flush_encoders ERROR [" + av_err2string(error_code) + "]", "error_code", error_code);
}
if (!got_packet) {
break;
}
// set the timestamp
- if (pkt.pts != AV_NOPTS_VALUE)
- pkt.pts = av_rescale_q(pkt.pts, video_codec_ctx->time_base, video_st->time_base);
- if (pkt.dts != AV_NOPTS_VALUE)
- pkt.dts = av_rescale_q(pkt.dts, video_codec_ctx->time_base, video_st->time_base);
- if (pkt.duration > 0)
- pkt.duration = av_rescale_q(pkt.duration, video_codec_ctx->time_base, video_st->time_base);
+ av_packet_rescale_ts(&pkt, video_codec_ctx->time_base, video_st->time_base);
pkt.stream_index = video_st->index;
// Write packet
error_code = av_interleaved_write_frame(oc, &pkt);
if (error_code < 0) {
- ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::flush_encoders ERROR [" + (std::string)av_err2str(error_code) + "]", "error_code", error_code);
+ ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::flush_encoders ERROR [" + av_err2string(error_code) + "]", "error_code", error_code);
}
}
// FLUSH AUDIO ENCODER
- if (info.has_audio)
- for (;;) {
+ if (info.has_audio) {
+ for (;;) {
+ AVPacket pkt;
+ av_init_packet(&pkt);
+ pkt.data = NULL;
+ pkt.size = 0;
+ pkt.pts = pkt.dts = audio_timestamp;
- // Increment PTS (in samples and scaled to the codec's timebase)
- // for some reason, it requires me to multiply channels X 2
- write_audio_count += av_rescale_q(audio_input_position / (audio_codec_ctx->channels * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)), av_make_q(1, info.sample_rate), audio_codec_ctx->time_base);
-
- AVPacket pkt;
- av_init_packet(&pkt);
- pkt.data = NULL;
- pkt.size = 0;
- pkt.pts = pkt.dts = write_audio_count;
-
- /* encode the image */
- int error_code = 0;
- int got_packet = 0;
+ /* encode the image */
+ int error_code = 0;
+ int got_packet = 0;
#if IS_FFMPEG_3_2
- error_code = avcodec_send_frame(audio_codec_ctx, NULL);
+ error_code = avcodec_send_frame(audio_codec_ctx, NULL);
#else
- error_code = avcodec_encode_audio2(audio_codec_ctx, &pkt, NULL, &got_packet);
+ error_code = avcodec_encode_audio2(audio_codec_ctx, &pkt, NULL, &got_packet);
#endif
- if (error_code < 0) {
- ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::flush_encoders ERROR [" + (std::string)av_err2str(error_code) + "]", "error_code", error_code);
- }
- if (!got_packet) {
- break;
- }
+ if (error_code < 0) {
+ ZmqLogger::Instance()->AppendDebugMethod(
+ "FFmpegWriter::flush_encoders ERROR [" + av_err2string(error_code) + "]",
+ "error_code", error_code);
+ }
+ if (!got_packet) {
+ break;
+ }
- // Since the PTS can change during encoding, set the value again. This seems like a huge hack,
- // but it fixes lots of PTS related issues when I do this.
- pkt.pts = pkt.dts = write_audio_count;
+ // Since the PTS can change during encoding, set the value again. This seems like a huge hack,
+ // but it fixes lots of PTS related issues when I do this.
+ pkt.pts = pkt.dts = audio_timestamp;
- // Scale the PTS to the audio stream timebase (which is sometimes different than the codec's timebase)
- if (pkt.pts != AV_NOPTS_VALUE)
- pkt.pts = av_rescale_q(pkt.pts, audio_codec_ctx->time_base, audio_st->time_base);
- if (pkt.dts != AV_NOPTS_VALUE)
- pkt.dts = av_rescale_q(pkt.dts, audio_codec_ctx->time_base, audio_st->time_base);
- if (pkt.duration > 0)
- pkt.duration = av_rescale_q(pkt.duration, audio_codec_ctx->time_base, audio_st->time_base);
+ // Scale the PTS to the audio stream timebase (which is sometimes different than the codec's timebase)
+ av_packet_rescale_ts(&pkt, audio_codec_ctx->time_base, audio_st->time_base);
- // set stream
- pkt.stream_index = audio_st->index;
- pkt.flags |= AV_PKT_FLAG_KEY;
+ // set stream
+ pkt.stream_index = audio_st->index;
+ pkt.flags |= AV_PKT_FLAG_KEY;
- // Write packet
- error_code = av_interleaved_write_frame(oc, &pkt);
- if (error_code < 0) {
- ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::flush_encoders ERROR [" + (std::string)av_err2str(error_code) + "]", "error_code", error_code);
- }
+ // Write packet
+ error_code = av_interleaved_write_frame(oc, &pkt);
+ if (error_code < 0) {
+ ZmqLogger::Instance()->AppendDebugMethod(
+ "FFmpegWriter::flush_encoders ERROR [" + av_err2string(error_code) + "]",
+ "error_code", error_code);
+ }
- // deallocate memory for packet
- AV_FREE_PACKET(&pkt);
- }
+ // Increment PTS by duration of packet
+ audio_timestamp += pkt.duration;
+ // deallocate memory for packet
+ AV_FREE_PACKET(&pkt);
+ }
+ }
}
@@ -1027,8 +1014,8 @@ void FFmpegWriter::Close() {
}
// Reset frame counters
- write_video_count = 0;
- write_audio_count = 0;
+ video_timestamp = 0;
+ audio_timestamp = 0;
// Free the context which frees the streams too
avformat_free_context(oc);
@@ -1505,7 +1492,7 @@ void FFmpegWriter::open_video(AVFormatContext *oc, AVStream *st) {
int err;
if ((err = set_hwframe_ctx(video_codec_ctx, hw_device_ctx, info.width, info.height)) < 0) {
ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::open_video (set_hwframe_ctx) ERROR faled to set hwframe context",
- "width", info.width, "height", info.height, av_err2str(err), -1);
+ "width", info.width, "height", info.height, av_err2string(err), -1);
}
}
#endif // USE_HW_ACCEL
@@ -1611,7 +1598,7 @@ void FFmpegWriter::write_audio_packets(bool is_final) {
// Fill input frame with sample data
int error_code = avcodec_fill_audio_frame(audio_frame, channels_in_frame, AV_SAMPLE_FMT_S16, (uint8_t *) all_queued_samples, all_queued_samples_size, 0);
if (error_code < 0) {
- ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets ERROR [" + (std::string) av_err2str(error_code) + "]", "error_code", error_code);
+ ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets ERROR [" + av_err2string(error_code) + "]", "error_code", error_code);
}
// Do not convert audio to planar format (yet). We need to keep everything interleaved at this point.
@@ -1735,7 +1722,7 @@ void FFmpegWriter::write_audio_packets(bool is_final) {
// Convert to planar (if needed by audio codec)
AVFrame *frame_final = AV_ALLOCATE_FRAME();
AV_RESET_FRAME(frame_final);
- if (av_sample_fmt_is_planar(audio_codec_ctx->sample_fmt)) {
+ if (av_sample_fmt_is_planar(audio_codec_ctx->sample_fmt)) {
ZmqLogger::Instance()->AppendDebugMethod(
"FFmpegWriter::write_audio_packets (2nd resampling for Planar formats)",
"in_sample_fmt", output_sample_fmt,
@@ -1781,8 +1768,11 @@ void FFmpegWriter::write_audio_packets(bool is_final) {
// Create output frame (and allocate arrays)
frame_final->nb_samples = audio_input_frame_size;
+ frame_final->channels = info.channels;
+ frame_final->format = audio_codec_ctx->sample_fmt;
+ frame_final->channel_layout = info.channel_layout;
av_samples_alloc(frame_final->data, frame_final->linesize, info.channels,
- frame_final->nb_samples, audio_codec_ctx->sample_fmt, 0);
+ frame_final->nb_samples, audio_codec_ctx->sample_fmt, 0);
// Convert audio samples
int nb_samples = SWR_CONVERT(
@@ -1799,7 +1789,7 @@ void FFmpegWriter::write_audio_packets(bool is_final) {
if (nb_samples > 0) {
memcpy(samples, frame_final->data[0],
nb_samples * av_get_bytes_per_sample(audio_codec_ctx->sample_fmt) * info.channels);
- }
+ }
// deallocate AVFrame
av_freep(&(audio_frame->data[0]));
@@ -1829,9 +1819,8 @@ void FFmpegWriter::write_audio_packets(bool is_final) {
audio_encoder_buffer_size, 0);
}
- // Increment PTS (in samples)
- write_audio_count += FFMIN(audio_input_frame_size, audio_input_position);
- frame_final->pts = write_audio_count; // Set the AVFrame's PTS
+ // Set the AVFrame's PTS
+ frame_final->pts = audio_timestamp;
// Init the packet
AVPacket pkt;
@@ -1840,7 +1829,7 @@ void FFmpegWriter::write_audio_packets(bool is_final) {
pkt.size = audio_encoder_buffer_size;
// Set the packet's PTS prior to encoding
- pkt.pts = pkt.dts = write_audio_count;
+ pkt.pts = pkt.dts = audio_timestamp;
/* encode the audio samples */
int got_packet_ptr = 0;
@@ -1882,15 +1871,10 @@ void FFmpegWriter::write_audio_packets(bool is_final) {
// Since the PTS can change during encoding, set the value again. This seems like a huge hack,
// but it fixes lots of PTS related issues when I do this.
- pkt.pts = pkt.dts = write_audio_count;
+ pkt.pts = pkt.dts = audio_timestamp;
// Scale the PTS to the audio stream timebase (which is sometimes different than the codec's timebase)
- if (pkt.pts != AV_NOPTS_VALUE)
- pkt.pts = av_rescale_q(pkt.pts, audio_codec_ctx->time_base, audio_st->time_base);
- if (pkt.dts != AV_NOPTS_VALUE)
- pkt.dts = av_rescale_q(pkt.dts, audio_codec_ctx->time_base, audio_st->time_base);
- if (pkt.duration > 0)
- pkt.duration = av_rescale_q(pkt.duration, audio_codec_ctx->time_base, audio_st->time_base);
+ av_packet_rescale_ts(&pkt, audio_codec_ctx->time_base, audio_st->time_base);
// set stream
pkt.stream_index = audio_st->index;
@@ -1901,9 +1885,12 @@ void FFmpegWriter::write_audio_packets(bool is_final) {
}
if (error_code < 0) {
- ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets ERROR [" + (std::string) av_err2str(error_code) + "]", "error_code", error_code);
+ ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets ERROR [" + av_err2string(error_code) + "]", "error_code", error_code);
}
+ // Increment PTS (no pkt.duration, so calculate with maths)
+ audio_timestamp += FFMIN(audio_input_frame_size, audio_input_position);
+
// deallocate AVFrame
av_freep(&(frame_final->data[0]));
AV_FREE_FRAME(&frame_final);
@@ -2042,14 +2029,13 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra
pkt.data = (uint8_t *) frame_final->data;
pkt.size = sizeof(AVPicture);
- // Increment PTS (in frames and scaled to the codec's timebase)
- write_video_count += av_rescale_q(1, av_make_q(info.fps.den, info.fps.num), video_codec_ctx->time_base);
- pkt.pts = write_video_count;
+ // Set PTS (in frames and scaled to the codec's timebase)
+ pkt.pts = video_timestamp;
/* write the compressed frame in the media file */
int error_code = av_interleaved_write_frame(oc, &pkt);
if (error_code < 0) {
- ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_video_packet ERROR [" + (std::string) av_err2str(error_code) + "]", "error_code", error_code);
+ ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_video_packet ERROR [" + av_err2string(error_code) + "]", "error_code", error_code);
return false;
}
@@ -2065,11 +2051,8 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra
pkt.size = 0;
pkt.pts = pkt.dts = AV_NOPTS_VALUE;
- // Increment PTS (in frames and scaled to the codec's timebase)
- write_video_count += av_rescale_q(1, av_make_q(info.fps.den, info.fps.num), video_codec_ctx->time_base);
-
// Assign the initial AVFrame PTS from the frame counter
- frame_final->pts = write_video_count;
+ frame_final->pts = video_timestamp;
#if USE_HW_ACCEL
if (hw_en_on && hw_en_supported) {
if (!(hw_frame = av_frame_alloc())) {
@@ -2133,7 +2116,7 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra
// Write video packet (older than FFmpeg 3.2)
error_code = avcodec_encode_video2(video_codec_ctx, &pkt, frame_final, &got_packet_ptr);
if (error_code != 0) {
- ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_video_packet ERROR [" + (std::string) av_err2str(error_code) + "]", "error_code", error_code);
+ ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_video_packet ERROR [" + av_err2string(error_code) + "]", "error_code", error_code);
}
if (got_packet_ptr == 0) {
ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_video_packet (Frame gotpacket error)");
@@ -2142,24 +2125,14 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra
/* if zero size, it means the image was buffered */
if (error_code == 0 && got_packet_ptr) {
-
- // Since the PTS can change during encoding, set the value again. This seems like a huge hack,
- // but it fixes lots of PTS related issues when I do this.
- //pkt.pts = pkt.dts = write_video_count;
-
// set the timestamp
- if (pkt.pts != AV_NOPTS_VALUE)
- pkt.pts = av_rescale_q(pkt.pts, video_codec_ctx->time_base, video_st->time_base);
- if (pkt.dts != AV_NOPTS_VALUE)
- pkt.dts = av_rescale_q(pkt.dts, video_codec_ctx->time_base, video_st->time_base);
- if (pkt.duration > 0)
- pkt.duration = av_rescale_q(pkt.duration, video_codec_ctx->time_base, video_st->time_base);
+ av_packet_rescale_ts(&pkt, video_codec_ctx->time_base, video_st->time_base);
pkt.stream_index = video_st->index;
/* write the compressed frame in the media file */
int result = av_interleaved_write_frame(oc, &pkt);
if (result < 0) {
- ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_video_packet ERROR [" + (std::string) av_err2str(result) + "]", "result", result);
+ ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_video_packet ERROR [" + av_err2string(result) + "]", "result", result);
return false;
}
}
@@ -2176,6 +2149,9 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra
#endif // USE_HW_ACCEL
}
+ // Increment PTS (in frames and scaled to the codec's timebase)
+ video_timestamp += av_rescale_q(1, av_make_q(info.fps.den, info.fps.num), video_codec_ctx->time_base);
+
// Success
return true;
}
diff --git a/src/FFmpegWriter.h b/src/FFmpegWriter.h
index 44f7cb35..79564b5c 100644
--- a/src/FFmpegWriter.h
+++ b/src/FFmpegWriter.h
@@ -151,8 +151,8 @@ namespace openshot {
int cache_size;
bool is_writing;
bool is_open;
- int64_t write_video_count;
- int64_t write_audio_count;
+ int64_t video_timestamp;
+ int64_t audio_timestamp;
bool prepare_streams;
bool write_header;
diff --git a/src/Fraction.cpp b/src/Fraction.cpp
index d4898326..cb13c393 100644
--- a/src/Fraction.cpp
+++ b/src/Fraction.cpp
@@ -34,21 +34,20 @@
using namespace openshot;
// Delegating constructors
-Fraction::Fraction() : Fraction::Fraction(1, 1) {};
+Fraction::Fraction() : Fraction::Fraction(1, 1) {}
Fraction::Fraction(std::pair<int, int> pair)
- : Fraction::Fraction(pair.first, pair.second) {};
+ : Fraction::Fraction(pair.first, pair.second) {}
Fraction::Fraction(std::map<std::string, int> mapping)
- : Fraction::Fraction(mapping["num"], mapping["den"]) {};
+ : Fraction::Fraction(mapping["num"], mapping["den"]) {}
Fraction::Fraction(std::vector<int> vector)
- : Fraction::Fraction(vector[0], vector[1]) {};
+ : Fraction::Fraction(vector[0], vector[1]) {}
// Full constructor
Fraction::Fraction(int num, int den) :
- num(num), den(den) {
-}
+ num(num), den(den) {}
// Return this fraction as a float (i.e. 1/2 = 0.5)
float Fraction::ToFloat() {
diff --git a/src/Fraction.h b/src/Fraction.h
index fb36e88b..3033cb92 100644
--- a/src/Fraction.h
+++ b/src/Fraction.h
@@ -38,53 +38,63 @@
namespace openshot {
- /**
- * @brief This class represents a fraction
- *
- * Fractions are often used in video editing to represent ratios and rates, for example:
- * pixel ratios, frames per second, timebase, and other common ratios. Fractions are preferred
- * over decimals due to their increased precision.
- */
- class Fraction {
- public:
- int num; /// pair);
+ /// Constructor that accepts a (num, den) pair
+ Fraction(std::pair<int, int> pair);
- /// Constructor that takes a vector of length 2 (containing {num, den})
- Fraction(std::vector<int> vector);
+ /// Constructor that takes a vector of length 2 (containing {num, den})
+ Fraction(std::vector<int> vector);
- /// Constructor that takes a key-value mapping (keys: 'num'. 'den')
- Fraction(std::map<std::string, int> mapping);
+ /// Constructor that takes a key-value mapping (keys: 'num', 'den')
+ Fraction(std::map<std::string, int> mapping);
- /// Calculate the greatest common denominator
- int GreatestCommonDenominator();
+ /// Calculate the greatest common denominator
+ int GreatestCommonDenominator();
- /// Reduce this fraction (i.e. 640/480 = 4/3)
- void Reduce();
+ /// Reduce this fraction (i.e. 640/480 = 4/3)
+ void Reduce();
- /// Return this fraction as a float (i.e. 1/2 = 0.5)
- float ToFloat();
+ /// Return this fraction as a float (i.e. 1/2 = 0.5)
+ float ToFloat();
- /// Return this fraction as a double (i.e. 1/2 = 0.5)
- double ToDouble() const;
+ /// Return this fraction as a double (i.e. 1/2 = 0.5)
+ double ToDouble() const;
- /// Return a rounded integer of the fraction (for example 30000/1001 returns 30)
- int ToInt();
-
- /// Return the reciprocal as a Fraction
- Fraction Reciprocal() const;
- };
+ /// Return a rounded integer of the fraction (for example 30000/1001 returns 30)
+ int ToInt();
+ /// Return the reciprocal as a Fraction
+ Fraction Reciprocal() const;
+};
+// Stream output operator for openshot::Fraction
+template<class charT, class traits>
+std::basic_ostream<charT, traits>&
+operator<<(std::basic_ostream<charT, traits>& o, const openshot::Fraction& frac) {
+ std::basic_ostringstream<charT, traits> s;
+ s.flags(o.flags());
+ s.imbue(o.getloc());
+ s.precision(o.precision());
+ s << "Fraction(" << frac.num << ", " << frac.den << ")";
+ return o << s.str();
}
+} // namespace openshot
#endif
diff --git a/src/Frame.cpp b/src/Frame.cpp
index 244b065f..aed38777 100644
--- a/src/Frame.cpp
+++ b/src/Frame.cpp
@@ -66,15 +66,15 @@ Frame::Frame(int64_t number, int width, int height, std::string color, int sampl
}
// Delegating Constructor - blank frame
-Frame::Frame() : Frame::Frame(1, 1, 1, "#000000", 0, 2) {};
+Frame::Frame() : Frame::Frame(1, 1, 1, "#000000", 0, 2) {}
// Delegating Constructor - image only
Frame::Frame(int64_t number, int width, int height, std::string color)
- : Frame::Frame(number, width, height, color, 0, 2) {};
+ : Frame::Frame(number, width, height, color, 0, 2) {}
// Delegating Constructor - audio only
Frame::Frame(int64_t number, int samples, int channels)
- : Frame::Frame(number, 1, 1, "#000000", samples, channels) {};
+ : Frame::Frame(number, 1, 1, "#000000", samples, channels) {}
// Copy constructor
@@ -918,7 +918,7 @@ cv::Mat Frame::Qimage2mat( std::shared_ptr& qimage) {
cv::mixChannels( &mat, 1, &mat2, 1, from_to, 3 );
cv::cvtColor(mat2, mat2, cv::COLOR_RGB2BGR);
return mat2;
-};
+}
// Get pointer to OpenCV image object
cv::Mat Frame::GetImageCV()
diff --git a/src/Frame.h b/src/Frame.h
index 18a22d11..e35ea9ca 100644
--- a/src/Frame.h
+++ b/src/Frame.h
@@ -109,7 +109,7 @@ namespace openshot
private:
std::shared_ptr<QImage> image;
std::shared_ptr<QImage> wave_image;
- std::shared_ptr<juce::AudioSampleBuffer> audio;
+
std::shared_ptr previewApp;
juce::CriticalSection addingImageSection;
juce::CriticalSection addingAudioSection;
@@ -131,6 +131,7 @@ namespace openshot
int constrain(int color_value);
public:
+ std::shared_ptr<juce::AudioSampleBuffer> audio;
int64_t number; ///< This is the frame number (starting at 1)
bool has_audio_data; ///< This frame has been loaded with audio data
bool has_image_data; ///< This frame has been loaded with pixel data
diff --git a/src/FrameMapper.cpp b/src/FrameMapper.cpp
index 0e3b0272..687ac595 100644
--- a/src/FrameMapper.cpp
+++ b/src/FrameMapper.cpp
@@ -28,6 +28,10 @@
* along with OpenShot Library. If not, see .
*/
+#include
+#include
+#include
+
#include "FrameMapper.h"
#include "Exceptions.h"
#include "Clip.h"
@@ -517,7 +521,7 @@ std::shared_ptr FrameMapper::GetFrame(int64_t requested_frame)
copy_samples.sample_end += EXTRA_INPUT_SAMPLES;
int samples_per_end_frame =
Frame::GetSamplesPerFrame(copy_samples.frame_end, original,
- reader->info.sample_rate, reader->info.channels);
+ reader->info.sample_rate, reader->info.channels);
if (copy_samples.sample_end >= samples_per_end_frame)
{
// check for wrapping
@@ -533,7 +537,7 @@ std::shared_ptr FrameMapper::GetFrame(int64_t requested_frame)
copy_samples.sample_start += EXTRA_INPUT_SAMPLES;
int samples_per_start_frame =
Frame::GetSamplesPerFrame(copy_samples.frame_start, original,
- reader->info.sample_rate, reader->info.channels);
+ reader->info.sample_rate, reader->info.channels);
if (copy_samples.sample_start >= samples_per_start_frame)
{
// check for wrapping
@@ -611,7 +615,7 @@ std::shared_ptr FrameMapper::GetFrame(int64_t requested_frame)
return final_cache.GetFrame(requested_frame);
}
-void FrameMapper::PrintMapping()
+void FrameMapper::PrintMapping(std::ostream* out)
{
// Check if mappings are dirty (and need to be recalculated)
if (is_dirty)
@@ -622,8 +626,16 @@ void FrameMapper::PrintMapping()
for (float map = 1; map <= frames.size(); map++)
{
MappedFrame frame = frames[map - 1];
- cout << "Target frame #: " << map << " mapped to original frame #:\t(" << frame.Odd.Frame << " odd, " << frame.Even.Frame << " even)" << endl;
- cout << " - Audio samples mapped to frame " << frame.Samples.frame_start << ":" << frame.Samples.sample_start << " to frame " << frame.Samples.frame_end << ":" << frame.Samples.sample_end << endl;
+ *out << "Target frame #: " << map
+ << " mapped to original frame #:\t("
+ << frame.Odd.Frame << " odd, "
+ << frame.Even.Frame << " even)" << std::endl;
+
+ *out << " - Audio samples mapped to frame "
+ << frame.Samples.frame_start << ":"
+ << frame.Samples.sample_start << " to frame "
+ << frame.Samples.frame_end << ":"
+ << frame.Samples.sample_end << std::endl;
}
}
@@ -733,7 +745,14 @@ void FrameMapper::SetJsonValue(const Json::Value root) {
// Change frame rate or audio mapping details
void FrameMapper::ChangeMapping(Fraction target_fps, PulldownType target_pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout)
{
- ZmqLogger::Instance()->AppendDebugMethod("FrameMapper::ChangeMapping", "target_fps.num", target_fps.num, "target_fps.den", target_fps.den, "target_pulldown", target_pulldown, "target_sample_rate", target_sample_rate, "target_channels", target_channels, "target_channel_layout", target_channel_layout);
+ ZmqLogger::Instance()->AppendDebugMethod(
+ "FrameMapper::ChangeMapping",
+ "target_fps.num", target_fps.num,
+ "target_fps.den", target_fps.den,
+ "target_pulldown", target_pulldown,
+ "target_sample_rate", target_sample_rate,
+ "target_channels", target_channels,
+ "target_channel_layout", target_channel_layout);
// Mark as dirty
is_dirty = true;
@@ -779,7 +798,13 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr frame, int64_t orig
int samples_in_frame = frame->GetAudioSamplesCount();
ChannelLayout channel_layout_in_frame = frame->ChannelsLayout();
- ZmqLogger::Instance()->AppendDebugMethod("FrameMapper::ResampleMappedAudio", "frame->number", frame->number, "original_frame_number", original_frame_number, "channels_in_frame", channels_in_frame, "samples_in_frame", samples_in_frame, "sample_rate_in_frame", sample_rate_in_frame);
+ ZmqLogger::Instance()->AppendDebugMethod(
+ "FrameMapper::ResampleMappedAudio",
+ "frame->number", frame->number,
+ "original_frame_number", original_frame_number,
+ "channels_in_frame", channels_in_frame,
+ "samples_in_frame", samples_in_frame,
+ "sample_rate_in_frame", sample_rate_in_frame);
// Get audio sample array
float* frame_samples_float = NULL;
@@ -815,7 +840,14 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr frame, int64_t orig
delete[] frame_samples_float;
frame_samples_float = NULL;
- ZmqLogger::Instance()->AppendDebugMethod("FrameMapper::ResampleMappedAudio (got sample data from frame)", "frame->number", frame->number, "total_frame_samples", total_frame_samples, "target channels", info.channels, "channels_in_frame", channels_in_frame, "target sample_rate", info.sample_rate, "samples_in_frame", samples_in_frame);
+ ZmqLogger::Instance()->AppendDebugMethod(
+ "FrameMapper::ResampleMappedAudio (got sample data from frame)",
+ "frame->number", frame->number,
+ "total_frame_samples", total_frame_samples,
+ "target channels", info.channels,
+ "channels_in_frame", channels_in_frame,
+ "target sample_rate", info.sample_rate,
+ "samples_in_frame", samples_in_frame);
// Create input frame (and allocate arrays)
@@ -823,19 +855,30 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr<Frame> frame, int64_t orig
AV_RESET_FRAME(audio_frame);
audio_frame->nb_samples = total_frame_samples / channels_in_frame;
- int error_code = avcodec_fill_audio_frame(audio_frame, channels_in_frame, AV_SAMPLE_FMT_S16, (uint8_t *) frame_samples,
- audio_frame->nb_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * channels_in_frame, 1);
+ int buf_size = audio_frame->nb_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * channels_in_frame;
+ int error_code = avcodec_fill_audio_frame(
+ audio_frame, channels_in_frame, AV_SAMPLE_FMT_S16,
+ (uint8_t *) frame_samples, buf_size, 1);
if (error_code < 0)
{
- ZmqLogger::Instance()->AppendDebugMethod("FrameMapper::ResampleMappedAudio ERROR [" + (std::string)av_err2str(error_code) + "]", "error_code", error_code);
+ ZmqLogger::Instance()->AppendDebugMethod(
+ "FrameMapper::ResampleMappedAudio ERROR [" + av_err2string(error_code) + "]",
+ "error_code", error_code);
throw ErrorEncodingVideo("Error while resampling audio in frame mapper", frame->number);
}
// Update total samples & input frame size (due to bigger or smaller data types)
total_frame_samples = Frame::GetSamplesPerFrame(AdjustFrameNumber(frame->number), target, info.sample_rate, info.channels);
- ZmqLogger::Instance()->AppendDebugMethod("FrameMapper::ResampleMappedAudio (adjust # of samples)", "total_frame_samples", total_frame_samples, "info.sample_rate", info.sample_rate, "sample_rate_in_frame", sample_rate_in_frame, "info.channels", info.channels, "channels_in_frame", channels_in_frame, "original_frame_number", original_frame_number);
+ ZmqLogger::Instance()->AppendDebugMethod(
+ "FrameMapper::ResampleMappedAudio (adjust # of samples)",
+ "total_frame_samples", total_frame_samples,
+ "info.sample_rate", info.sample_rate,
+ "sample_rate_in_frame", sample_rate_in_frame,
+ "info.channels", info.channels,
+ "channels_in_frame", channels_in_frame,
+ "original_frame_number", original_frame_number);
// Create output frame (and allocate arrays)
AVFrame *audio_converted = AV_ALLOCATE_FRAME();
@@ -843,32 +886,39 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr<Frame> frame, int64_t orig
audio_converted->nb_samples = total_frame_samples;
av_samples_alloc(audio_converted->data, audio_converted->linesize, info.channels, total_frame_samples, AV_SAMPLE_FMT_S16, 0);
- ZmqLogger::Instance()->AppendDebugMethod("FrameMapper::ResampleMappedAudio (preparing for resample)", "in_sample_fmt", AV_SAMPLE_FMT_S16, "out_sample_fmt", AV_SAMPLE_FMT_S16, "in_sample_rate", sample_rate_in_frame, "out_sample_rate", info.sample_rate, "in_channels", channels_in_frame, "out_channels", info.channels);
+ ZmqLogger::Instance()->AppendDebugMethod(
+ "FrameMapper::ResampleMappedAudio (preparing for resample)",
+ "in_sample_fmt", AV_SAMPLE_FMT_S16,
+ "out_sample_fmt", AV_SAMPLE_FMT_S16,
+ "in_sample_rate", sample_rate_in_frame,
+ "out_sample_rate", info.sample_rate,
+ "in_channels", channels_in_frame,
+ "out_channels", info.channels);
int nb_samples = 0;
// setup resample context
if (!avr) {
avr = SWR_ALLOC();
- av_opt_set_int(avr, "in_channel_layout", channel_layout_in_frame, 0);
- av_opt_set_int(avr, "out_channel_layout", info.channel_layout, 0);
- av_opt_set_int(avr, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
- av_opt_set_int(avr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
- av_opt_set_int(avr, "in_sample_rate", sample_rate_in_frame, 0);
- av_opt_set_int(avr, "out_sample_rate", info.sample_rate, 0);
- av_opt_set_int(avr, "in_channels", channels_in_frame, 0);
- av_opt_set_int(avr, "out_channels", info.channels, 0);
+ av_opt_set_int(avr, "in_channel_layout", channel_layout_in_frame, 0);
+ av_opt_set_int(avr, "out_channel_layout", info.channel_layout, 0);
+ av_opt_set_int(avr, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
+ av_opt_set_int(avr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
+ av_opt_set_int(avr, "in_sample_rate", sample_rate_in_frame, 0);
+ av_opt_set_int(avr, "out_sample_rate", info.sample_rate, 0);
+ av_opt_set_int(avr, "in_channels", channels_in_frame, 0);
+ av_opt_set_int(avr, "out_channels", info.channels, 0);
SWR_INIT(avr);
}
// Convert audio samples
- nb_samples = SWR_CONVERT(avr, // audio resample context
- audio_converted->data, // output data pointers
- audio_converted->linesize[0], // output plane size, in bytes. (0 if unknown)
- audio_converted->nb_samples, // maximum number of samples that the output buffer can hold
- audio_frame->data, // input data pointers
- audio_frame->linesize[0], // input plane size, in bytes (0 if unknown)
- audio_frame->nb_samples); // number of input samples to convert
+ nb_samples = SWR_CONVERT(avr, // audio resample context
+ audio_converted->data, // output data pointers
+ audio_converted->linesize[0], // output plane size, in bytes. (0 if unknown)
+ audio_converted->nb_samples, // maximum number of samples that the output buffer can hold
+ audio_frame->data, // input data pointers
+ audio_frame->linesize[0], // input plane size, in bytes (0 if unknown)
+ audio_frame->nb_samples); // number of input samples to convert
// Create a new array (to hold all resampled S16 audio samples)
int16_t* resampled_samples = new int16_t[(nb_samples * info.channels)];
@@ -887,7 +937,14 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr<Frame> frame, int64_t orig
int channel_buffer_size = nb_samples;
frame->ResizeAudio(info.channels, channel_buffer_size, info.sample_rate, info.channel_layout);
- ZmqLogger::Instance()->AppendDebugMethod("FrameMapper::ResampleMappedAudio (Audio successfully resampled)", "nb_samples", nb_samples, "total_frame_samples", total_frame_samples, "info.sample_rate", info.sample_rate, "channels_in_frame", channels_in_frame, "info.channels", info.channels, "info.channel_layout", info.channel_layout);
+ ZmqLogger::Instance()->AppendDebugMethod(
+ "FrameMapper::ResampleMappedAudio (Audio successfully resampled)",
+ "nb_samples", nb_samples,
+ "total_frame_samples", total_frame_samples,
+ "info.sample_rate", info.sample_rate,
+ "channels_in_frame", channels_in_frame,
+ "info.channels", info.channels,
+ "info.channel_layout", info.channel_layout);
// Array of floats (to hold samples for each channel)
float *channel_buffer = new float[channel_buffer_size];
@@ -927,7 +984,10 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr<Frame> frame, int64_t orig
// Add samples to frame for this channel
frame->AddAudio(true, channel_filter, 0, channel_buffer, position, 1.0f);
- ZmqLogger::Instance()->AppendDebugMethod("FrameMapper::ResampleMappedAudio (Add audio to channel)", "number of samples", position, "channel_filter", channel_filter);
+ ZmqLogger::Instance()->AppendDebugMethod(
+ "FrameMapper::ResampleMappedAudio (Add audio to channel)",
+ "number of samples", position,
+ "channel_filter", channel_filter);
}
// Update frame's audio meta data
@@ -955,9 +1015,10 @@ int64_t FrameMapper::AdjustFrameNumber(int64_t clip_frame_number) {
start = parent->Start();
}
- // Adjust start frame and position based on parent clip. This prevents ensures the same
- // frame # is used by mapped readers and clips, when calculating samples per frame. Thus,
- // this prevents gaps and mismatches in # of samples.
+ // Adjust start frame and position based on parent clip.
+ // This ensures the same frame # is used by mapped readers and clips,
+ // when calculating samples per frame.
+ // Thus, this prevents gaps and mismatches in # of samples.
int64_t clip_start_frame = (start * info.fps.ToDouble()) + 1;
int64_t clip_start_position = round(position * info.fps.ToDouble()) + 1;
int64_t frame_number = clip_frame_number + clip_start_position - clip_start_frame;
diff --git a/src/FrameMapper.h b/src/FrameMapper.h
index 62615cfb..813c644f 100644
--- a/src/FrameMapper.h
+++ b/src/FrameMapper.h
@@ -33,9 +33,9 @@
#include
#include
-#include
#include
#include
+
#include "CacheMemory.h"
#include "ReaderBase.h"
#include "Frame.h"
@@ -211,7 +211,7 @@ namespace openshot
void Open() override;
/// Print all of the original frames and which new frames they map to
- void PrintMapping();
+ void PrintMapping(std::ostream* out=&std::cout);
/// Get the current reader
ReaderBase* Reader();
diff --git a/src/KeyFrame.cpp b/src/KeyFrame.cpp
index 4107f2de..e25f4d7c 100644
--- a/src/KeyFrame.cpp
+++ b/src/KeyFrame.cpp
@@ -31,12 +31,14 @@
#include "KeyFrame.h"
#include "Exceptions.h"
-#include <algorithm>
-#include <functional>
-#include <utility>
-#include <cassert> // For assert()
-#include <iostream> // For std::cout
-#include <iomanip> // For std::setprecision
+#include <algorithm> // For std::lower_bound, std::move_backward
+#include <functional> // For std::less, std::less_equal, etc…
+#include <utility> // For std::swap
+#include <numeric> // For std::accumulate
+#include <cassert> // For assert()
+#include <cmath> // For fabs, round
+#include <iostream> // For std::cout
+#include <iomanip> // For std::setprecision
using namespace std;
using namespace openshot;
@@ -559,22 +561,51 @@ void Keyframe::UpdatePoint(int64_t index, Point p) {
AddPoint(p);
}
-void Keyframe::PrintPoints() const {
- cout << fixed << setprecision(4);
- for (std::vector<Point>::const_iterator it = Points.begin(); it != Points.end(); it++) {
- Point p = *it;
- cout << p.co.X << "\t" << p.co.Y << endl;
- }
+void Keyframe::PrintPoints(std::ostream* out) const {
+ *out << std::right << std::setprecision(4) << std::setfill(' ');
+ for (const auto& p : Points) {
+ *out << std::defaultfloat
+ << std::setw(6) << p.co.X
+ << std::setw(14) << std::fixed << p.co.Y
+ << '\n';
+ }
+ *out << std::flush;
}
-void Keyframe::PrintValues() const {
- cout << fixed << setprecision(4);
- cout << "Frame Number (X)\tValue (Y)\tIs Increasing\tRepeat Numerator\tRepeat Denominator\tDelta (Y Difference)\n";
+void Keyframe::PrintValues(std::ostream* out) const {
+ // Column widths
+ std::vector<int> w{10, 12, 8, 11, 19};
- for (int64_t i = 1; i < GetLength(); ++i) {
- cout << i << "\t" << GetValue(i) << "\t" << IsIncreasing(i) << "\t" ;
- cout << GetRepeatFraction(i).num << "\t" << GetRepeatFraction(i).den << "\t" << GetDelta(i) << "\n";
- }
+ *out << std::right << std::setfill(' ') << std::boolalpha
+ << std::setprecision(4);
+ // Headings
+ *out << "│"
+ << std::setw(w[0]) << "Frame# (X)" << " │"
+ << std::setw(w[1]) << "Y Value" << " │"
+ << std::setw(w[2]) << "Delta Y" << " │ "
+ << std::setw(w[3]) << "Increasing?" << " │ "
+ << std::setw(w[4]) << std::left << "Repeat Fraction" << std::right
+ << "│\n";
+ // Divider
+ *out << "├───────────"
+ << "┼─────────────"
+ << "┼─────────"
+ << "┼─────────────"
+ << "┼────────────────────┤\n";
+
+ for (int64_t i = 1; i < GetLength(); ++i) {
+ *out << "│"
+ << std::setw(w[0]-2) << std::defaultfloat << i
+ << (Contains(Point(i, 1)) ? " *" : " ") << " │"
+ << std::setw(w[1]) << std::fixed << GetValue(i) << " │"
+ << std::setw(w[2]) << std::defaultfloat << std::showpos
+ << GetDelta(i) << " │ " << std::noshowpos
+ << std::setw(w[3]) << IsIncreasing(i) << " │ "
+ << std::setw(w[4]) << std::left << GetRepeatFraction(i)
+ << std::right << "│\n";
+ }
+ *out << " * = Keyframe point (non-interpolated)\n";
+ *out << std::flush;
}
diff --git a/src/KeyFrame.h b/src/KeyFrame.h
index 6da34cac..45624dd2 100644
--- a/src/KeyFrame.h
+++ b/src/KeyFrame.h
@@ -31,7 +31,7 @@
#ifndef OPENSHOT_KEYFRAME_H
#define OPENSHOT_KEYFRAME_H
-#include
+#include
#include
#include "Fraction.h"
@@ -160,10 +160,10 @@ namespace openshot {
void UpdatePoint(int64_t index, Point p);
/// Print a list of points
- void PrintPoints() const;
+ void PrintPoints(std::ostream* out=&std::cout) const;
/// Print just the Y value of the point's primary coordinate
- void PrintValues() const;
+ void PrintValues(std::ostream* out=&std::cout) const;
};
diff --git a/src/OpenShotVersion.h.in b/src/OpenShotVersion.h.in
index 197242d3..b6f41890 100644
--- a/src/OpenShotVersion.h.in
+++ b/src/OpenShotVersion.h.in
@@ -51,6 +51,7 @@
#cmakedefine01 HAVE_IMAGEMAGICK
#cmakedefine01 HAVE_RESVG
#cmakedefine01 HAVE_OPENCV
+#cmakedefine01 FFMPEG_USE_SWRESAMPLE
#cmakedefine01 APPIMAGE_BUILD
#include
diff --git a/src/Point.cpp b/src/Point.cpp
index 44b6883f..b11aa7ce 100644
--- a/src/Point.cpp
+++ b/src/Point.cpp
@@ -35,24 +35,24 @@ using namespace std;
using namespace openshot;
// Default constructor
-Point::Point() : Point::Point(Coordinate(1, 0), BEZIER, AUTO) {};
+Point::Point() : Point::Point(Coordinate(1, 0), BEZIER, AUTO) {}
// Constructor which creates a single coordinate at X=1
-Point::Point(float y) : Point::Point(Coordinate(1, y), CONSTANT, AUTO) {};
+Point::Point(float y) : Point::Point(Coordinate(1, y), CONSTANT, AUTO) {}
// Constructor which creates a Bezier curve with point at (x, y)
-Point::Point(float x, float y) : Point::Point(Coordinate(x, y), BEZIER, AUTO) {};
+Point::Point(float x, float y) : Point::Point(Coordinate(x, y), BEZIER, AUTO) {}
// Constructor which also creates a Point, setting X,Y, and interpolation.
Point::Point(float x, float y, InterpolationType interpolation)
- : Point::Point(Coordinate(x, y), interpolation, AUTO) {};
+ : Point::Point(Coordinate(x, y), interpolation, AUTO) {}
// Direct Coordinate-accepting constructors
-Point::Point(const Coordinate& co) : Point::Point(co, BEZIER, AUTO) {};
+Point::Point(const Coordinate& co) : Point::Point(co, BEZIER, AUTO) {}
Point::Point(const Coordinate& co, InterpolationType interpolation)
- : Point::Point(co, interpolation, AUTO) {};
+ : Point::Point(co, interpolation, AUTO) {}
Point::Point(const Coordinate& co, InterpolationType interpolation, HandleType handle_type) :
co(co), interpolation(interpolation), handle_type(handle_type) {
diff --git a/src/Point.h b/src/Point.h
index 1795c469..118e6d39 100644
--- a/src/Point.h
+++ b/src/Point.h
@@ -37,95 +37,118 @@
namespace openshot
{
- /**
- * @brief This controls how a Keyframe uses this point to interpolate between two points.
- *
- * Bezier is a smooth curve. Linear is a straight line. Constant is a jump from the
- * previous point to this one.
- */
- enum InterpolationType {
- BEZIER, ///< Bezier curves are quadratic curves, which create a smooth curve.
- LINEAR, ///< Linear curves are angular, straight lines between two points.
- CONSTANT ///< Constant curves jump from their previous position to a new one (with no interpolation).
- };
+/**
+ * @brief This controls how a Keyframe uses this point to interpolate between two points.
+ *
+ * Bezier is a smooth curve. Linear is a straight line. Constant is a jump from the
+ * previous point to this one.
+ */
+enum InterpolationType {
+ BEZIER, ///< Bezier curves are quadratic curves, which create a smooth curve.
+ LINEAR, ///< Linear curves are angular, straight lines between two points.
+ CONSTANT ///< Constant curves jump from their previous position to a new one (with no interpolation).
+};
- /**
- * @brief When BEZIER interpolation is used, the point's left and right handles are used
- * to influence the direction of the curve.
- *
- * AUTO will try and adjust the handles automatically, to achieve the smoothest curves.
- * MANUAL will leave the handles alone, making it the responsibility of the user to set them.
- */
- enum HandleType {
- AUTO, ///< Automatically adjust the handles to achieve the smoothest curve
- MANUAL ///< Do not automatically adjust handles (set them manually)
- };
+/**
+ * @brief When BEZIER interpolation is used, the point's left and right handles are used
+ * to influence the direction of the curve.
+ *
+ * AUTO will try and adjust the handles automatically, to achieve the smoothest curves.
+ * MANUAL will leave the handles alone, making it the responsibility of the user to set them.
+ */
+enum HandleType {
+ AUTO, ///< Automatically adjust the handles to achieve the smoothest curve
+ MANUAL ///< Do not automatically adjust handles (set them manually)
+};
- /**
- * @brief A Point is the basic building block of a key-frame curve.
- *
- * Points have a primary coordinate and a left and right handle coordinate.
- * The handles are used to influence the direction of the curve as it
- * moves between the primary coordinate and the next primary coordinate when the
- * interpolation mode is BEZIER. When using LINEAR or CONSTANT, the handles are
- * ignored.
- *
- * Please see the following Example Code:
- * \code
- * Coordinate c1(3,9);
- * Point p1(c1, BEZIER);
- * assert(c1.X == 3);
- * assert(c1.Y == 9);
- *
- * \endcode
- */
- class Point {
- public:
- Coordinate co; ///< This is the primary coordinate
- Coordinate handle_left; ///< This is the left handle coordinate (in percentages from 0 to 1)
- Coordinate handle_right; ///< This is the right handle coordinate (in percentages from 0 to 1)
- InterpolationType interpolation; ///< This is the interpolation mode
- HandleType handle_type; ///< This is the handle mode
+/**
+ * @brief A Point is the basic building block of a key-frame curve.
+ *
+ * Points have a primary coordinate and a left and right handle coordinate.
+ * The handles are used to influence the direction of the curve as it
+ * moves between the primary coordinate and the next primary coordinate when the
+ * interpolation mode is BEZIER. When using LINEAR or CONSTANT, the handles are
+ * ignored.
+ *
+ * Please see the following Example Code:
+ * \code
+ * Coordinate c1(3,9);
+ * Point p1(c1, BEZIER);
+ * assert(c1.X == 3);
+ * assert(c1.Y == 9);
+ *
+ * \endcode
+ */
+class Point {
+public:
+ Coordinate co; ///< This is the primary coordinate
+ Coordinate handle_left; ///< This is the left handle coordinate (in percentages from 0 to 1)
+ Coordinate handle_right; ///< This is the right handle coordinate (in percentages from 0 to 1)
+ InterpolationType interpolation; ///< This is the interpolation mode
+ HandleType handle_type; ///< This is the handle mode
- /// Default constructor (defaults to 1,0)
- Point();
+ /// Default constructor (defaults to 1,0)
+ Point();
- /// Constructor which creates a single coordinate at X=1
- Point(float y);
+ /// Constructor which creates a single coordinate at X=1
+ Point(float y);
- /// Constructor which also creates a Point and sets the X and Y of the Point.
- Point(float x, float y);
+ /// Constructor which also creates a Point and sets the X and Y of the Point.
+ Point(float x, float y);
- /// Constructor which also creates a Point and sets the X,Y, and interpolation of the Point.
- Point(float x, float y, InterpolationType interpolation);
+ /// Constructor which also creates a Point and sets the X,Y, and interpolation of the Point.
+ Point(float x, float y, InterpolationType interpolation);
- /// Constructor which takes a coordinate
- Point(const Coordinate& co);
+ /// Constructor which takes a coordinate
+ Point(const Coordinate& co);
- /// Constructor which takes a coordinate and interpolation mode
- Point(const Coordinate& co, InterpolationType interpolation);
+ /// Constructor which takes a coordinate and interpolation mode
+ Point(const Coordinate& co, InterpolationType interpolation);
- /// Constructor which takes a coordinate, interpolation mode, and handle type
- Point(const Coordinate& co, InterpolationType interpolation, HandleType handle_type);
+ /// Constructor which takes a coordinate, interpolation mode, and handle type
+ Point(const Coordinate& co, InterpolationType interpolation, HandleType handle_type);
- /// Set the left and right handles to a percent of the primary coordinate (0 to 1)
- /// Defaults to a smooth curve (Ease in and out)
- void Initialize_Handles();
+ /// Set the left and right handles to a percent of the primary coordinate (0 to 1)
+ /// Defaults to a smooth curve (Ease in and out)
+ void Initialize_Handles();
- /// Set the left handle to a percent of the primary coordinate (0 to 1)
- void Initialize_LeftHandle(float x, float y);
+ /// Set the left handle to a percent of the primary coordinate (0 to 1)
+ void Initialize_LeftHandle(float x, float y);
- /// Set the right handle to a percent of the primary coordinate (0 to 1)
- void Initialize_RightHandle(float x, float y);
+ /// Set the right handle to a percent of the primary coordinate (0 to 1)
+ void Initialize_RightHandle(float x, float y);
- // Get and Set JSON methods
- std::string Json() const; ///< Generate JSON string of this object
- Json::Value JsonValue() const; ///< Generate Json::Value for this object
- void SetJson(const std::string value); ///< Load JSON string into this object
- void SetJsonValue(const Json::Value root); ///< Load Json::Value into this object
+ // Get and Set JSON methods
+ std::string Json() const; ///< Generate JSON string of this object
+ Json::Value JsonValue() const; ///< Generate Json::Value for this object
+ void SetJson(const std::string value); ///< Load JSON string into this object
+ void SetJsonValue(const Json::Value root); ///< Load Json::Value into this object
- };
+};
+// Stream output operator for openshot::Point
+template<class charT, class traits>
+std::basic_ostream<charT, traits>&
+operator<<(std::basic_ostream<charT, traits>& o, const openshot::Point& p) {
+ std::basic_ostringstream<charT, traits> s;
+ s.flags(o.flags());
+ s.imbue(o.getloc());
+ s.precision(o.precision());
+ s << "co" << p.co;
+ switch(p.interpolation) {
+ case(InterpolationType::LINEAR):
+ s << " LINEAR";
+ break;
+ case(InterpolationType::CONSTANT):
+ s << " CONSTANT";
+ break;
+ case(InterpolationType::BEZIER):
+ s << " BEZIER[L" << p.handle_left << ",R" << p.handle_right << ']';
+ break;
+ }
+ return o << s.str();
}
+} // namespace openshot
+
#endif
diff --git a/src/QtImageReader.cpp b/src/QtImageReader.cpp
index 29243ed4..1800a0f2 100644
--- a/src/QtImageReader.cpp
+++ b/src/QtImageReader.cpp
@@ -68,10 +68,14 @@ void QtImageReader::Open()
if (!is_open)
{
bool loaded = false;
+ QSize default_svg_size;
// Check for SVG files and rasterizing them to QImages
if (path.toLower().endsWith(".svg") || path.toLower().endsWith(".svgz")) {
- loaded = load_svg_path(path);
+ default_svg_size = load_svg_path(path);
+ if (!default_svg_size.isEmpty()) {
+ loaded = true;
+ }
}
if (!loaded) {
@@ -100,8 +104,15 @@ void QtImageReader::Open()
info.file_size = image->byteCount();
#endif
info.vcodec = "QImage";
- info.width = image->width();
- info.height = image->height();
+ if (!default_svg_size.isEmpty()) {
+ // Use default SVG size (if detected)
+ info.width = default_svg_size.width();
+ info.height = default_svg_size.height();
+ } else {
+ // Use Qt Image size as a fallback
+ info.width = image->width();
+ info.height = image->height();
+ }
info.pixel_ratio.num = 1;
info.pixel_ratio.den = 1;
info.duration = 60 * 60 * 1; // 1 hour duration
@@ -196,7 +207,7 @@ QSize QtImageReader::calculate_max_size() {
int max_width = info.width;
int max_height = info.height;
if (max_width == 0 || max_height == 0) {
- // If no size determined yet, default to 4K
+ // If no size determined yet
max_width = 1920;
max_height = 1080;
}
@@ -227,11 +238,23 @@ QSize QtImageReader::calculate_max_size() {
if (width_size.width() >= max_width && width_size.height() >= max_height) {
max_width = std::max(max_width, width_size.width());
max_height = std::max(max_height, width_size.height());
- }
- else {
+ } else {
max_width = std::max(max_width, height_size.width());
max_height = std::max(max_height, height_size.height());
}
+ } else if (parent->scale == SCALE_NONE) {
+ // Scale images to equivalent unscaled size
+ // Since the preview window can change sizes, we want to always
+ // scale against the ratio of original image size to timeline size
+ float preview_ratio = 1.0;
+ if (parent->ParentTimeline()) {
+ Timeline *t = (Timeline *) parent->ParentTimeline();
+ preview_ratio = t->preview_width / float(t->info.width);
+ }
+ float max_scale_x = parent->scale_x.GetMaxPoint().co.Y;
+ float max_scale_y = parent->scale_y.GetMaxPoint().co.Y;
+ max_width = info.width * max_scale_x * preview_ratio;
+ max_height = info.height * max_scale_y * preview_ratio;
}
}
@@ -240,8 +263,9 @@ QSize QtImageReader::calculate_max_size() {
}
// Load an SVG file with Resvg or fallback with Qt
-bool QtImageReader::load_svg_path(QString) {
+QSize QtImageReader::load_svg_path(QString) {
bool loaded = false;
+ QSize default_size(0,0);
// Calculate max image size
QSize current_max_size = calculate_max_size();
@@ -250,8 +274,12 @@ bool QtImageReader::load_svg_path(QString) {
// Use libresvg for parsing/rasterizing SVG
ResvgRenderer renderer(path);
if (renderer.isValid()) {
+ // Set default SVG size
+ default_size.setWidth(renderer.defaultSize().width());
+ default_size.setHeight(renderer.defaultSize().height());
+
// Scale SVG size to keep aspect ratio, and fill the max_size as best as possible
- QSize svg_size(renderer.defaultSize().width(), renderer.defaultSize().height());
+ QSize svg_size(default_size.width(), default_size.height());
svg_size.scale(current_max_size.width(), current_max_size.height(), Qt::KeepAspectRatio);
// Load SVG at max size
@@ -269,22 +297,28 @@ bool QtImageReader::load_svg_path(QString) {
image = std::make_shared<QImage>();
loaded = image->load(path);
- if (loaded && (image->width() < current_max_size.width() || image->height() < current_max_size.height())) {
- // Load SVG into larger/project size (so image is not blurry)
- QSize svg_size = image->size().scaled(current_max_size.width(), current_max_size.height(), Qt::KeepAspectRatio);
- if (QCoreApplication::instance()) {
- // Requires QApplication to be running (for QPixmap support)
- // Re-rasterize SVG image to max size
- image = std::make_shared<QImage>(QIcon(path).pixmap(svg_size).toImage());
- } else {
- // Scale image without re-rasterizing it (due to lack of QApplication)
- image = std::make_shared<QImage>(image->scaled(
- svg_size.width(), svg_size.height(), Qt::KeepAspectRatio, Qt::SmoothTransformation));
+ if (loaded) {
+ // Set default SVG size
+ default_size.setWidth(image->width());
+ default_size.setHeight(image->height());
+
+ if (image->width() < current_max_size.width() || image->height() < current_max_size.height()) {
+ // Load SVG into larger/project size (so image is not blurry)
+ QSize svg_size = image->size().scaled(current_max_size.width(), current_max_size.height(), Qt::KeepAspectRatio);
+ if (QCoreApplication::instance()) {
+ // Requires QApplication to be running (for QPixmap support)
+ // Re-rasterize SVG image to max size
+ image = std::make_shared<QImage>(QIcon(path).pixmap(svg_size).toImage());
+ } else {
+ // Scale image without re-rasterizing it (due to lack of QApplication)
+ image = std::make_shared<QImage>(image->scaled(
+ svg_size.width(), svg_size.height(), Qt::KeepAspectRatio, Qt::SmoothTransformation));
+ }
}
}
}
- return loaded;
+ return default_size;
}
// Generate JSON string of this object
diff --git a/src/QtImageReader.h b/src/QtImageReader.h
index 0dc359b1..687e85e1 100644
--- a/src/QtImageReader.h
+++ b/src/QtImageReader.h
@@ -77,7 +77,7 @@ namespace openshot
///
- /// @returns Success as a boolean
+ /// @returns The SVG's default (intrinsic) size; an empty QSize on load failure
/// @param path The file path of the SVG file
- bool load_svg_path(QString path);
+ QSize load_svg_path(QString path);
/// Calculate the max_size QSize, based on parent timeline and parent clip settings
QSize calculate_max_size();
diff --git a/src/QtPlayer.cpp b/src/QtPlayer.cpp
index c43cdf80..f3697f71 100644
--- a/src/QtPlayer.cpp
+++ b/src/QtPlayer.cpp
@@ -119,7 +119,7 @@ namespace openshot
void QtPlayer::Play()
{
- // Set mode to playing, and speed to normal
+ // Set mode to playing, and speed to normal
mode = PLAYBACK_PLAY;
Speed(1);
diff --git a/src/ReaderBase.cpp b/src/ReaderBase.cpp
index 127fefbe..63b5cada 100644
--- a/src/ReaderBase.cpp
+++ b/src/ReaderBase.cpp
@@ -28,8 +28,14 @@
* along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <iostream>
+#include <iomanip>
+#include <memory>
+
#include "ReaderBase.h"
+#include "Json.h"
+
using namespace openshot;
/// Constructor for the base reader, where many things are initialized.
@@ -67,49 +73,49 @@ ReaderBase::ReaderBase()
}
// Display file information
-void ReaderBase::DisplayInfo() {
- std::cout << std::fixed << std::setprecision(2) << std::boolalpha;
- std::cout << "----------------------------" << std::endl;
- std::cout << "----- File Information -----" << std::endl;
- std::cout << "----------------------------" << std::endl;
- std::cout << "--> Has Video: " << info.has_video << std::endl;
- std::cout << "--> Has Audio: " << info.has_audio << std::endl;
- std::cout << "--> Has Single Image: " << info.has_single_image << std::endl;
- std::cout << "--> Duration: " << info.duration << " Seconds" << std::endl;
- std::cout << "--> File Size: " << double(info.file_size) / 1024 / 1024 << " MB" << std::endl;
- std::cout << "----------------------------" << std::endl;
- std::cout << "----- Video Attributes -----" << std::endl;
- std::cout << "----------------------------" << std::endl;
- std::cout << "--> Width: " << info.width << std::endl;
- std::cout << "--> Height: " << info.height << std::endl;
- std::cout << "--> Pixel Format: " << info.pixel_format << std::endl;
- std::cout << "--> Frames Per Second: " << info.fps.ToDouble() << " (" << info.fps.num << "/" << info.fps.den << ")" << std::endl;
- std::cout << "--> Video Bit Rate: " << info.video_bit_rate/1000 << " kb/s" << std::endl;
- std::cout << "--> Pixel Ratio: " << info.pixel_ratio.ToDouble() << " (" << info.pixel_ratio.num << "/" << info.pixel_ratio.den << ")" << std::endl;
- std::cout << "--> Display Aspect Ratio: " << info.display_ratio.ToDouble() << " (" << info.display_ratio.num << "/" << info.display_ratio.den << ")" << std::endl;
- std::cout << "--> Video Codec: " << info.vcodec << std::endl;
- std::cout << "--> Video Length: " << info.video_length << " Frames" << std::endl;
- std::cout << "--> Video Stream Index: " << info.video_stream_index << std::endl;
- std::cout << "--> Video Timebase: " << info.video_timebase.ToDouble() << " (" << info.video_timebase.num << "/" << info.video_timebase.den << ")" << std::endl;
- std::cout << "--> Interlaced: " << info.interlaced_frame << std::endl;
- std::cout << "--> Interlaced: Top Field First: " << info.top_field_first << std::endl;
- std::cout << "----------------------------" << std::endl;
- std::cout << "----- Audio Attributes -----" << std::endl;
- std::cout << "----------------------------" << std::endl;
- std::cout << "--> Audio Codec: " << info.acodec << std::endl;
- std::cout << "--> Audio Bit Rate: " << info.audio_bit_rate/1000 << " kb/s" << std::endl;
- std::cout << "--> Sample Rate: " << info.sample_rate << " Hz" << std::endl;
- std::cout << "--> # of Channels: " << info.channels << std::endl;
- std::cout << "--> Channel Layout: " << info.channel_layout << std::endl;
- std::cout << "--> Audio Stream Index: " << info.audio_stream_index << std::endl;
- std::cout << "--> Audio Timebase: " << info.audio_timebase.ToDouble() << " (" << info.audio_timebase.num << "/" << info.audio_timebase.den << ")" << std::endl;
- std::cout << "----------------------------" << std::endl;
- std::cout << "--------- Metadata ---------" << std::endl;
- std::cout << "----------------------------" << std::endl;
+void ReaderBase::DisplayInfo(std::ostream* out) {
+ *out << std::fixed << std::setprecision(2) << std::boolalpha;
+ *out << "----------------------------" << std::endl;
+ *out << "----- File Information -----" << std::endl;
+ *out << "----------------------------" << std::endl;
+ *out << "--> Has Video: " << info.has_video << std::endl;
+ *out << "--> Has Audio: " << info.has_audio << std::endl;
+ *out << "--> Has Single Image: " << info.has_single_image << std::endl;
+ *out << "--> Duration: " << info.duration << " Seconds" << std::endl;
+ *out << "--> File Size: " << double(info.file_size) / 1024 / 1024 << " MB" << std::endl;
+ *out << "----------------------------" << std::endl;
+ *out << "----- Video Attributes -----" << std::endl;
+ *out << "----------------------------" << std::endl;
+ *out << "--> Width: " << info.width << std::endl;
+ *out << "--> Height: " << info.height << std::endl;
+ *out << "--> Pixel Format: " << info.pixel_format << std::endl;
+ *out << "--> Frames Per Second: " << info.fps.ToDouble() << " (" << info.fps.num << "/" << info.fps.den << ")" << std::endl;
+ *out << "--> Video Bit Rate: " << info.video_bit_rate/1000 << " kb/s" << std::endl;
+ *out << "--> Pixel Ratio: " << info.pixel_ratio.ToDouble() << " (" << info.pixel_ratio.num << "/" << info.pixel_ratio.den << ")" << std::endl;
+ *out << "--> Display Aspect Ratio: " << info.display_ratio.ToDouble() << " (" << info.display_ratio.num << "/" << info.display_ratio.den << ")" << std::endl;
+ *out << "--> Video Codec: " << info.vcodec << std::endl;
+ *out << "--> Video Length: " << info.video_length << " Frames" << std::endl;
+ *out << "--> Video Stream Index: " << info.video_stream_index << std::endl;
+ *out << "--> Video Timebase: " << info.video_timebase.ToDouble() << " (" << info.video_timebase.num << "/" << info.video_timebase.den << ")" << std::endl;
+ *out << "--> Interlaced: " << info.interlaced_frame << std::endl;
+ *out << "--> Interlaced: Top Field First: " << info.top_field_first << std::endl;
+ *out << "----------------------------" << std::endl;
+ *out << "----- Audio Attributes -----" << std::endl;
+ *out << "----------------------------" << std::endl;
+ *out << "--> Audio Codec: " << info.acodec << std::endl;
+ *out << "--> Audio Bit Rate: " << info.audio_bit_rate/1000 << " kb/s" << std::endl;
+ *out << "--> Sample Rate: " << info.sample_rate << " Hz" << std::endl;
+ *out << "--> # of Channels: " << info.channels << std::endl;
+ *out << "--> Channel Layout: " << info.channel_layout << std::endl;
+ *out << "--> Audio Stream Index: " << info.audio_stream_index << std::endl;
+ *out << "--> Audio Timebase: " << info.audio_timebase.ToDouble() << " (" << info.audio_timebase.num << "/" << info.audio_timebase.den << ")" << std::endl;
+ *out << "----------------------------" << std::endl;
+ *out << "--------- Metadata ---------" << std::endl;
+ *out << "----------------------------" << std::endl;
// Iterate through metadata
for (auto it : info.metadata)
- std::cout << "--> " << it.first << ": " << it.second << std::endl;
+ *out << "--> " << it.first << ": " << it.second << std::endl;
}
// Generate Json::Value for this object
diff --git a/src/ReaderBase.h b/src/ReaderBase.h
index 7b7847a8..4d2d3afa 100644
--- a/src/ReaderBase.h
+++ b/src/ReaderBase.h
@@ -31,11 +31,9 @@
#ifndef OPENSHOT_READER_BASE_H
#define OPENSHOT_READER_BASE_H
-#include
-#include
#include
-#include
-#include
+#include <iostream>
+
#include "CacheMemory.h"
#include "ChannelLayouts.h"
#include "ClipBase.h"
@@ -120,7 +118,7 @@ namespace openshot
virtual void Close() = 0;
/// Display file information in the standard output stream (stdout)
- void DisplayInfo();
+ void DisplayInfo(std::ostream* out=&std::cout);
/// Get the cache object used by this reader (note: not all readers use cache)
virtual openshot::CacheBase* GetCache() = 0;
diff --git a/src/Timeline.cpp b/src/Timeline.cpp
index aea9595c..347733bd 100644
--- a/src/Timeline.cpp
+++ b/src/Timeline.cpp
@@ -80,11 +80,12 @@ Timeline::Timeline(int width, int height, Fraction fps, int sample_rate, int cha
info.acodec = "openshot::timeline";
info.vcodec = "openshot::timeline";
- // Init cache
- final_cache = new CacheMemory();
-
// Init max image size
SetMaxSize(info.width, info.height);
+
+ // Init cache
+ final_cache = new CacheMemory();
+ final_cache->SetMaxBytesFromInfo(max_concurrent_frames * 4, info.width, info.height, info.sample_rate, info.channels);
}
// Delegating constructor that copies parameters from a provided ReaderInfo
@@ -95,7 +96,7 @@ Timeline::Timeline(const ReaderInfo info) : Timeline::Timeline(
// Constructor for the timeline (which loads a JSON structure from a file path, and initializes a timeline)
Timeline::Timeline(const std::string& projectPath, bool convert_absolute_paths) :
is_open(false), auto_map_clips(true), managed_cache(true), path(projectPath),
- max_concurrent_frames(OPEN_MP_NUM_PROCESSORS) {
+ max_concurrent_frames(OPEN_MP_NUM_PROCESSORS) {
// Create CrashHandler and Attach (incase of errors)
CrashHandler::Instance();
@@ -212,11 +213,12 @@ Timeline::Timeline(const std::string& projectPath, bool convert_absolute_paths)
info.has_video = true;
info.has_audio = true;
- // Init cache
- final_cache = new CacheMemory();
-
// Init max image size
SetMaxSize(info.width, info.height);
+
+ // Init cache
+ final_cache = new CacheMemory();
+ final_cache->SetMaxBytesFromInfo(max_concurrent_frames * 4, info.width, info.height, info.sample_rate, info.channels);
}
Timeline::~Timeline() {
@@ -1063,8 +1065,8 @@ void Timeline::SetJsonValue(const Json::Value root) {
// When a clip is attached to an object, it searches for the object
// on it's parent timeline. Setting the parent timeline of the clip here
// allows attaching it to an object when exporting the project (because)
- // the exporter script initializes the clip and it's effects
- // before setting it's parent timeline.
+ // the exporter script initializes the clip and it's effects
+ // before setting its parent timeline.
c->ParentTimeline(this);
// Load Json into Clip
@@ -1495,8 +1497,8 @@ void Timeline::apply_json_to_timeline(Json::Value change) {
// Clear all caches
void Timeline::ClearAllCache() {
- // Get lock (prevent getting frames while this happens)
- const GenericScopedLock lock(getFrameCriticalSection);
+ // Get lock (prevent getting frames while this happens)
+ const GenericScopedLock lock(getFrameCriticalSection);
// Clear primary cache
final_cache->Clear();
@@ -1509,11 +1511,10 @@ void Timeline::ClearAllCache() {
// Clear nested Reader (if any)
if (clip->Reader()->Name() == "FrameMapper") {
- FrameMapper* nested_reader = (FrameMapper*) clip->Reader();
- if (nested_reader->Reader() && nested_reader->Reader()->GetCache())
- nested_reader->Reader()->GetCache()->Clear();
- }
-
+ FrameMapper* nested_reader = (FrameMapper*) clip->Reader();
+ if (nested_reader->Reader() && nested_reader->Reader()->GetCache())
+ nested_reader->Reader()->GetCache()->Clear();
+ }
}
}
@@ -1530,7 +1531,4 @@ void Timeline::SetMaxSize(int width, int height) {
// Update preview settings
preview_width = display_ratio_size.width();
preview_height = display_ratio_size.height();
-
- // Update timeline cache size
- final_cache->SetMaxBytesFromInfo(max_concurrent_frames * 4, preview_width, preview_height, info.sample_rate, info.channels);
}
diff --git a/src/TrackedObjectBBox.cpp b/src/TrackedObjectBBox.cpp
index 451cde04..e577e4d0 100644
--- a/src/TrackedObjectBBox.cpp
+++ b/src/TrackedObjectBBox.cpp
@@ -395,10 +395,13 @@ void TrackedObjectBBox::SetJsonValue(const Json::Value root)
// Set the protobuf data path by the given JSON object
if (!root["protobuf_data_path"].isNull())
protobufDataPath = root["protobuf_data_path"].asString();
+
// Set the id of the child clip
- if (!root["child_clip_id"].isNull() && root["child_clip_id"].asString() != ""){
+ if (!root["child_clip_id"].isNull() && root["child_clip_id"].asString() != "" && root["child_clip_id"].asString() != Id()){
Clip* parentClip = (Clip *) ParentClip();
- ChildClipId(root["child_clip_id"].asString());
+
+ if (root["child_clip_id"].asString() != parentClip->Id())
+ ChildClipId(root["child_clip_id"].asString());
}
// Set the Keyframes by the given JSON object
diff --git a/src/WriterBase.cpp b/src/WriterBase.cpp
index fff93988..8faab981 100644
--- a/src/WriterBase.cpp
+++ b/src/WriterBase.cpp
@@ -28,6 +28,9 @@
* along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <iostream>
+#include <iomanip>
+
#include "WriterBase.h"
#include "Exceptions.h"
@@ -100,43 +103,43 @@ void WriterBase::CopyReaderInfo(ReaderBase* reader)
}
// Display file information
-void WriterBase::DisplayInfo() {
- std::cout << std::fixed << std::setprecision(2) << std::boolalpha;
- std::cout << "----------------------------" << std::endl;
- std::cout << "----- File Information -----" << std::endl;
- std::cout << "----------------------------" << std::endl;
- std::cout << "--> Has Video: " << info.has_video << std::endl;
- std::cout << "--> Has Audio: " << info.has_audio << std::endl;
- std::cout << "--> Has Single Image: " << info.has_single_image << std::endl;
- std::cout << "--> Duration: " << info.duration << " Seconds" << std::endl;
- std::cout << "--> File Size: " << double(info.file_size) / 1024 / 1024 << " MB" << std::endl;
- std::cout << "----------------------------" << std::endl;
- std::cout << "----- Video Attributes -----" << std::endl;
- std::cout << "----------------------------" << std::endl;
- std::cout << "--> Width: " << info.width << std::endl;
- std::cout << "--> Height: " << info.height << std::endl;
- std::cout << "--> Pixel Format: " << info.pixel_format << std::endl;
- std::cout << "--> Frames Per Second: " << info.fps.ToDouble() << " (" << info.fps.num << "/" << info.fps.den << ")" << std::endl;
- std::cout << "--> Video Bit Rate: " << info.video_bit_rate/1000 << " kb/s" << std::endl;
- std::cout << "--> Pixel Ratio: " << info.pixel_ratio.ToDouble() << " (" << info.pixel_ratio.num << "/" << info.pixel_ratio.den << ")" << std::endl;
- std::cout << "--> Display Aspect Ratio: " << info.display_ratio.ToDouble() << " (" << info.display_ratio.num << "/" << info.display_ratio.den << ")" << std::endl;
- std::cout << "--> Video Codec: " << info.vcodec << std::endl;
- std::cout << "--> Video Length: " << info.video_length << " Frames" << std::endl;
- std::cout << "--> Video Stream Index: " << info.video_stream_index << std::endl;
- std::cout << "--> Video Timebase: " << info.video_timebase.ToDouble() << " (" << info.video_timebase.num << "/" << info.video_timebase.den << ")" << std::endl;
- std::cout << "--> Interlaced: " << info.interlaced_frame << std::endl;
- std::cout << "--> Interlaced: Top Field First: " << info.top_field_first << std::endl;
- std::cout << "----------------------------" << std::endl;
- std::cout << "----- Audio Attributes -----" << std::endl;
- std::cout << "----------------------------" << std::endl;
- std::cout << "--> Audio Codec: " << info.acodec << std::endl;
- std::cout << "--> Audio Bit Rate: " << info.audio_bit_rate/1000 << " kb/s" << std::endl;
- std::cout << "--> Sample Rate: " << info.sample_rate << " Hz" << std::endl;
- std::cout << "--> # of Channels: " << info.channels << std::endl;
- std::cout << "--> Channel Layout: " << info.channel_layout << std::endl;
- std::cout << "--> Audio Stream Index: " << info.audio_stream_index << std::endl;
- std::cout << "--> Audio Timebase: " << info.audio_timebase.ToDouble() << " (" << info.audio_timebase.num << "/" << info.audio_timebase.den << ")" << std::endl;
- std::cout << "----------------------------" << std::endl;
+void WriterBase::DisplayInfo(std::ostream* out) {
+ *out << std::fixed << std::setprecision(2) << std::boolalpha;
+ *out << "----------------------------" << std::endl;
+ *out << "----- File Information -----" << std::endl;
+ *out << "----------------------------" << std::endl;
+ *out << "--> Has Video: " << info.has_video << std::endl;
+ *out << "--> Has Audio: " << info.has_audio << std::endl;
+ *out << "--> Has Single Image: " << info.has_single_image << std::endl;
+ *out << "--> Duration: " << info.duration << " Seconds" << std::endl;
+ *out << "--> File Size: " << double(info.file_size) / 1024 / 1024 << " MB" << std::endl;
+ *out << "----------------------------" << std::endl;
+ *out << "----- Video Attributes -----" << std::endl;
+ *out << "----------------------------" << std::endl;
+ *out << "--> Width: " << info.width << std::endl;
+ *out << "--> Height: " << info.height << std::endl;
+ *out << "--> Pixel Format: " << info.pixel_format << std::endl;
+ *out << "--> Frames Per Second: " << info.fps.ToDouble() << " (" << info.fps.num << "/" << info.fps.den << ")" << std::endl;
+ *out << "--> Video Bit Rate: " << info.video_bit_rate/1000 << " kb/s" << std::endl;
+ *out << "--> Pixel Ratio: " << info.pixel_ratio.ToDouble() << " (" << info.pixel_ratio.num << "/" << info.pixel_ratio.den << ")" << std::endl;
+ *out << "--> Display Aspect Ratio: " << info.display_ratio.ToDouble() << " (" << info.display_ratio.num << "/" << info.display_ratio.den << ")" << std::endl;
+ *out << "--> Video Codec: " << info.vcodec << std::endl;
+ *out << "--> Video Length: " << info.video_length << " Frames" << std::endl;
+ *out << "--> Video Stream Index: " << info.video_stream_index << std::endl;
+ *out << "--> Video Timebase: " << info.video_timebase.ToDouble() << " (" << info.video_timebase.num << "/" << info.video_timebase.den << ")" << std::endl;
+ *out << "--> Interlaced: " << info.interlaced_frame << std::endl;
+ *out << "--> Interlaced: Top Field First: " << info.top_field_first << std::endl;
+ *out << "----------------------------" << std::endl;
+ *out << "----- Audio Attributes -----" << std::endl;
+ *out << "----------------------------" << std::endl;
+ *out << "--> Audio Codec: " << info.acodec << std::endl;
+ *out << "--> Audio Bit Rate: " << info.audio_bit_rate/1000 << " kb/s" << std::endl;
+ *out << "--> Sample Rate: " << info.sample_rate << " Hz" << std::endl;
+ *out << "--> # of Channels: " << info.channels << std::endl;
+ *out << "--> Channel Layout: " << info.channel_layout << std::endl;
+ *out << "--> Audio Stream Index: " << info.audio_stream_index << std::endl;
+ *out << "--> Audio Timebase: " << info.audio_timebase.ToDouble() << " (" << info.audio_timebase.num << "/" << info.audio_timebase.den << ")" << std::endl;
+ *out << "----------------------------" << std::endl;
}
// Generate JSON string of this object
diff --git a/src/WriterBase.h b/src/WriterBase.h
index d18f329d..3939ca6e 100644
--- a/src/WriterBase.h
+++ b/src/WriterBase.h
@@ -32,7 +32,7 @@
#define OPENSHOT_WRITER_BASE_H
#include
-#include
+
#include "ChannelLayouts.h"
#include "Fraction.h"
#include "Frame.h"
@@ -113,7 +113,7 @@ namespace openshot
void SetJsonValue(const Json::Value root); ///< Load Json::Value into this object
/// Display file information in the standard output stream (stdout)
- void DisplayInfo();
+ void DisplayInfo(std::ostream* out=&std::cout);
/// Open the writer (and start initializing streams)
virtual void Open() = 0;
diff --git a/src/audio_effects/Compressor.cpp b/src/audio_effects/Compressor.cpp
new file mode 100644
index 00000000..b0cb34e0
--- /dev/null
+++ b/src/audio_effects/Compressor.cpp
@@ -0,0 +1,224 @@
+/**
+ * @file
+ * @brief Source file for Compressor audio effect class
+ * @author
+ *
+ * @ref License
+ */
+
+/* LICENSE
+ *
+ * Copyright (c) 2008-2019 OpenShot Studios, LLC
+ * <http://www.openshotstudios.com/>. This file is part of
+ * OpenShot Library (libopenshot), an open-source project dedicated to
+ * delivering high quality video editing and animation solutions to the
+ * world. For more information visit <http://www.openshot.org/>.
+ *
+ * OpenShot Library (libopenshot) is free software: you can redistribute it
+ * and/or modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * OpenShot Library (libopenshot) is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "Compressor.h"
+#include "Exceptions.h"
+
+using namespace openshot;
+
+/// Blank constructor, useful when using Json to load the effect properties
+Compressor::Compressor() : threshold(-10), ratio(1), attack(1), release(1), makeup_gain(1), bypass(false) {
+ // Init effect properties
+ init_effect_details();
+}
+
+// Default constructor
+Compressor::Compressor(Keyframe new_threshold, Keyframe new_ratio, Keyframe new_attack, Keyframe new_release, Keyframe new_makeup_gain, Keyframe new_bypass) :
+ threshold(new_threshold), ratio(new_ratio), attack(new_attack), release(new_release), makeup_gain(new_makeup_gain), bypass(new_bypass)
+{
+ // Init effect properties
+ init_effect_details();
+}
+
+// Init effect settings
+void Compressor::init_effect_details()
+{
+ /// Initialize the values of the EffectInfo struct.
+ InitEffectInfo();
+
+ /// Set the effect info
+ info.class_name = "Compressor";
+ info.name = "Compressor";
+ info.description = "Reduce the volume of loud sounds or amplify quiet sounds.";
+ info.has_audio = true;
+ info.has_video = false;
+
+ input_level = 0.0f;
+ yl_prev = 0.0f;
+}
+
+// This method is required for all derived classes of EffectBase, and returns a
+// modified openshot::Frame object
+std::shared_ptr<openshot::Frame> Compressor::GetFrame(std::shared_ptr<openshot::Frame> frame, int64_t frame_number)
+{
+ // Adding Compressor
+ const int num_input_channels = frame->audio->getNumChannels();
+ const int num_output_channels = frame->audio->getNumChannels();
+ const int num_samples = frame->audio->getNumSamples();
+
+ mixed_down_input.setSize(1, num_samples);
+ inverse_sample_rate = 1.0f / frame->SampleRate();
+ inverseE = 1.0f / M_E;
+
+ if ((bool)bypass.GetValue(frame_number))
+ return frame;
+
+ mixed_down_input.clear();
+
+ for (int channel = 0; channel < num_input_channels; ++channel)
+ mixed_down_input.addFrom(0, 0, *frame->audio, channel, 0, num_samples, 1.0f / num_input_channels);
+
+ for (int sample = 0; sample < num_samples; ++sample) {
+ float T = threshold.GetValue(frame_number);
+ float R = ratio.GetValue(frame_number);
+ float alphaA = calculateAttackOrRelease(attack.GetValue(frame_number));
+ float alphaR = calculateAttackOrRelease(release.GetValue(frame_number));
+ float gain = makeup_gain.GetValue(frame_number);
+ float input_squared = powf(mixed_down_input.getSample(0, sample), 2.0f);
+
+ input_level = input_squared;
+
+ xg = (input_level <= 1e-6f) ? -60.0f : 10.0f * log10f(input_level);
+
+ if (xg < T)
+ yg = xg;
+ else
+ yg = T + (xg - T) / R;
+
+ xl = xg - yg;
+
+ if (xl > yl_prev)
+ yl = alphaA * yl_prev + (1.0f - alphaA) * xl;
+ else
+ yl = alphaR * yl_prev + (1.0f - alphaR) * xl;
+
+ control = powf (10.0f, (gain - yl) * 0.05f);
+ yl_prev = yl;
+
+ for (int channel = 0; channel < num_input_channels; ++channel) {
+ float new_value = frame->audio->getSample(channel, sample)*control;
+ frame->audio->setSample(channel, sample, new_value);
+ }
+ }
+
+ for (int channel = num_input_channels; channel < num_output_channels; ++channel)
+ frame->audio->clear(channel, 0, num_samples);
+
+ // return the modified frame
+ return frame;
+}
+
+float Compressor::calculateAttackOrRelease(float value)
+{
+ if (value == 0.0f)
+ return 0.0f;
+ else
+ return pow (inverseE, inverse_sample_rate / value);
+}
+
+// Generate JSON string of this object
+std::string Compressor::Json() const {
+
+ // Return formatted string
+ return JsonValue().toStyledString();
+}
+
+// Generate Json::Value for this object
+Json::Value Compressor::JsonValue() const {
+
+ // Create root json object
+ Json::Value root = EffectBase::JsonValue(); // get parent properties
+ root["type"] = info.class_name;
+ root["threshold"] = threshold.JsonValue();
+ root["ratio"] = ratio.JsonValue();
+ root["attack"] = attack.JsonValue();
+ root["release"] = release.JsonValue();
+ root["makeup_gain"] = makeup_gain.JsonValue();
+ root["bypass"] = bypass.JsonValue();
+
+ // return JsonValue
+ return root;
+}
+
+// Load JSON string into this object
+void Compressor::SetJson(const std::string value) {
+
+ // Parse JSON string into JSON objects
+ try
+ {
+ const Json::Value root = openshot::stringToJson(value);
+ // Set all values that match
+ SetJsonValue(root);
+ }
+ catch (const std::exception& e)
+ {
+ // Error parsing JSON (or missing keys)
+ throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
+ }
+}
+
+// Load Json::Value into this object
+void Compressor::SetJsonValue(const Json::Value root) {
+
+ // Set parent data
+ EffectBase::SetJsonValue(root);
+
+ // Set data from Json (if key is found)
+ if (!root["threshold"].isNull())
+ threshold.SetJsonValue(root["threshold"]);
+
+ if (!root["ratio"].isNull())
+ ratio.SetJsonValue(root["ratio"]);
+
+ if (!root["attack"].isNull())
+ attack.SetJsonValue(root["attack"]);
+
+ if (!root["release"].isNull())
+ release.SetJsonValue(root["release"]);
+
+ if (!root["makeup_gain"].isNull())
+ makeup_gain.SetJsonValue(root["makeup_gain"]);
+
+ if (!root["bypass"].isNull())
+ bypass.SetJsonValue(root["bypass"]);
+}
+
+// Get all properties for a specific frame
+std::string Compressor::PropertiesJSON(int64_t requested_frame) const {
+
+ // Generate JSON properties list
+ Json::Value root;
+ root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
+ root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
+ root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
+ root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
+ root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
+
+ // Keyframes
+ root["threshold"] = add_property_json("Threshold (dB)", threshold.GetValue(requested_frame), "float", "", &threshold, -60, 0, false, requested_frame);
+ root["ratio"] = add_property_json("Ratio", ratio.GetValue(requested_frame), "float", "", &ratio, 1, 100, false, requested_frame);
+ root["attack"] = add_property_json("Attack (ms)", attack.GetValue(requested_frame), "float", "", &attack, 0.1, 100, false, requested_frame);
+ root["release"] = add_property_json("Release (ms)", release.GetValue(requested_frame), "float", "", &release, 10, 1000, false, requested_frame);
+ root["makeup_gain"] = add_property_json("Makeup gain (dB)", makeup_gain.GetValue(requested_frame), "float", "", &makeup_gain, -12, 12, false, requested_frame);
+ root["bypass"] = add_property_json("Bypass", bypass.GetValue(requested_frame), "bool", "", &bypass, 0, 1, false, requested_frame);
+
+ // Return formatted string
+ return root.toStyledString();
+}
diff --git a/src/audio_effects/Compressor.h b/src/audio_effects/Compressor.h
new file mode 100644
index 00000000..7dcdb252
--- /dev/null
+++ b/src/audio_effects/Compressor.h
@@ -0,0 +1,125 @@
+/**
+ * @file
+ * @brief Header file for Compressor audio effect class
+ * @author
+ *
+ * @ref License
+ */
+
+/* LICENSE
+ *
+ * Copyright (c) 2008-2019 OpenShot Studios, LLC
+ * <http://www.openshotstudios.com/>. This file is part of
+ * OpenShot Library (libopenshot), an open-source project dedicated to
+ * delivering high quality video editing and animation solutions to the
+ * world. For more information visit <http://www.openshot.org/>.
+ *
+ * OpenShot Library (libopenshot) is free software: you can redistribute it
+ * and/or modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * OpenShot Library (libopenshot) is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef OPENSHOT_COMPRESSOR_AUDIO_EFFECT_H
+#define OPENSHOT_COMPRESSOR_AUDIO_EFFECT_H
+
+#include "../EffectBase.h"
+
+#include "../Frame.h"
+#include "../Json.h"
+#include "../KeyFrame.h"
+#include "../Enums.h"
+
+#include
+#include
+#include
+
+
+namespace openshot
+{
+
+ /**
+ * @brief This class adds a compressor into the audio
+ *
+ */
+ class Compressor : public EffectBase
+ {
+ private:
+ /// Init effect settings
+ void init_effect_details();
+
+
+ public:
+ Keyframe threshold;
+ Keyframe ratio;
+ Keyframe attack;
+ Keyframe release;
+ Keyframe makeup_gain;
+ Keyframe bypass;
+
+ juce::AudioSampleBuffer mixed_down_input;
+ float xl;
+ float yl;
+ float xg;
+ float yg;
+ float control;
+
+ float input_level;
+ float yl_prev;
+
+ float inverse_sample_rate;
+ float inverseE;
+
+ /// Blank constructor, useful when using Json to load the effect properties
+ Compressor();
+
+ /// Default constructor
+ ///
+ /// @param new_level The audio default Compressor level (between 1 and 100)
+ Compressor(Keyframe new_threshold, Keyframe new_ratio, Keyframe new_attack, Keyframe new_release, Keyframe new_makeup_gain, Keyframe new_bypass);
+
+ float calculateAttackOrRelease(float value);
+
+ /// @brief This method is required for all derived classes of ClipBase, and returns a
+ /// new openshot::Frame object. All Clip keyframes and effects are resolved into
+ /// pixels.
+ ///
+ /// @returns A new openshot::Frame object
+ /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline.
+ std::shared_ptr<openshot::Frame> GetFrame(int64_t frame_number) override {
+ return GetFrame(std::make_shared<openshot::Frame>(), frame_number);
+ }
+
+ /// @brief This method is required for all derived classes of ClipBase, and returns a
+ /// modified openshot::Frame object
+ ///
+ /// The frame object is passed into this method and used as a starting point (pixels and audio).
+ /// All Clip keyframes and effects are resolved into pixels.
+ ///
+ /// @returns The modified openshot::Frame object
+ /// @param frame The frame object that needs the clip or effect applied to it
+ /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline.
+ std::shared_ptr<openshot::Frame> GetFrame(std::shared_ptr<openshot::Frame> frame, int64_t frame_number) override;
+
+ // Get and Set JSON methods
+ std::string Json() const override; ///< Generate JSON string of this object
+ void SetJson(const std::string value) override; ///< Load JSON string into this object
+ Json::Value JsonValue() const override; ///< Generate Json::Value for this object
+ void SetJsonValue(const Json::Value root) override; ///< Load Json::Value into this object
+
+ /// Get all properties for a specific frame (perfect for a UI to display the current state
+ /// of all properties at any time)
+ std::string PropertiesJSON(int64_t requested_frame) const override;
+ };
+
+}
+
+#endif
diff --git a/src/audio_effects/Delay.cpp b/src/audio_effects/Delay.cpp
new file mode 100644
index 00000000..467ba756
--- /dev/null
+++ b/src/audio_effects/Delay.cpp
@@ -0,0 +1,190 @@
+/**
+ * @file
+ * @brief Source file for Delay audio effect class
+ * @author
+ *
+ * @ref License
+ */
+
+/* LICENSE
+ *
+ * Copyright (c) 2008-2019 OpenShot Studios, LLC
+ * <http://www.openshotstudios.com/>. This file is part of
+ * OpenShot Library (libopenshot), an open-source project dedicated to
+ * delivering high quality video editing and animation solutions to the
+ * world. For more information visit <http://www.openshot.org/>.
+ *
+ * OpenShot Library (libopenshot) is free software: you can redistribute it
+ * and/or modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * OpenShot Library (libopenshot) is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "Delay.h"
+#include "Exceptions.h"
+
+using namespace openshot;
+
+/// Blank constructor, useful when using Json to load the effect properties
+Delay::Delay() : delay_time(1) {
+ // Init effect properties
+ init_effect_details();
+}
+
+// Default constructor
+Delay::Delay(Keyframe new_delay_time) : delay_time(new_delay_time)
+{
+ // Init effect properties
+ init_effect_details();
+}
+
+// Init effect settings
+void Delay::init_effect_details()
+{
+ /// Initialize the values of the EffectInfo struct.
+ InitEffectInfo();
+
+ /// Set the effect info
+ info.class_name = "Delay";
+ info.name = "Delay";
+ info.description = "Adjust the synchronism between the audio and video track.";
+ info.has_audio = true;
+ info.has_video = false;
+ initialized = false;
+}
+
+void Delay::setup(std::shared_ptr<openshot::Frame> frame)
+{
+ if (!initialized)
+ {
+ const float max_delay_time = 5;
+ delay_buffer_samples = (int)(max_delay_time * (float)frame->SampleRate()) + 1;
+
+ if (delay_buffer_samples < 1)
+ delay_buffer_samples = 1;
+
+ delay_buffer_channels = frame->audio->getNumChannels();
+ delay_buffer.setSize(delay_buffer_channels, delay_buffer_samples);
+ delay_buffer.clear();
+ delay_write_position = 0;
+ initialized = true;
+ }
+}
+
+// This method is required for all derived classes of EffectBase, and returns a
+// modified openshot::Frame object
+std::shared_ptr<openshot::Frame> Delay::GetFrame(std::shared_ptr<openshot::Frame> frame, int64_t frame_number)
+{
+ const float delay_time_value = (float)delay_time.GetValue(frame_number)*(float)frame->SampleRate();
+ int local_write_position;
+
+ setup(frame);
+
+ for (int channel = 0; channel < frame->audio->getNumChannels(); channel++)
+ {
+ float *channel_data = frame->audio->getWritePointer(channel);
+ float *delay_data = delay_buffer.getWritePointer(channel);
+ local_write_position = delay_write_position;
+
+ for (auto sample = 0; sample < frame->audio->getNumSamples(); ++sample)
+ {
+ const float in = (float)(channel_data[sample]);
+ float out = 0.0f;
+
+ float read_position = fmodf((float)local_write_position - delay_time_value + (float)delay_buffer_samples, delay_buffer_samples);
+ int local_read_position = floorf(read_position);
+
+ if (local_read_position != local_write_position)
+ {
+ float fraction = read_position - (float)local_read_position;
+ float delayed1 = delay_data[(local_read_position + 0)];
+ float delayed2 = delay_data[(local_read_position + 1) % delay_buffer_samples];
+ out = (float)(delayed1 + fraction * (delayed2 - delayed1));
+
+ channel_data[sample] = in + (out - in);
+ delay_data[local_write_position] = in;
+ }
+
+ if (++local_write_position >= delay_buffer_samples)
+ local_write_position -= delay_buffer_samples;
+ }
+ }
+
+ delay_write_position = local_write_position;
+
+ // return the modified frame
+ return frame;
+}
+
+// Generate JSON string of this object
+std::string Delay::Json() const {
+
+ // Return formatted string
+ return JsonValue().toStyledString();
+}
+
+// Generate Json::Value for this object
+Json::Value Delay::JsonValue() const {
+
+ // Create root json object
+ Json::Value root = EffectBase::JsonValue(); // get parent properties
+ root["type"] = info.class_name;
+ root["delay_time"] = delay_time.JsonValue();
+
+ // return JsonValue
+ return root;
+}
+
+// Load JSON string into this object
+void Delay::SetJson(const std::string value) {
+
+ // Parse JSON string into JSON objects
+ try
+ {
+ const Json::Value root = openshot::stringToJson(value);
+ // Set all values that match
+ SetJsonValue(root);
+ }
+ catch (const std::exception& e)
+ {
+ // Error parsing JSON (or missing keys)
+ throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
+ }
+}
+
+// Load Json::Value into this object
+void Delay::SetJsonValue(const Json::Value root) {
+
+ // Set parent data
+ EffectBase::SetJsonValue(root);
+
+ // Set data from Json (if key is found)
+ if (!root["delay_time"].isNull())
+ delay_time.SetJsonValue(root["delay_time"]);
+}
+
+// Get all properties for a specific frame
+std::string Delay::PropertiesJSON(int64_t requested_frame) const {
+
+ // Generate JSON properties list
+ Json::Value root;
+ root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
+ root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
+ root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
+ root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
+ root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
+
+ // Keyframes
+ root["delay_time"] = add_property_json("Delay Time", delay_time.GetValue(requested_frame), "float", "", &delay_time, 0, 5, false, requested_frame);
+
+ // Return formatted string
+ return root.toStyledString();
+}
diff --git a/src/audio_effects/Delay.h b/src/audio_effects/Delay.h
new file mode 100644
index 00000000..a693c292
--- /dev/null
+++ b/src/audio_effects/Delay.h
@@ -0,0 +1,110 @@
+/**
+ * @file
+ * @brief Header file for Delay audio effect class
+ * @author
+ *
+ * @ref License
+ */
+
+/* LICENSE
+ *
+ * Copyright (c) 2008-2019 OpenShot Studios, LLC
+ * <http://www.openshotstudios.com/>. This file is part of
+ * OpenShot Library (libopenshot), an open-source project dedicated to
+ * delivering high quality video editing and animation solutions to the
+ * world. For more information visit <http://www.openshot.org/>.
+ *
+ * OpenShot Library (libopenshot) is free software: you can redistribute it
+ * and/or modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * OpenShot Library (libopenshot) is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef OPENSHOT_DELAY_AUDIO_EFFECT_H
+#define OPENSHOT_DELAY_AUDIO_EFFECT_H
+
+#include "../EffectBase.h"
+
+#include "../Frame.h"
+#include "../Json.h"
+#include "../KeyFrame.h"
+
+#include
+#include
+#include
+#include
+
+
+namespace openshot
+{
+
+ /**
+ * @brief This class adds a delay into the audio
+ *
+ */
+ class Delay : public EffectBase
+ {
+ private:
+ /// Init effect settings
+ void init_effect_details();
+
+ public:
+ Keyframe delay_time;
+
+ juce::AudioSampleBuffer delay_buffer;
+ int delay_buffer_samples;
+ int delay_buffer_channels;
+ int delay_write_position;
+ bool initialized;
+
+ /// Blank constructor, useful when using Json to load the effect properties
+ Delay();
+
+ /// Default constructor
+ Delay(Keyframe new_delay_time);
+
+ /// @brief This method is required for all derived classes of ClipBase, and returns a
+ /// new openshot::Frame object. All Clip keyframes and effects are resolved into
+ /// pixels.
+ ///
+ /// @returns A new openshot::Frame object
+ /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline.
+ std::shared_ptr<Frame> GetFrame(int64_t frame_number) override {
+ return GetFrame(std::make_shared<Frame>(), frame_number);
+ }
+
+ void setup(std::shared_ptr<Frame> frame);
+
+ /// @brief This method is required for all derived classes of ClipBase, and returns a
+ /// modified openshot::Frame object
+ ///
+ /// The frame object is passed into this method and used as a starting point (pixels and audio).
+ /// All Clip keyframes and effects are resolved into pixels.
+ ///
+ /// @returns The modified openshot::Frame object
+ /// @param frame The frame object that needs the clip or effect applied to it
+ /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline.
+ std::shared_ptr<Frame> GetFrame(std::shared_ptr<Frame> frame, int64_t frame_number) override;
+
+ // Get and Set JSON methods
+ std::string Json() const override; ///< Generate JSON string of this object
+ void SetJson(const std::string value) override; ///< Load JSON string into this object
+ Json::Value JsonValue() const override; ///< Generate Json::Value for this object
+ void SetJsonValue(const Json::Value root) override; ///< Load Json::Value into this object
+
+ /// Get all properties for a specific frame (perfect for a UI to display the current state
+ /// of all properties at any time)
+ std::string PropertiesJSON(int64_t requested_frame) const override;
+ };
+
+}
+
+#endif
diff --git a/src/audio_effects/Distortion.cpp b/src/audio_effects/Distortion.cpp
new file mode 100644
index 00000000..1f4d58b6
--- /dev/null
+++ b/src/audio_effects/Distortion.cpp
@@ -0,0 +1,261 @@
+/**
+ * @file
+ * @brief Source file for Distortion audio effect class
+ * @author
+ *
+ * @ref License
+ */
+
+/* LICENSE
+ *
+ * Copyright (c) 2008-2019 OpenShot Studios, LLC
+ * <http://www.openshotstudios.com/>. This file is part of
+ * OpenShot Library (libopenshot), an open-source project dedicated to
+ * delivering high quality video editing and animation solutions to the
+ * world. For more information visit <http://www.openshot.org/>.
+ *
+ * OpenShot Library (libopenshot) is free software: you can redistribute it
+ * and/or modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * OpenShot Library (libopenshot) is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "Distortion.h"
+#include "Exceptions.h"
+
+using namespace openshot;
+
+/// Blank constructor, useful when using Json to load the effect properties
+Distortion::Distortion() : distortion_type(HARD_CLIPPING), input_gain(10), output_gain(-10), tone(5) {
+ // Init effect properties
+ init_effect_details();
+}
+
+// Default constructor
+Distortion::Distortion(openshot::DistortionType new_distortion_type, Keyframe new_input_gain, Keyframe new_output_gain, Keyframe new_tone) :
+ distortion_type(new_distortion_type), input_gain(new_input_gain), output_gain(new_output_gain), tone(new_tone)
+{
+ // Init effect properties
+ init_effect_details();
+}
+
+// Init effect settings
+void Distortion::init_effect_details()
+{
+ /// Initialize the values of the EffectInfo struct.
+ InitEffectInfo();
+
+ /// Set the effect info
+ info.class_name = "Distortion";
+ info.name = "Distortion";
+ info.description = "Alter the audio by clipping the signal.";
+ info.has_audio = true;
+ info.has_video = false;
+}
+
+
+// This method is required for all derived classes of EffectBase, and returns a
+// modified openshot::Frame object
+std::shared_ptr<Frame> Distortion::GetFrame(std::shared_ptr<Frame> frame, int64_t frame_number)
+{
+ filters.clear();
+
+ for (int i = 0; i < frame->audio->getNumChannels(); ++i) {
+ Filter* filter;
+ filters.add (filter = new Filter());
+ }
+
+ updateFilters(frame_number);
+
+ // Add distortion
+ for (int channel = 0; channel < frame->audio->getNumChannels(); channel++)
+ {
+ auto *channel_data = frame->audio->getWritePointer(channel);
+ float out;
+
+ for (auto sample = 0; sample < frame->audio->getNumSamples(); ++sample)
+ {
+
+ const int input_gain_value = (int)input_gain.GetValue(frame_number);
+ const int output_gain_value = (int)output_gain.GetValue(frame_number);
+ const float in = channel_data[sample]*powf(10.0f, input_gain_value * 0.05f);
+
+ // Use the current distortion type
+ switch (distortion_type) {
+
+ case HARD_CLIPPING: {
+ float threshold = 0.5f;
+ if (in > threshold)
+ out = threshold;
+ else if (in < -threshold)
+ out = -threshold;
+ else
+ out = in;
+ break;
+ }
+
+ case SOFT_CLIPPING: {
+ float threshold1 = 1.0f / 3.0f;
+ float threshold2 = 2.0f / 3.0f;
+ if (in > threshold2)
+ out = 1.0f;
+ else if (in > threshold1)
+ out = 1.0f - powf (2.0f - 3.0f * in, 2.0f) / 3.0f;
+ else if (in < -threshold2)
+ out = -1.0f;
+ else if (in < -threshold1)
+ out = -1.0f + powf (2.0f + 3.0f * in, 2.0f) / 3.0f;
+ else
+ out = 2.0f * in;
+ out *= 0.5f;
+ break;
+ }
+
+ case EXPONENTIAL: {
+ if (in > 0.0f)
+ out = 1.0f - expf (-in);
+ else
+ out = -1.0f + expf (in);
+ break;
+ }
+
+ case FULL_WAVE_RECTIFIER: {
+ out = fabsf (in);
+ break;
+ }
+
+ case HALF_WAVE_RECTIFIER: {
+ if (in > 0.0f)
+ out = in;
+ else
+ out = 0.0f;
+ break;
+ }
+ }
+
+ float filtered = filters[channel]->processSingleSampleRaw(out);
+ channel_data[sample] = filtered*powf(10.0f, output_gain_value * 0.05f);
+ }
+ }
+
+ // return the modified frame
+ return frame;
+}
+
+void Distortion::updateFilters(int64_t frame_number)
+{
+ double discrete_frequency = M_PI * 0.01;
+ double gain = pow(10.0, (float)tone.GetValue(frame_number) * 0.05);
+
+ for (int i = 0; i < filters.size(); ++i)
+ filters[i]->updateCoefficients(discrete_frequency, gain);
+}
+
+// Generate JSON string of this object
+std::string Distortion::Json() const {
+
+ // Return formatted string
+ return JsonValue().toStyledString();
+}
+
+void Distortion::Filter::updateCoefficients(const double discrete_frequency, const double gain)
+{
+ double tan_half_wc = tan(discrete_frequency / 2.0);
+ double sqrt_gain = sqrt(gain);
+
+ coefficients = juce::IIRCoefficients(/* b0 */ sqrt_gain * tan_half_wc + gain,
+ /* b1 */ sqrt_gain * tan_half_wc - gain,
+ /* b2 */ 0.0,
+ /* a0 */ sqrt_gain * tan_half_wc + 1.0,
+ /* a1 */ sqrt_gain * tan_half_wc - 1.0,
+ /* a2 */ 0.0);
+ setCoefficients(coefficients);
+}
+
+// Generate Json::Value for this object
+Json::Value Distortion::JsonValue() const {
+
+ // Create root json object
+ Json::Value root = EffectBase::JsonValue(); // get parent properties
+ root["type"] = info.class_name;
+ root["distortion_type"] = distortion_type;
+ root["input_gain"] = input_gain.JsonValue();
+ root["output_gain"] = output_gain.JsonValue();
+ root["tone"] = tone.JsonValue();
+
+ // return JsonValue
+ return root;
+}
+
+// Load JSON string into this object
+void Distortion::SetJson(const std::string value) {
+
+ // Parse JSON string into JSON objects
+ try
+ {
+ const Json::Value root = openshot::stringToJson(value);
+ // Set all values that match
+ SetJsonValue(root);
+ }
+ catch (const std::exception& e)
+ {
+ // Error parsing JSON (or missing keys)
+ throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
+ }
+}
+
+// Load Json::Value into this object
+void Distortion::SetJsonValue(const Json::Value root) {
+
+ // Set parent data
+ EffectBase::SetJsonValue(root);
+
+ // Set data from Json (if key is found)
+ if (!root["distortion_type"].isNull())
+ distortion_type = (DistortionType)root["distortion_type"].asInt();
+
+ if (!root["input_gain"].isNull())
+ input_gain.SetJsonValue(root["input_gain"]);
+
+ if (!root["output_gain"].isNull())
+ output_gain.SetJsonValue(root["output_gain"]);
+
+ if (!root["tone"].isNull())
+ tone.SetJsonValue(root["tone"]);
+}
+
+// Get all properties for a specific frame
+std::string Distortion::PropertiesJSON(int64_t requested_frame) const {
+
+ // Generate JSON properties list
+ Json::Value root;
+ root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
+ root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
+ root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
+ root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
+ root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
+
+ // Keyframes
+ root["distortion_type"] = add_property_json("Distortion Type", distortion_type, "int", "", NULL, 0, 3, false, requested_frame);
+ root["input_gain"] = add_property_json("Input Gain (dB)", input_gain.GetValue(requested_frame), "int", "", &input_gain, -24, 24, false, requested_frame);
+ root["output_gain"] = add_property_json("Output Gain (dB)", output_gain.GetValue(requested_frame), "int", "", &output_gain, -24, 24, false, requested_frame);
+ root["tone"] = add_property_json("Tone (dB)", tone.GetValue(requested_frame), "int", "", &tone, -24, 24, false, requested_frame);
+
+ // Add distortion_type choices (dropdown style)
+ root["distortion_type"]["choices"].append(add_property_choice_json("Hard Clipping", HARD_CLIPPING, distortion_type));
+ root["distortion_type"]["choices"].append(add_property_choice_json("Soft Clipping", SOFT_CLIPPING, distortion_type));
+ root["distortion_type"]["choices"].append(add_property_choice_json("Exponential", EXPONENTIAL, distortion_type));
+ root["distortion_type"]["choices"].append(add_property_choice_json("Full Wave Rectifier", FULL_WAVE_RECTIFIER, distortion_type));
+ root["distortion_type"]["choices"].append(add_property_choice_json("Half Wave Rectifier", HALF_WAVE_RECTIFIER, distortion_type));
+
+ // Return formatted string
+ return root.toStyledString();
+}
diff --git a/src/audio_effects/Distortion.h b/src/audio_effects/Distortion.h
new file mode 100644
index 00000000..8163a075
--- /dev/null
+++ b/src/audio_effects/Distortion.h
@@ -0,0 +1,119 @@
+/**
+ * @file
+ * @brief Header file for Distortion audio effect class
+ * @author
+ *
+ * @ref License
+ */
+
+/* LICENSE
+ *
+ * Copyright (c) 2008-2019 OpenShot Studios, LLC
+ * <http://www.openshotstudios.com/>. This file is part of
+ * OpenShot Library (libopenshot), an open-source project dedicated to
+ * delivering high quality video editing and animation solutions to the
+ * world. For more information visit <http://www.openshot.org/>.
+ *
+ * OpenShot Library (libopenshot) is free software: you can redistribute it
+ * and/or modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * OpenShot Library (libopenshot) is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef OPENSHOT_DISTORTION_AUDIO_EFFECT_H
+#define OPENSHOT_DISTORTION_AUDIO_EFFECT_H
+#define _USE_MATH_DEFINES
+
+#include "../EffectBase.h"
+
+#include "../Frame.h"
+#include "../Json.h"
+#include "../KeyFrame.h"
+#include "../Enums.h"
+
+#include
+#include
+#include
+// #include
+
+
+namespace openshot
+{
+
+ /**
+ * @brief This class adds a distortion into the audio
+ *
+ */
+ class Distortion : public EffectBase
+ {
+ private:
+ /// Init effect settings
+ void init_effect_details();
+
+ public:
+ openshot::DistortionType distortion_type;
+ Keyframe input_gain;
+ Keyframe output_gain;
+ Keyframe tone;
+
+ /// Blank constructor, useful when using Json to load the effect properties
+ Distortion();
+
+ /// Default constructor
+ ///
+ /// @param new_level The audio default distortion level (between 1 and 100)
+ Distortion(openshot::DistortionType new_distortion_type, Keyframe new_input_gain, Keyframe new_output_gain, Keyframe new_tone);
+
+ /// @brief This method is required for all derived classes of ClipBase, and returns a
+ /// new openshot::Frame object. All Clip keyframes and effects are resolved into
+ /// pixels.
+ ///
+ /// @returns A new openshot::Frame object
+ /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline.
+ std::shared_ptr<Frame> GetFrame(int64_t frame_number) override {
+ return GetFrame(std::make_shared<Frame>(), frame_number);
+ }
+
+ /// @brief This method is required for all derived classes of ClipBase, and returns a
+ /// modified openshot::Frame object
+ ///
+ /// The frame object is passed into this method and used as a starting point (pixels and audio).
+ /// All Clip keyframes and effects are resolved into pixels.
+ ///
+ /// @returns The modified openshot::Frame object
+ /// @param frame The frame object that needs the clip or effect applied to it
+ /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline.
+ std::shared_ptr<Frame> GetFrame(std::shared_ptr<Frame> frame, int64_t frame_number) override;
+
+ // Get and Set JSON methods
+ std::string Json() const override; ///< Generate JSON string of this object
+ void SetJson(const std::string value) override; ///< Load JSON string into this object
+ Json::Value JsonValue() const override; ///< Generate Json::Value for this object
+ void SetJsonValue(const Json::Value root) override; ///< Load Json::Value into this object
+
+ /// Get all properties for a specific frame (perfect for a UI to display the current state
+ /// of all properties at any time)
+ std::string PropertiesJSON(int64_t requested_frame) const override;
+
+ class Filter : public juce::IIRFilter
+ {
+ public:
+ void updateCoefficients(const double discrete_frequency, const double gain);
+ };
+
+ juce::OwnedArray<Filter> filters;
+
+ void updateFilters(int64_t frame_number);
+ };
+
+}
+
+#endif
diff --git a/src/audio_effects/Echo.cpp b/src/audio_effects/Echo.cpp
new file mode 100644
index 00000000..442a5df1
--- /dev/null
+++ b/src/audio_effects/Echo.cpp
@@ -0,0 +1,200 @@
+/**
+ * @file
+ * @brief Source file for Echo audio effect class
+ * @author
+ *
+ * @ref License
+ */
+
+/* LICENSE
+ *
+ * Copyright (c) 2008-2019 OpenShot Studios, LLC
+ * <http://www.openshotstudios.com/>. This file is part of
+ * OpenShot Library (libopenshot), an open-source project dedicated to
+ * delivering high quality video editing and animation solutions to the
+ * world. For more information visit <http://www.openshot.org/>.
+ *
+ * OpenShot Library (libopenshot) is free software: you can redistribute it
+ * and/or modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * OpenShot Library (libopenshot) is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "Echo.h"
+#include "Exceptions.h"
+
+using namespace openshot;
+
+/// Blank constructor, useful when using Json to load the effect properties
+Echo::Echo() : echo_time(0.1), feedback(0.5), mix(0.5) {
+ // Init effect properties
+ init_effect_details();
+}
+
+// Default constructor
+Echo::Echo(Keyframe new_echo_time, Keyframe new_feedback, Keyframe new_mix) :
+ echo_time(new_echo_time), feedback(new_feedback), mix(new_mix)
+{
+ // Init effect properties
+ init_effect_details();
+}
+
+// Init effect settings
+void Echo::init_effect_details()
+{
+ /// Initialize the values of the EffectInfo struct.
+ InitEffectInfo();
+
+ /// Set the effect info
+ info.class_name = "Echo";
+ info.name = "Echo";
+ info.description = "Reflection of sound with a delay after the direct sound.";
+ info.has_audio = true;
+ info.has_video = false;
+ initialized = false;
+}
+
+void Echo::setup(std::shared_ptr<Frame> frame)
+{
+ if (!initialized)
+ {
+ const float max_echo_time = 5;
+ echo_buffer_samples = (int)(max_echo_time * (float)frame->SampleRate()) + 1;
+
+ if (echo_buffer_samples < 1)
+ echo_buffer_samples = 1;
+
+ echo_buffer_channels = frame->audio->getNumChannels();
+ echo_buffer.setSize(echo_buffer_channels, echo_buffer_samples);
+ echo_buffer.clear();
+ echo_write_position = 0;
+ initialized = true;
+ }
+}
+
+// This method is required for all derived classes of EffectBase, and returns a
+// modified openshot::Frame object
+std::shared_ptr<Frame> Echo::GetFrame(std::shared_ptr<Frame> frame, int64_t frame_number)
+{
+ const float echo_time_value = (float)echo_time.GetValue(frame_number)*(float)frame->SampleRate();
+ const float feedback_value = feedback.GetValue(frame_number);
+ const float mix_value = mix.GetValue(frame_number);
+ int local_write_position;
+
+ setup(frame);
+
+ for (int channel = 0; channel < frame->audio->getNumChannels(); channel++)
+ {
+ float *channel_data = frame->audio->getWritePointer(channel);
+ float *echo_data = echo_buffer.getWritePointer(channel);
+ local_write_position = echo_write_position;
+
+ for (auto sample = 0; sample < frame->audio->getNumSamples(); ++sample)
+ {
+ const float in = (float)(channel_data[sample]);
+ float out = 0.0f;
+
+ float read_position = fmodf((float)local_write_position - echo_time_value + (float)echo_buffer_samples, echo_buffer_samples);
+ int local_read_position = floorf(read_position);
+
+ if (local_read_position != local_write_position)
+ {
+ float fraction = read_position - (float)local_read_position;
+ float echoed1 = echo_data[(local_read_position + 0)];
+ float echoed2 = echo_data[(local_read_position + 1) % echo_buffer_samples];
+ out = (float)(echoed1 + fraction * (echoed2 - echoed1));
+ channel_data[sample] = in + mix_value*(out - in);
+ echo_data[local_write_position] = in + out*feedback_value;
+ }
+
+ if (++local_write_position >= echo_buffer_samples)
+ local_write_position -= echo_buffer_samples;
+ }
+ }
+
+ echo_write_position = local_write_position;
+
+ // return the modified frame
+ return frame;
+}
+
+// Generate JSON string of this object
+std::string Echo::Json() const {
+
+ // Return formatted string
+ return JsonValue().toStyledString();
+}
+
+// Generate Json::Value for this object
+Json::Value Echo::JsonValue() const {
+
+ // Create root json object
+ Json::Value root = EffectBase::JsonValue(); // get parent properties
+ root["type"] = info.class_name;
+ root["echo_time"] = echo_time.JsonValue();
+ root["feedback"] = feedback.JsonValue();
+ root["mix"] = mix.JsonValue();
+
+ // return JsonValue
+ return root;
+}
+
+// Load JSON string into this object
+void Echo::SetJson(const std::string value) {
+
+ // Parse JSON string into JSON objects
+ try
+ {
+ const Json::Value root = openshot::stringToJson(value);
+ // Set all values that match
+ SetJsonValue(root);
+ }
+ catch (const std::exception& e)
+ {
+ // Error parsing JSON (or missing keys)
+ throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
+ }
+}
+
+// Load Json::Value into this object
+void Echo::SetJsonValue(const Json::Value root) {
+
+ // Set parent data
+ EffectBase::SetJsonValue(root);
+
+ // Set data from Json (if key is found)
+ if (!root["echo_time"].isNull())
+ echo_time.SetJsonValue(root["echo_time"]);
+ if (!root["feedback"].isNull())
+ feedback.SetJsonValue(root["feedback"]);
+ if (!root["mix"].isNull())
+ mix.SetJsonValue(root["mix"]);
+}
+
+// Get all properties for a specific frame
+std::string Echo::PropertiesJSON(int64_t requested_frame) const {
+
+ // Generate JSON properties list
+ Json::Value root;
+ root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
+ root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
+ root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
+ root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
+ root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
+
+ // Keyframes
+ root["echo_time"] = add_property_json("Time", echo_time.GetValue(requested_frame), "float", "", &echo_time, 0, 5, false, requested_frame);
+ root["feedback"] = add_property_json("Feedback", feedback.GetValue(requested_frame), "float", "", &feedback, 0, 1, false, requested_frame);
+ root["mix"] = add_property_json("Mix", mix.GetValue(requested_frame), "float", "", &mix, 0, 1, false, requested_frame);
+
+ // Return formatted string
+ return root.toStyledString();
+}
diff --git a/src/audio_effects/Echo.h b/src/audio_effects/Echo.h
new file mode 100644
index 00000000..9a120b6e
--- /dev/null
+++ b/src/audio_effects/Echo.h
@@ -0,0 +1,112 @@
+/**
+ * @file
+ * @brief Header file for Echo audio effect class
+ * @author
+ *
+ * @ref License
+ */
+
+/* LICENSE
+ *
+ * Copyright (c) 2008-2019 OpenShot Studios, LLC
+ * <http://www.openshotstudios.com/>. This file is part of
+ * OpenShot Library (libopenshot), an open-source project dedicated to
+ * delivering high quality video editing and animation solutions to the
+ * world. For more information visit <http://www.openshot.org/>.
+ *
+ * OpenShot Library (libopenshot) is free software: you can redistribute it
+ * and/or modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * OpenShot Library (libopenshot) is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef OPENSHOT_ECHO_AUDIO_EFFECT_H
+#define OPENSHOT_ECHO_AUDIO_EFFECT_H
+
+#include "../EffectBase.h"
+
+#include "../Frame.h"
+#include "../Json.h"
+#include "../KeyFrame.h"
+
+#include
+#include
+#include
+#include
+
+
+namespace openshot
+{
+
+ /**
+ * @brief This class adds a echo into the audio
+ *
+ */
+ class Echo : public EffectBase
+ {
+ private:
+ /// Init effect settings
+ void init_effect_details();
+
+ public:
+ Keyframe echo_time;
+ Keyframe feedback;
+ Keyframe mix;
+
+ juce::AudioSampleBuffer echo_buffer;
+ int echo_buffer_samples;
+ int echo_buffer_channels;
+ int echo_write_position;
+ bool initialized;
+
+ /// Blank constructor, useful when using Json to load the effect properties
+ Echo();
+
+ /// Default constructor
+ Echo(Keyframe new_echo_time, Keyframe new_feedback, Keyframe new_mix);
+
+ /// @brief This method is required for all derived classes of ClipBase, and returns a
+ /// new openshot::Frame object. All Clip keyframes and effects are resolved into
+ /// pixels.
+ ///
+ /// @returns A new openshot::Frame object
+ /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline.
+ std::shared_ptr<Frame> GetFrame(int64_t frame_number) override {
+ return GetFrame(std::make_shared<Frame>(), frame_number);
+ }
+
+ void setup(std::shared_ptr<Frame> frame);
+
+ /// @brief This method is required for all derived classes of ClipBase, and returns a
+ /// modified openshot::Frame object
+ ///
+ /// The frame object is passed into this method and used as a starting point (pixels and audio).
+ /// All Clip keyframes and effects are resolved into pixels.
+ ///
+ /// @returns The modified openshot::Frame object
+ /// @param frame The frame object that needs the clip or effect applied to it
+ /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline.
+ std::shared_ptr<Frame> GetFrame(std::shared_ptr<Frame> frame, int64_t frame_number) override;
+
+ // Get and Set JSON methods
+ std::string Json() const override; ///< Generate JSON string of this object
+ void SetJson(const std::string value) override; ///< Load JSON string into this object
+ Json::Value JsonValue() const override; ///< Generate Json::Value for this object
+ void SetJsonValue(const Json::Value root) override; ///< Load Json::Value into this object
+
+ /// Get all properties for a specific frame (perfect for a UI to display the current state
+ /// of all properties at any time)
+ std::string PropertiesJSON(int64_t requested_frame) const override;
+ };
+
+}
+
+#endif
diff --git a/src/audio_effects/Expander.cpp b/src/audio_effects/Expander.cpp
new file mode 100644
index 00000000..3d60974f
--- /dev/null
+++ b/src/audio_effects/Expander.cpp
@@ -0,0 +1,228 @@
+/**
+ * @file
+ * @brief Source file for Expander audio effect class
+ * @author
+ *
+ * @ref License
+ */
+
+/* LICENSE
+ *
+ * Copyright (c) 2008-2019 OpenShot Studios, LLC
+ * <http://www.openshotstudios.com/>. This file is part of
+ * OpenShot Library (libopenshot), an open-source project dedicated to
+ * delivering high quality video editing and animation solutions to the
+ * world. For more information visit <http://www.openshot.org/>.
+ *
+ * OpenShot Library (libopenshot) is free software: you can redistribute it
+ * and/or modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * OpenShot Library (libopenshot) is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "Expander.h"
+#include "Exceptions.h"
+
+using namespace openshot;
+
+/// Blank constructor, useful when using Json to load the effect properties
+Expander::Expander() : threshold(-10), ratio(1), attack(1), release(1), makeup_gain(1), bypass(false) {
+ // Init effect properties
+ init_effect_details();
+}
+
+// Default constructor
+Expander::Expander(Keyframe new_threshold, Keyframe new_ratio, Keyframe new_attack, Keyframe new_release, Keyframe new_makeup_gain, Keyframe new_bypass) :
+ threshold(new_threshold), ratio(new_ratio), attack(new_attack), release(new_release), makeup_gain(new_makeup_gain), bypass(new_bypass)
+{
+ // Init effect properties
+ init_effect_details();
+}
+
+// Init effect settings
+void Expander::init_effect_details()
+{
+ /// Initialize the values of the EffectInfo struct.
+ InitEffectInfo();
+
+ /// Set the effect info
+ info.class_name = "Expander";
+ info.name = "Expander";
+ info.description = "Louder parts of audio becomes relatively louder and quieter parts becomes quieter.";
+ info.has_audio = true;
+ info.has_video = false;
+
+ input_level = 0.0f;
+ yl_prev = 0.0f;
+
+
+}
+
+// This method is required for all derived classes of EffectBase, and returns a
+// modified openshot::Frame object
+std::shared_ptr<Frame> Expander::GetFrame(std::shared_ptr<Frame> frame, int64_t frame_number)
+{
+ // Adding Expander
+ const int num_input_channels = frame->audio->getNumChannels();
+ const int num_output_channels = frame->audio->getNumChannels();
+ const int num_samples = frame->audio->getNumSamples();
+
+ mixed_down_input.setSize(1, num_samples);
+ inverse_sample_rate = 1.0f / frame->SampleRate();
+ inverseE = 1.0f / M_E;
+
+ if ((bool)bypass.GetValue(frame_number))
+ return frame;
+
+ mixed_down_input.clear();
+
+ for (int channel = 0; channel < num_input_channels; ++channel)
+ mixed_down_input.addFrom(0, 0, *frame->audio, channel, 0, num_samples, 1.0f / num_input_channels);
+
+ for (int sample = 0; sample < num_samples; ++sample) {
+ float T = threshold.GetValue(frame_number);
+ float R = ratio.GetValue(frame_number);
+ float alphaA = calculateAttackOrRelease(attack.GetValue(frame_number));
+ float alphaR = calculateAttackOrRelease(release.GetValue(frame_number));
+ float gain = makeup_gain.GetValue(frame_number);
+ float input_squared = powf(mixed_down_input.getSample(0, sample), 2.0f);
+
+ const float average_factor = 0.9999f;
+ input_level = average_factor * input_level + (1.0f - average_factor) * input_squared;
+
+ xg = (input_level <= 1e-6f) ? -60.0f : 10.0f * log10f(input_level);
+
+ if (xg > T)
+ yg = xg;
+ else
+ yg = T + (xg - T) * R;
+
+ xl = xg - yg;
+
+ if (xl < yl_prev)
+ yl = alphaA * yl_prev + (1.0f - alphaA) * xl;
+ else
+ yl = alphaR * yl_prev + (1.0f - alphaR) * xl;
+
+
+ control = powf (10.0f, (gain - yl) * 0.05f);
+ yl_prev = yl;
+
+ for (int channel = 0; channel < num_input_channels; ++channel) {
+ float new_value = frame->audio->getSample(channel, sample)*control;
+ frame->audio->setSample(channel, sample, new_value);
+ }
+ }
+
+ for (int channel = num_input_channels; channel < num_output_channels; ++channel)
+ frame->audio->clear(channel, 0, num_samples);
+
+ // return the modified frame
+ return frame;
+}
+
+float Expander::calculateAttackOrRelease(float value)
+{
+ if (value == 0.0f)
+ return 0.0f;
+ else
+ return pow (inverseE, inverse_sample_rate / value);
+}
+
+// Generate JSON string of this object
+std::string Expander::Json() const {
+
+ // Return formatted string
+ return JsonValue().toStyledString();
+}
+
+// Generate Json::Value for this object
+Json::Value Expander::JsonValue() const {
+
+ // Create root json object
+ Json::Value root = EffectBase::JsonValue(); // get parent properties
+ root["type"] = info.class_name;
+ root["threshold"] = threshold.JsonValue();
+ root["ratio"] = ratio.JsonValue();
+ root["attack"] = attack.JsonValue();
+ root["release"] = release.JsonValue();
+ root["makeup_gain"] = makeup_gain.JsonValue();
+ root["bypass"] = bypass.JsonValue();
+
+ // return JsonValue
+ return root;
+}
+
+// Load JSON string into this object
+void Expander::SetJson(const std::string value) {
+
+ // Parse JSON string into JSON objects
+ try
+ {
+ const Json::Value root = openshot::stringToJson(value);
+ // Set all values that match
+ SetJsonValue(root);
+ }
+ catch (const std::exception& e)
+ {
+ // Error parsing JSON (or missing keys)
+ throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
+ }
+}
+
+// Load Json::Value into this object
+void Expander::SetJsonValue(const Json::Value root) {
+
+ // Set parent data
+ EffectBase::SetJsonValue(root);
+
+ // Set data from Json (if key is found)
+ if (!root["threshold"].isNull())
+ threshold.SetJsonValue(root["threshold"]);
+
+ if (!root["ratio"].isNull())
+ ratio.SetJsonValue(root["ratio"]);
+
+ if (!root["attack"].isNull())
+ attack.SetJsonValue(root["attack"]);
+
+ if (!root["release"].isNull())
+ release.SetJsonValue(root["release"]);
+
+ if (!root["makeup_gain"].isNull())
+ makeup_gain.SetJsonValue(root["makeup_gain"]);
+
+ if (!root["bypass"].isNull())
+ bypass.SetJsonValue(root["bypass"]);
+}
+
+// Get all properties for a specific frame
+std::string Expander::PropertiesJSON(int64_t requested_frame) const {
+
+ // Generate JSON properties list
+ Json::Value root;
+ root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
+ root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
+ root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
+ root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
+ root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
+
+ // Keyframes
+ root["threshold"] = add_property_json("Threshold (dB)", threshold.GetValue(requested_frame), "float", "", &threshold, -60, 0, false, requested_frame);
+ root["ratio"] = add_property_json("Ratio", ratio.GetValue(requested_frame), "float", "", &ratio, 1, 100, false, requested_frame);
+ root["attack"] = add_property_json("Attack (ms)", attack.GetValue(requested_frame), "float", "", &attack, 0.1, 100, false, requested_frame);
+ root["release"] = add_property_json("Release (ms)", release.GetValue(requested_frame), "float", "", &release, 10, 1000, false, requested_frame);
+ root["makeup_gain"] = add_property_json("Makeup gain (dB)", makeup_gain.GetValue(requested_frame), "float", "", &makeup_gain, -12, 12, false, requested_frame);
+ root["bypass"] = add_property_json("Bypass", bypass.GetValue(requested_frame), "bool", "", &bypass, 0, 1, false, requested_frame);
+
+ // Return formatted string
+ return root.toStyledString();
+}
diff --git a/src/audio_effects/Expander.h b/src/audio_effects/Expander.h
new file mode 100644
index 00000000..4eee84af
--- /dev/null
+++ b/src/audio_effects/Expander.h
@@ -0,0 +1,125 @@
+/**
+ * @file
+ * @brief Header file for Expander audio effect class
+ * @author
+ *
+ * @ref License
+ */
+
+/* LICENSE
+ *
+ * Copyright (c) 2008-2019 OpenShot Studios, LLC
+ * <http://www.openshotstudios.com/>. This file is part of
+ * OpenShot Library (libopenshot), an open-source project dedicated to
+ * delivering high quality video editing and animation solutions to the
+ * world. For more information visit <http://www.openshot.org/>.
+ *
+ * OpenShot Library (libopenshot) is free software: you can redistribute it
+ * and/or modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * OpenShot Library (libopenshot) is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef OPENSHOT_EXPANDER_AUDIO_EFFECT_H
+#define OPENSHOT_EXPANDER_AUDIO_EFFECT_H
+
+#include "../EffectBase.h"
+
+#include "../Frame.h"
+#include "../Json.h"
+#include "../KeyFrame.h"
+#include "../Enums.h"
+
+#include
+#include
+#include
+
+
+namespace openshot
+{
+
+ /**
+ * @brief This class adds a expander (or noise gate) into the audio
+ *
+ */
+ class Expander : public EffectBase
+ {
+ private:
+ /// Init effect settings
+ void init_effect_details();
+
+
+ public:
+ Keyframe threshold;
+ Keyframe ratio;
+ Keyframe attack;
+ Keyframe release;
+ Keyframe makeup_gain;
+ Keyframe bypass;
+
+ juce::AudioSampleBuffer mixed_down_input;
+ float xl;
+ float yl;
+ float xg;
+ float yg;
+ float control;
+
+ float input_level;
+ float yl_prev;
+
+ float inverse_sample_rate;
+ float inverseE;
+
+ /// Blank constructor, useful when using Json to load the effect properties
+ Expander();
+
+ /// Default constructor
+ ///
+ /// @param new_level The audio default Expander level (between 1 and 100)
+ Expander(Keyframe new_threshold, Keyframe new_ratio, Keyframe new_attack, Keyframe new_release, Keyframe new_makeup_gain, Keyframe new_bypass);
+
+ float calculateAttackOrRelease(float value);
+
+ /// @brief This method is required for all derived classes of ClipBase, and returns a
+ /// new openshot::Frame object. All Clip keyframes and effects are resolved into
+ /// pixels.
+ ///
+ /// @returns A new openshot::Frame object
+ /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline.
+ std::shared_ptr GetFrame(int64_t frame_number) override {
+ return GetFrame(std::make_shared(), frame_number);
+ }
+
+ /// @brief This method is required for all derived classes of ClipBase, and returns a
+ /// modified openshot::Frame object
+ ///
+ /// The frame object is passed into this method and used as a starting point (pixels and audio).
+ /// All Clip keyframes and effects are resolved into pixels.
+ ///
+ /// @returns The modified openshot::Frame object
+ /// @param frame The frame object that needs the clip or effect applied to it
+ /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline.
+ std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override;
+
+ // Get and Set JSON methods
+ std::string Json() const override; ///< Generate JSON string of this object
+ void SetJson(const std::string value) override; ///< Load JSON string into this object
+ Json::Value JsonValue() const override; ///< Generate Json::Value for this object
+ void SetJsonValue(const Json::Value root) override; ///< Load Json::Value into this object
+
+ /// Get all properties for a specific frame (perfect for a UI to display the current state
+ /// of all properties at any time)
+ std::string PropertiesJSON(int64_t requested_frame) const override;
+ };
+
+}
+
+#endif
diff --git a/src/audio_effects/Noise.cpp b/src/audio_effects/Noise.cpp
new file mode 100644
index 00000000..5a329ba6
--- /dev/null
+++ b/src/audio_effects/Noise.cpp
@@ -0,0 +1,149 @@
+/**
+ * @file
+ * @brief Source file for Noise audio effect class
+ * @author
+ *
+ * @ref License
+ */
+
+/* LICENSE
+ *
+ * Copyright (c) 2008-2019 OpenShot Studios, LLC
+ * <http://www.openshotstudios.com/>. This file is part of
+ * OpenShot Library (libopenshot), an open-source project dedicated to
+ * delivering high quality video editing and animation solutions to the
+ * world. For more information visit <http://www.openshot.org/>.
+ *
+ * OpenShot Library (libopenshot) is free software: you can redistribute it
+ * and/or modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * OpenShot Library (libopenshot) is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
+ */
+
#include "Noise.h"
#include "Exceptions.h"

#include <random>
+
+using namespace openshot;
+
/// Blank constructor, useful when using Json to load the effect properties
Noise::Noise() : level(30) {
	// Init effect properties (default noise level of 30)
	init_effect_details();
}
+
// Default constructor
/// @param new_level Keyframe controlling the amount of noise mixed in (0-100)
Noise::Noise(Keyframe new_level) : level(new_level)
{
	// Init effect properties
	init_effect_details();
}
+
// Init effect settings
void Noise::init_effect_details()
{
	/// Initialize the values of the EffectInfo struct.
	InitEffectInfo();

	/// Set the effect info shown by host UIs
	info.class_name = "Noise";
	info.name = "Noise";
	info.description = "Random signal having equal intensity at different frequencies.";
	info.has_audio = true;   // audio-only effect
	info.has_video = false;
}
+
+// This method is required for all derived classes of EffectBase, and returns a
+// modified openshot::Frame object
+std::shared_ptr Noise::GetFrame(std::shared_ptr frame, int64_t frame_number)
+{
+ // Adding Noise
+ srand ( time(NULL) );
+ int noise = level.GetValue(frame_number);
+
+ for (int channel = 0; channel < frame->audio->getNumChannels(); channel++)
+ {
+ auto *buffer = frame->audio->getWritePointer(channel);
+
+ for (auto sample = 0; sample < frame->audio->getNumSamples(); ++sample)
+ {
+ buffer[sample] = buffer[sample]*(1 - (1+(float)noise)/100) + buffer[sample]*0.0001*(rand()%100+1)*noise;
+ }
+ }
+
+
+ // return the modified frame
+ return frame;
+}
+
// Generate JSON string of this object
std::string Noise::Json() const {

	// Serialize the Json::Value form to a styled (indented) JSON string
	return JsonValue().toStyledString();
}
+
// Generate Json::Value for this object
Json::Value Noise::JsonValue() const {

	// Create root json object, starting from the parent's serialized properties
	Json::Value root = EffectBase::JsonValue(); // get parent properties
	root["type"] = info.class_name;    // type tag consumed by deserializers
	root["level"] = level.JsonValue(); // serialized keyframe curve

	// return JsonValue
	return root;
}
+
// Load JSON string into this object
void Noise::SetJson(const std::string value) {

	// Parse JSON string into JSON objects
	try
	{
		const Json::Value root = openshot::stringToJson(value);
		// Set all values that match
		SetJsonValue(root);
	}
	catch (const std::exception& e)
	{
		// Error parsing JSON (or missing keys); re-raise as a library-specific error
		throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
	}
}
+
// Load Json::Value into this object
void Noise::SetJsonValue(const Json::Value root) {

	// Set parent data
	EffectBase::SetJsonValue(root);

	// Set data from Json (if key is found); a missing key leaves the current value untouched
	if (!root["level"].isNull())
		level.SetJsonValue(root["level"]);
}
+
+// Get all properties for a specific frame
+std::string Noise::PropertiesJSON(int64_t requested_frame) const {
+
+ // Generate JSON properties list
+ Json::Value root;
+ root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
+ root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
+ root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
+ root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
+ root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
+
+ // Keyframes
+ root["level"] = add_property_json("Level", level.GetValue(requested_frame), "int", "", &level, 0, 100, false, requested_frame);
+
+ // Return formatted string
+ return root.toStyledString();
+}
diff --git a/src/audio_effects/Noise.h b/src/audio_effects/Noise.h
new file mode 100644
index 00000000..9de41458
--- /dev/null
+++ b/src/audio_effects/Noise.h
@@ -0,0 +1,104 @@
+/**
+ * @file
+ * @brief Header file for Noise audio effect class
+ * @author
+ *
+ * @ref License
+ */
+
+/* LICENSE
+ *
+ * Copyright (c) 2008-2019 OpenShot Studios, LLC
+ * <http://www.openshotstudios.com/>. This file is part of
+ * OpenShot Library (libopenshot), an open-source project dedicated to
+ * delivering high quality video editing and animation solutions to the
+ * world. For more information visit <http://www.openshot.org/>.
+ *
+ * OpenShot Library (libopenshot) is free software: you can redistribute it
+ * and/or modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * OpenShot Library (libopenshot) is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef OPENSHOT_NOISE_AUDIO_EFFECT_H
+#define OPENSHOT_NOISE_AUDIO_EFFECT_H
+
+#include "../EffectBase.h"
+
+#include "../Frame.h"
+#include "../Json.h"
+#include "../KeyFrame.h"
+
+#include
+#include
+#include
+#include
+
+
+namespace openshot
+{
+
+ /**
+ * @brief This class adds a noise into the audio
+ *
+ */
+ class Noise : public EffectBase
+ {
+ private:
+ /// Init effect settings
+ void init_effect_details();
+
+ public:
+ Keyframe level; ///< Noise level keyframe. The amount of noise inserted on the audio.
+
+ /// Blank constructor, useful when using Json to load the effect properties
+ Noise();
+
+ /// Default constructor
+ ///
+ /// @param new_level The audio default noise level (between 1 and 100)
+ Noise(Keyframe new_level);
+
+ /// @brief This method is required for all derived classes of ClipBase, and returns a
+ /// new openshot::Frame object. All Clip keyframes and effects are resolved into
+ /// pixels.
+ ///
+ /// @returns A new openshot::Frame object
+ /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline.
+ std::shared_ptr GetFrame(int64_t frame_number) override {
+ return GetFrame(std::make_shared(), frame_number);
+ }
+
+ /// @brief This method is required for all derived classes of ClipBase, and returns a
+ /// modified openshot::Frame object
+ ///
+ /// The frame object is passed into this method and used as a starting point (pixels and audio).
+ /// All Clip keyframes and effects are resolved into pixels.
+ ///
+ /// @returns The modified openshot::Frame object
+ /// @param frame The frame object that needs the clip or effect applied to it
+ /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline.
+ std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override;
+
+ // Get and Set JSON methods
+ std::string Json() const override; ///< Generate JSON string of this object
+ void SetJson(const std::string value) override; ///< Load JSON string into this object
+ Json::Value JsonValue() const override; ///< Generate Json::Value for this object
+ void SetJsonValue(const Json::Value root) override; ///< Load Json::Value into this object
+
+ /// Get all properties for a specific frame (perfect for a UI to display the current state
+ /// of all properties at any time)
+ std::string PropertiesJSON(int64_t requested_frame) const override;
+ };
+
+}
+
+#endif
diff --git a/src/audio_effects/ParametricEQ.cpp b/src/audio_effects/ParametricEQ.cpp
new file mode 100644
index 00000000..87997b86
--- /dev/null
+++ b/src/audio_effects/ParametricEQ.cpp
@@ -0,0 +1,281 @@
+/**
+ * @file
+ * @brief Source file for ParametricEQ audio effect class
+ * @author
+ *
+ * @ref License
+ */
+
+/* LICENSE
+ *
+ * Copyright (c) 2008-2019 OpenShot Studios, LLC
+ * <http://www.openshotstudios.com/>. This file is part of
+ * OpenShot Library (libopenshot), an open-source project dedicated to
+ * delivering high quality video editing and animation solutions to the
+ * world. For more information visit <http://www.openshot.org/>.
+ *
+ * OpenShot Library (libopenshot) is free software: you can redistribute it
+ * and/or modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * OpenShot Library (libopenshot) is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "ParametricEQ.h"
+#include "Exceptions.h"
+
+using namespace openshot;
+
/// Blank constructor, useful when using Json to load the effect properties
ParametricEQ::ParametricEQ() : filter_type(LOW_PASS), frequency(500), gain(0), q_factor(0) {
	// Init effect properties (defaults: low-pass at 500 Hz, 0 dB gain, Q of 0)
	init_effect_details();
}
+
+
// Default constructor
/// @param new_filter_type Band shape to apply (see openshot::FilterType)
/// @param new_frequency   Keyframed center/cutoff frequency (Hz)
/// @param new_gain        Keyframed gain (dB)
/// @param new_q_factor    Keyframed filter Q
ParametricEQ::ParametricEQ(openshot::FilterType new_filter_type, Keyframe new_frequency, Keyframe new_gain, Keyframe new_q_factor) :
		filter_type(new_filter_type), frequency(new_frequency), gain(new_gain), q_factor(new_q_factor)
{
	// Init effect properties
	init_effect_details();
}
+
// Init effect settings
void ParametricEQ::init_effect_details()
{
	/// Initialize the values of the EffectInfo struct.
	InitEffectInfo();

	/// Set the effect info shown by host UIs
	info.class_name = "ParametricEQ";
	info.name = "Parametric EQ";
	info.description = "Filter that allows you to adjust the volume level of a frequency in the audio track.";
	info.has_audio = true;
	info.has_video = false;
	// Defer per-channel filter creation until the first GetFrame() call
	initialized = false;
}
+
+// This method is required for all derived classes of EffectBase, and returns a
+// modified openshot::Frame object
+std::shared_ptr ParametricEQ::GetFrame(std::shared_ptr frame, int64_t frame_number)
+{
+ if (!initialized)
+ {
+ filters.clear();
+
+ for (int i = 0; i < frame->audio->getNumChannels(); ++i) {
+ Filter *filter;
+ filters.add(filter = new Filter());
+ }
+
+ initialized = true;
+ }
+
+ const int num_input_channels = frame->audio->getNumChannels();
+ const int num_output_channels = frame->audio->getNumChannels();
+ const int num_samples = frame->audio->getNumSamples();
+ updateFilters(frame_number, num_samples);
+
+ for (int channel = 0; channel < frame->audio->getNumChannels(); channel++)
+ {
+ auto *channel_data = frame->audio->getWritePointer(channel);
+ filters[channel]->processSamples(channel_data, num_samples);
+ }
+
+ for (int channel = num_input_channels; channel < num_output_channels; ++channel)
+ {
+ frame->audio->clear(channel, 0, num_samples);
+ }
+
+ // return the modified frame
+ return frame;
+}
+
// Compute and install IIR coefficients for one filter instance.
//
// @param discrete_frequency Angular frequency in radians/sample (2*pi*f/fs)
// @param q_factor           Filter Q; bounds the bandwidth below pi
// @param gain               Linear gain factor (callers pass 10^(dB/20))
// @param filter_type        Integer selecting the band shape below
//                           (presumably openshot::FilterType ordinals — confirm)
void ParametricEQ::Filter::updateCoefficients (
	const double discrete_frequency,
	const double q_factor,
	const double gain,
	const int filter_type)
{
	// Shared intermediate terms for the bilinear-style designs below
	double bandwidth = jmin (discrete_frequency / q_factor, M_PI * 0.99);
	double two_cos_wc = -2.0 * cos (discrete_frequency);
	double tan_half_bw = tan (bandwidth / 2.0);
	double tan_half_wc = tan (discrete_frequency / 2.0);
	double sqrt_gain = sqrt (gain);

	switch (filter_type) {
		case 0 /* LOW_PASS */: {
			coefficients = IIRCoefficients (/* b0 */ tan_half_wc,
											/* b1 */ tan_half_wc,
											/* b2 */ 0.0,
											/* a0 */ tan_half_wc + 1.0,
											/* a1 */ tan_half_wc - 1.0,
											/* a2 */ 0.0);
			break;
		}
		case 1 /* HIGH_PASS */: {
			coefficients = IIRCoefficients (/* b0 */ 1.0,
											/* b1 */ -1.0,
											/* b2 */ 0.0,
											/* a0 */ tan_half_wc + 1.0,
											/* a1 */ tan_half_wc - 1.0,
											/* a2 */ 0.0);
			break;
		}
		case 2 /* LOW_SHELF */: {
			coefficients = IIRCoefficients (/* b0 */ gain * tan_half_wc + sqrt_gain,
											/* b1 */ gain * tan_half_wc - sqrt_gain,
											/* b2 */ 0.0,
											/* a0 */ tan_half_wc + sqrt_gain,
											/* a1 */ tan_half_wc - sqrt_gain,
											/* a2 */ 0.0);
			break;
		}
		case 3 /* HIGH_SHELF */: {
			coefficients = IIRCoefficients (/* b0 */ sqrt_gain * tan_half_wc + gain,
											/* b1 */ sqrt_gain * tan_half_wc - gain,
											/* b2 */ 0.0,
											/* a0 */ sqrt_gain * tan_half_wc + 1.0,
											/* a1 */ sqrt_gain * tan_half_wc - 1.0,
											/* a2 */ 0.0);
			break;
		}
		case 4 /* BAND_PASS */: {
			coefficients = IIRCoefficients (/* b0 */ tan_half_bw,
											/* b1 */ 0.0,
											/* b2 */ -tan_half_bw,
											/* a0 */ 1.0 + tan_half_bw,
											/* a1 */ two_cos_wc,
											/* a2 */ 1.0 - tan_half_bw);
			break;
		}
		case 5 /* BAND_STOP */: {
			coefficients = IIRCoefficients (/* b0 */ 1.0,
											/* b1 */ two_cos_wc,
											/* b2 */ 1.0,
											/* a0 */ 1.0 + tan_half_bw,
											/* a1 */ two_cos_wc,
											/* a2 */ 1.0 - tan_half_bw);
			break;
		}
		case 6 /* PEAKING_NOTCH */: {
			coefficients = IIRCoefficients (/* b0 */ sqrt_gain + gain * tan_half_bw,
											/* b1 */ sqrt_gain * two_cos_wc,
											/* b2 */ sqrt_gain - gain * tan_half_bw,
											/* a0 */ sqrt_gain + tan_half_bw,
											/* a1 */ sqrt_gain * two_cos_wc,
											/* a2 */ sqrt_gain - tan_half_bw);
			break;
		}
	}
	// NOTE(review): no default case — an out-of-range filter_type leaves the
	// previously-set coefficients in place.

	setCoefficients(coefficients);
}
+
// Recompute coefficients for every per-channel filter at the given frame.
//
// @param frame_number Frame used to evaluate the keyframed parameters
// @param sample_rate  Audio sample rate in Hz (divisor when normalizing frequency)
void ParametricEQ::updateFilters(int64_t frame_number, double sample_rate)
{
	// Hz -> radians/sample; gain dB -> linear (10^(dB/20))
	double discrete_frequency = 2.0 * M_PI * (double)frequency.GetValue(frame_number) / sample_rate;
	double q_value = (double)q_factor.GetValue(frame_number);
	double gain_value = pow(10.0, (double)gain.GetValue(frame_number) * 0.05);
	int filter_type_value = (int)filter_type;

	for (int i = 0; i < filters.size(); ++i)
		filters[i]->updateCoefficients(discrete_frequency, q_value, gain_value, filter_type_value);
}
+
// Generate JSON string of this object
std::string ParametricEQ::Json() const {

	// Serialize the Json::Value form to a styled (indented) JSON string
	return JsonValue().toStyledString();
}
+
+// Generate Json::Value for this object
+Json::Value ParametricEQ::JsonValue() const {
+
+ // Create root json object
+ Json::Value root = EffectBase::JsonValue(); // get parent properties
+ root["type"] = info.class_name;
+ root["filter_type"] = filter_type;
+ root["frequency"] = frequency.JsonValue();;
+ root["q_factor"] = q_factor.JsonValue();
+ root["gain"] = gain.JsonValue();
+
+ // return JsonValue
+ return root;
+}
+
// Load JSON string into this object
void ParametricEQ::SetJson(const std::string value) {

	// Parse JSON string into JSON objects
	try
	{
		const Json::Value root = openshot::stringToJson(value);
		// Set all values that match
		SetJsonValue(root);
	}
	catch (const std::exception& e)
	{
		// Error parsing JSON (or missing keys); re-raise as a library-specific error
		throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
	}
}
+
// Load Json::Value into this object
void ParametricEQ::SetJsonValue(const Json::Value root) {

	// Set parent data
	EffectBase::SetJsonValue(root);

	// Set data from Json (if key is found); missing keys leave current values untouched
	if (!root["filter_type"].isNull())
		filter_type = (FilterType)root["filter_type"].asInt();

	if (!root["frequency"].isNull())
		frequency.SetJsonValue(root["frequency"]);

	if (!root["gain"].isNull())
		gain.SetJsonValue(root["gain"]);

	if (!root["q_factor"].isNull())
		q_factor.SetJsonValue(root["q_factor"]);
}
+
+// Get all properties for a specific frame
+std::string ParametricEQ::PropertiesJSON(int64_t requested_frame) const {
+
+ // Generate JSON properties list
+ Json::Value root;
+ root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
+ root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
+ root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
+ root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
+ root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
+
+ // Keyframes
+ root["filter_type"] = add_property_json("Filter Type", filter_type, "int", "", NULL, 0, 3, false, requested_frame);
+ root["frequency"] = add_property_json("Frequency (Hz)", frequency.GetValue(requested_frame), "int", "", &frequency, 20, 20000, false, requested_frame);
+ root["gain"] = add_property_json("Gain (dB)", gain.GetValue(requested_frame), "int", "", &gain, -24, 24, false, requested_frame);
+ root["q_factor"] = add_property_json("Q Factor", q_factor.GetValue(requested_frame), "float", "", &q_factor, 0, 20, false, requested_frame);
+
+ // Add filter_type choices (dropdown style)
+ root["filter_type"]["choices"].append(add_property_choice_json("Low Pass", LOW_PASS, filter_type));
+ root["filter_type"]["choices"].append(add_property_choice_json("High Pass", HIGH_PASS, filter_type));
+ root["filter_type"]["choices"].append(add_property_choice_json("Low Shelf", LOW_SHELF, filter_type));
+ root["filter_type"]["choices"].append(add_property_choice_json("High Shelf", HIGH_SHELF, filter_type));
+ root["filter_type"]["choices"].append(add_property_choice_json("Band Pass", BAND_PASS, filter_type));
+ root["filter_type"]["choices"].append(add_property_choice_json("Band Stop", BAND_STOP, filter_type));
+ root["filter_type"]["choices"].append(add_property_choice_json("Peaking Notch", PEAKING_NOTCH, filter_type));
+
+ // Return formatted string
+ return root.toStyledString();
+}
diff --git a/src/audio_effects/ParametricEQ.h b/src/audio_effects/ParametricEQ.h
new file mode 100644
index 00000000..d131d98d
--- /dev/null
+++ b/src/audio_effects/ParametricEQ.h
@@ -0,0 +1,123 @@
+/**
+ * @file
+ * @brief Header file for Parametric EQ audio effect class
+ * @author
+ *
+ * @ref License
+ */
+
+/* LICENSE
+ *
+ * Copyright (c) 2008-2019 OpenShot Studios, LLC
+ * <http://www.openshotstudios.com/>. This file is part of
+ * OpenShot Library (libopenshot), an open-source project dedicated to
+ * delivering high quality video editing and animation solutions to the
+ * world. For more information visit <http://www.openshot.org/>.
+ *
+ * OpenShot Library (libopenshot) is free software: you can redistribute it
+ * and/or modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * OpenShot Library (libopenshot) is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef OPENSHOT_PARAMETRIC_EQ_AUDIO_EFFECT_H
+#define OPENSHOT_PARAMETRIC_EQ_AUDIO_EFFECT_H
+#define _USE_MATH_DEFINES
+
+#include "../EffectBase.h"
+
+#include "../Frame.h"
+#include "../Json.h"
+#include "../KeyFrame.h"
+#include "../Enums.h"
+
+#include
+#include
+#include
+// #include
+
+
+namespace openshot
+{
+
+ /**
+ * @brief This class adds a equalization into the audio
+ *
+ */
+ class ParametricEQ : public EffectBase
+ {
+ private:
+ /// Init effect settings
+ void init_effect_details();
+
+ public:
+ openshot::FilterType filter_type;
+ Keyframe frequency;
+ Keyframe q_factor;
+ Keyframe gain;
+ bool initialized;
+
+ /// Blank constructor, useful when using Json to load the effect properties
+ ParametricEQ();
+
+ /// Default constructor
+ ///
+ /// @param new_level
+ ParametricEQ(openshot::FilterType new_filter_type, Keyframe new_frequency, Keyframe new_gain, Keyframe new_q_factor);
+
+ /// @brief This method is required for all derived classes of ClipBase, and returns a
+ /// new openshot::Frame object. All Clip keyframes and effects are resolved into
+ /// pixels.
+ ///
+ /// @returns A new openshot::Frame object
+ /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline.
+ std::shared_ptr GetFrame(int64_t frame_number) override {
+ return GetFrame(std::make_shared(), frame_number);
+ }
+
+ /// @brief This method is required for all derived classes of ClipBase, and returns a
+ /// modified openshot::Frame object
+ ///
+ /// The frame object is passed into this method and used as a starting point (pixels and audio).
+ /// All Clip keyframes and effects are resolved into pixels.
+ ///
+ /// @returns The modified openshot::Frame object
+ /// @param frame The frame object that needs the clip or effect applied to it
+ /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline.
+ std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override;
+
+ // Get and Set JSON methods
+ std::string Json() const override; ///< Generate JSON string of this object
+ void SetJson(const std::string value) override; ///< Load JSON string into this object
+ Json::Value JsonValue() const override; ///< Generate Json::Value for this object
+ void SetJsonValue(const Json::Value root) override; ///< Load Json::Value into this object
+
+ /// Get all properties for a specific frame (perfect for a UI to display the current state
+ /// of all properties at any time)
+ std::string PropertiesJSON(int64_t requested_frame) const override;
+
+ class Filter : public IIRFilter
+ {
+ public:
+ void updateCoefficients (const double discrete_frequency,
+ const double q_factor,
+ const double gain,
+ const int filter_type);
+ };
+
+ juce::OwnedArray filters;
+
+ void updateFilters(int64_t frame_number, double sample_rate);
+ };
+
+}
+
+#endif
diff --git a/src/audio_effects/Robotization.cpp b/src/audio_effects/Robotization.cpp
new file mode 100644
index 00000000..d1687150
--- /dev/null
+++ b/src/audio_effects/Robotization.cpp
@@ -0,0 +1,192 @@
+/**
+ * @file
+ * @brief Source file for Robotization audio effect class
+ * @author
+ *
+ * @ref License
+ */
+
+/* LICENSE
+ *
+ * Copyright (c) 2008-2019 OpenShot Studios, LLC
+ * <http://www.openshotstudios.com/>. This file is part of
+ * OpenShot Library (libopenshot), an open-source project dedicated to
+ * delivering high quality video editing and animation solutions to the
+ * world. For more information visit <http://www.openshot.org/>.
+ *
+ * OpenShot Library (libopenshot) is free software: you can redistribute it
+ * and/or modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * OpenShot Library (libopenshot) is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "Robotization.h"
+#include "Exceptions.h"
+
+using namespace openshot;
+
+
/// Blank constructor, useful when using Json to load the effect properties.
/// stft(*this) wires the STFT helper back to this effect instance.
Robotization::Robotization() : fft_size(FFT_SIZE_512), hop_size(HOP_SIZE_2), window_type(RECTANGULAR), stft(*this) {
	// Init effect properties
	init_effect_details();
}
+
// Default constructor
/// @param new_fft_size    FFT window size selector (see openshot::FFTSize)
/// @param new_hop_size    Hop size selector (see openshot::HopSize)
/// @param new_window_type Analysis window (see openshot::WindowType)
Robotization::Robotization(openshot::FFTSize new_fft_size, openshot::HopSize new_hop_size, openshot::WindowType new_window_type) :
		fft_size(new_fft_size), hop_size(new_hop_size), window_type(new_window_type), stft(*this)
{
	// Init effect properties
	init_effect_details();
}
+
// Init effect settings
void Robotization::init_effect_details()
{
	/// Initialize the values of the EffectInfo struct.
	InitEffectInfo();

	/// Set the effect info shown by host UIs
	info.class_name = "Robotization";
	info.name = "Robotization";
	info.description = "Transform the voice present in an audio track into a robotic voice effect.";
	info.has_audio = true;
	info.has_video = false;
}
+
+// This method is required for all derived classes of EffectBase, and returns a
+// modified openshot::Frame object
+std::shared_ptr Robotization::GetFrame(std::shared_ptr frame, int64_t frame_number)
+{
+ const ScopedLock sl (lock);
+ ScopedNoDenormals noDenormals;
+
+ const int num_input_channels = frame->audio->getNumChannels();
+ const int num_output_channels = frame->audio->getNumChannels();
+ const int num_samples = frame->audio->getNumSamples();
+ const int hop_size_value = 1 << ((int)hop_size + 1);
+ const int fft_size_value = 1 << ((int)fft_size + 5);
+
+ stft.setup(num_output_channels);
+ stft.updateParameters((int)fft_size_value,
+ (int)hop_size_value,
+ (int)window_type);
+
+ stft.process(*frame->audio);
+
+ // return the modified frame
+ return frame;
+}
+
// STFT hook: robotize one channel's current analysis window.
// Transforms to the frequency domain, replaces every bin with its magnitude
// (phase set to zero), then transforms back.
void Robotization::RobotizationEffect::modification(const int channel)
{
	// Forward FFT: time_domain_buffer -> frequency_domain_buffer
	fft->perform(time_domain_buffer, frequency_domain_buffer, false);

	for (int index = 0; index < fft_size; ++index) {
		// Keep only the magnitude of each bin; discard the phase
		float magnitude = abs(frequency_domain_buffer[index]);
		frequency_domain_buffer[index].real(magnitude);
		frequency_domain_buffer[index].imag(0.0f);
	}

	// Inverse FFT back to the time domain
	fft->perform(frequency_domain_buffer, time_domain_buffer, true);
}
+
// Generate JSON string of this object
std::string Robotization::Json() const {

	// Serialize the Json::Value form to a styled (indented) JSON string
	return JsonValue().toStyledString();
}
+
// Generate Json::Value for this object
Json::Value Robotization::JsonValue() const {

	// Create root json object, starting from the parent's serialized properties
	Json::Value root = EffectBase::JsonValue(); // get parent properties
	root["type"] = info.class_name;   // type tag consumed by deserializers
	root["fft_size"] = fft_size;      // enum stored as its integer ordinal
	root["hop_size"] = hop_size;
	root["window_type"] = window_type;

	// return JsonValue
	return root;
}
+
// Load JSON string into this object
void Robotization::SetJson(const std::string value) {

	// Parse JSON string into JSON objects
	try
	{
		const Json::Value root = openshot::stringToJson(value);
		// Set all values that match
		SetJsonValue(root);
	}
	catch (const std::exception& e)
	{
		// Error parsing JSON (or missing keys); re-raise as a library-specific error
		throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
	}
}
+
// Load Json::Value into this object
void Robotization::SetJsonValue(const Json::Value root) {

	// Set parent data
	EffectBase::SetJsonValue(root);

	// Set data from Json (if key is found); missing keys leave current values untouched
	if (!root["fft_size"].isNull())
		fft_size = (FFTSize)root["fft_size"].asInt();

	if (!root["hop_size"].isNull())
		hop_size = (HopSize)root["hop_size"].asInt();

	if (!root["window_type"].isNull())
		window_type = (WindowType)root["window_type"].asInt();
}
+
// Get all properties for a specific frame
std::string Robotization::PropertiesJSON(int64_t requested_frame) const {

	// Generate JSON properties list
	Json::Value root;
	// Base clip properties (id and duration are read-only)
	root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
	root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
	root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
	root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
	root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);

	// Effect parameters (enum ordinals)
	// NOTE(review): the max of 8 for fft_size exceeds the five choices listed
	// below — confirm the intended range against the FFTSize enum.
	root["fft_size"] = add_property_json("FFT Size", fft_size, "int", "", NULL, 0, 8, false, requested_frame);
	root["hop_size"] = add_property_json("Hop Size", hop_size, "int", "", NULL, 0, 2, false, requested_frame);
	root["window_type"] = add_property_json("Window Type", window_type, "int", "", NULL, 0, 3, false, requested_frame);

	// Add fft_size choices (dropdown style)
	root["fft_size"]["choices"].append(add_property_choice_json("128", FFT_SIZE_128, fft_size));
	root["fft_size"]["choices"].append(add_property_choice_json("256", FFT_SIZE_256, fft_size));
	root["fft_size"]["choices"].append(add_property_choice_json("512", FFT_SIZE_512, fft_size));
	root["fft_size"]["choices"].append(add_property_choice_json("1024", FFT_SIZE_1024, fft_size));
	root["fft_size"]["choices"].append(add_property_choice_json("2048", FFT_SIZE_2048, fft_size));

	// Add hop_size choices (dropdown style)
	root["hop_size"]["choices"].append(add_property_choice_json("1/2", HOP_SIZE_2, hop_size));
	root["hop_size"]["choices"].append(add_property_choice_json("1/4", HOP_SIZE_4, hop_size));
	root["hop_size"]["choices"].append(add_property_choice_json("1/8", HOP_SIZE_8, hop_size));

	// Add window_type choices (dropdown style)
	root["window_type"]["choices"].append(add_property_choice_json("Rectangular", RECTANGULAR, window_type));
	root["window_type"]["choices"].append(add_property_choice_json("Bart Lett", BART_LETT, window_type));
	root["window_type"]["choices"].append(add_property_choice_json("Hann", HANN, window_type));
	root["window_type"]["choices"].append(add_property_choice_json("Hamming", HAMMING, window_type));

	// Return formatted string
	return root.toStyledString();
}
\ No newline at end of file
diff --git a/src/audio_effects/Robotization.h b/src/audio_effects/Robotization.h
new file mode 100644
index 00000000..88a61116
--- /dev/null
+++ b/src/audio_effects/Robotization.h
@@ -0,0 +1,125 @@
+/**
+ * @file
+ * @brief Header file for Robotization audio effect class
+ * @author
+ *
+ * @ref License
+ */
+
+/* LICENSE
+ *
+ * Copyright (c) 2008-2019 OpenShot Studios, LLC
+ * <http://www.openshotstudios.com/>. This file is part of
+ * OpenShot Library (libopenshot), an open-source project dedicated to
+ * delivering high quality video editing and animation solutions to the
+ * world. For more information visit <http://www.openshot.org/>.
+ *
+ * OpenShot Library (libopenshot) is free software: you can redistribute it
+ * and/or modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * OpenShot Library (libopenshot) is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef OPENSHOT_ROBOTIZATION_AUDIO_EFFECT_H
+#define OPENSHOT_ROBOTIZATION_AUDIO_EFFECT_H
+#define _USE_MATH_DEFINES
+
+#include "../EffectBase.h"
+
+#include "../Frame.h"
+#include "../Json.h"
+#include "../KeyFrame.h"
+#include "../Enums.h"
+#include "STFT.h"
+
+#include <memory>
+#include <string>
+#include <math.h>
+#include <cmath>
+
+
+namespace openshot
+{
+
+ /**
+ * @brief This class adds a robotization effect into the audio
+ *
+ */
+ class Robotization : public EffectBase
+ {
+ private:
+ /// Init effect settings
+ void init_effect_details();
+
+ public:
+ openshot::FFTSize fft_size;
+ openshot::HopSize hop_size;
+ openshot::WindowType window_type;
+
+ /// Blank constructor, useful when using Json to load the effect properties
+ Robotization();
+
+ /// Default constructor
+ ///
+ /// @param new_level The audio default Robotization level (between 1 and 100)
+ Robotization(openshot::FFTSize new_fft_size, openshot::HopSize new_hop_size, openshot::WindowType new_window_type);
+
+ /// @brief This method is required for all derived classes of ClipBase, and returns a
+ /// new openshot::Frame object. All Clip keyframes and effects are resolved into
+ /// pixels.
+ ///
+ /// @returns A new openshot::Frame object
+ /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline.
+ std::shared_ptr<openshot::Frame> GetFrame(int64_t frame_number) override {
+ return GetFrame(std::make_shared<openshot::Frame>(), frame_number);
+ }
+
+ /// @brief This method is required for all derived classes of ClipBase, and returns a
+ /// modified openshot::Frame object
+ ///
+ /// The frame object is passed into this method and used as a starting point (pixels and audio).
+ /// All Clip keyframes and effects are resolved into pixels.
+ ///
+ /// @returns The modified openshot::Frame object
+ /// @param frame The frame object that needs the clip or effect applied to it
+ /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline.
+ std::shared_ptr<openshot::Frame> GetFrame(std::shared_ptr<openshot::Frame> frame, int64_t frame_number) override;
+
+ // Get and Set JSON methods
+ std::string Json() const override; ///< Generate JSON string of this object
+ void SetJson(const std::string value) override; ///< Load JSON string into this object
+ Json::Value JsonValue() const override; ///< Generate Json::Value for this object
+ void SetJsonValue(const Json::Value root) override; ///< Load Json::Value into this object
+
+ /// Get all properties for a specific frame (perfect for a UI to display the current state
+ /// of all properties at any time)
+ std::string PropertiesJSON(int64_t requested_frame) const override;
+
+
+ class RobotizationEffect : public STFT
+ {
+ public:
+ RobotizationEffect (Robotization& p) : parent (p) { }
+
+ private:
+ void modification(const int channel) override;
+
+ Robotization &parent;
+ };
+
+ juce::CriticalSection lock;
+ RobotizationEffect stft;
+ std::unique_ptr<juce::dsp::FFT> fft;
+ };
+
+}
+
+#endif
diff --git a/src/audio_effects/STFT.cpp b/src/audio_effects/STFT.cpp
new file mode 100644
index 00000000..3b87e6e3
--- /dev/null
+++ b/src/audio_effects/STFT.cpp
@@ -0,0 +1,188 @@
+#include "STFT.h"
+
+using namespace openshot;
+
+void STFT::setup(const int num_input_channels)
+{
+ num_channels = (num_input_channels > 0) ? num_input_channels : 1;
+}
+
+void STFT::updateParameters(const int new_fft_size, const int new_overlap, const int new_window_type)
+{
+ updateFftSize(new_fft_size);
+ updateHopSize(new_overlap);
+ updateWindow(new_window_type);
+}
+
+void STFT::process(juce::AudioSampleBuffer &block)
+{
+ num_samples = block.getNumSamples();
+
+ for (int channel = 0; channel < num_channels; ++channel) {
+ float *channel_data = block.getWritePointer(channel);
+
+ current_input_buffer_write_position = input_buffer_write_position;
+ current_output_buffer_write_position = output_buffer_write_position;
+ current_output_buffer_read_position = output_buffer_read_position;
+ current_samples_since_last_FFT = samples_since_last_FFT;
+
+ for (int sample = 0; sample < num_samples; ++sample) {
+ const float input_sample = channel_data[sample];
+
+ input_buffer.setSample(channel, current_input_buffer_write_position, input_sample);
+ if (++current_input_buffer_write_position >= input_buffer_length)
+ current_input_buffer_write_position = 0;
+ // diff
+ channel_data[sample] = output_buffer.getSample(channel, current_output_buffer_read_position);
+
+ output_buffer.setSample(channel, current_output_buffer_read_position, 0.0f);
+ if (++current_output_buffer_read_position >= output_buffer_length)
+ current_output_buffer_read_position = 0;
+
+ if (++current_samples_since_last_FFT >= hop_size) {
+ current_samples_since_last_FFT = 0;
+ analysis(channel);
+ modification(channel);
+ synthesis(channel);
+ }
+ }
+ }
+
+ input_buffer_write_position = current_input_buffer_write_position;
+ output_buffer_write_position = current_output_buffer_write_position;
+ output_buffer_read_position = current_output_buffer_read_position;
+ samples_since_last_FFT = current_samples_since_last_FFT;
+}
+
+
+void STFT::updateFftSize(const int new_fft_size)
+{
+ if (new_fft_size != fft_size)
+ {
+ fft_size = new_fft_size;
+ fft = std::make_unique<juce::dsp::FFT>(log2(fft_size));
+
+ input_buffer_length = fft_size;
+ input_buffer.clear();
+ input_buffer.setSize(num_channels, input_buffer_length);
+
+ output_buffer_length = fft_size;
+ output_buffer.clear();
+ output_buffer.setSize(num_channels, output_buffer_length);
+
+ fft_window.realloc(fft_size);
+ fft_window.clear(fft_size);
+
+ time_domain_buffer.realloc(fft_size);
+ time_domain_buffer.clear(fft_size);
+
+ frequency_domain_buffer.realloc(fft_size);
+ frequency_domain_buffer.clear(fft_size);
+
+ input_buffer_write_position = 0;
+ output_buffer_write_position = 0;
+ output_buffer_read_position = 0;
+ samples_since_last_FFT = 0;
+ }
+}
+
+void STFT::updateHopSize(const int new_overlap)
+{
+ if (new_overlap != overlap)
+ {
+ overlap = new_overlap;
+
+ if (overlap != 0) {
+ hop_size = fft_size / overlap;
+ output_buffer_write_position = hop_size % output_buffer_length;
+ }
+ }
+}
+
+
+void STFT::updateWindow(const int new_window_type)
+{
+ window_type = new_window_type;
+
+ switch (window_type) {
+ case RECTANGULAR: {
+ for (int sample = 0; sample < fft_size; ++sample)
+ fft_window[sample] = 1.0f;
+ break;
+ }
+ case BART_LETT: {
+ for (int sample = 0; sample < fft_size; ++sample)
+ fft_window[sample] = 1.0f - fabs (2.0f * (float)sample / (float)(fft_size - 1) - 1.0f);
+ break;
+ }
+ case HANN: {
+ for (int sample = 0; sample < fft_size; ++sample)
+ fft_window[sample] = 0.5f - 0.5f * cosf (2.0f * M_PI * (float)sample / (float)(fft_size - 1));
+ break;
+ }
+ case HAMMING: {
+ for (int sample = 0; sample < fft_size; ++sample)
+ fft_window[sample] = 0.54f - 0.46f * cosf (2.0f * M_PI * (float)sample / (float)(fft_size - 1));
+ break;
+ }
+ }
+
+ float window_sum = 0.0f;
+ for (int sample = 0; sample < fft_size; ++sample)
+ window_sum += fft_window[sample];
+
+ window_scale_factor = 0.0f;
+ if (overlap != 0 && window_sum != 0.0f)
+ window_scale_factor = 1.0f / (float)overlap / window_sum * (float)fft_size;
+}
+
+
+
+void STFT::analysis(const int channel)
+{
+ int input_buffer_index = current_input_buffer_write_position;
+ for (int index = 0; index < fft_size; ++index) {
+ time_domain_buffer[index].real(fft_window[index] * input_buffer.getSample(channel, input_buffer_index));
+ time_domain_buffer[index].imag(0.0f);
+
+ if (++input_buffer_index >= input_buffer_length)
+ input_buffer_index = 0;
+ }
+}
+
+void STFT::modification(const int channel)
+{
+ fft->perform(time_domain_buffer, frequency_domain_buffer, false);
+
+ for (int index = 0; index < fft_size / 2 + 1; ++index) {
+ float magnitude = abs(frequency_domain_buffer[index]);
+ float phase = arg(frequency_domain_buffer[index]);
+
+ frequency_domain_buffer[index].real(magnitude * cosf (phase));
+ frequency_domain_buffer[index].imag(magnitude * sinf (phase));
+
+ if (index > 0 && index < fft_size / 2) {
+ frequency_domain_buffer[fft_size - index].real(magnitude * cosf (phase));
+ frequency_domain_buffer[fft_size - index].imag(magnitude * sinf (-phase));
+ }
+ }
+
+ fft->perform(frequency_domain_buffer, time_domain_buffer, true);
+}
+
+void STFT::synthesis(const int channel)
+{
+ int output_buffer_index = current_output_buffer_write_position;
+ for (int index = 0; index < fft_size; ++index) {
+ float output_sample = output_buffer.getSample(channel, output_buffer_index);
+ output_sample += time_domain_buffer[index].real() * window_scale_factor;
+ output_buffer.setSample(channel, output_buffer_index, output_sample);
+
+ if (++output_buffer_index >= output_buffer_length)
+ output_buffer_index = 0;
+ }
+
+ current_output_buffer_write_position += hop_size;
+ if (current_output_buffer_write_position >= output_buffer_length)
+ current_output_buffer_write_position = 0;
+}
\ No newline at end of file
diff --git a/src/audio_effects/STFT.h b/src/audio_effects/STFT.h
new file mode 100644
index 00000000..71f71a78
--- /dev/null
+++ b/src/audio_effects/STFT.h
@@ -0,0 +1,74 @@
+#pragma once
+
+#ifndef OPENSHOT_STFT_AUDIO_EFFECT_H
+#define OPENSHOT_STFT_AUDIO_EFFECT_H
+#define _USE_MATH_DEFINES
+
+#include "../EffectBase.h"
+#include "../Enums.h"
+
+namespace openshot
+{
+
+ class STFT
+ {
+ public:
+ STFT() : num_channels (1) { }
+
+ virtual ~STFT() { }
+
+ void setup(const int num_input_channels);
+
+ void process(juce::AudioSampleBuffer &block);
+
+ void updateParameters(const int new_fft_size, const int new_overlap, const int new_window_type);
+
+ virtual void updateFftSize(const int new_fft_size);
+
+ virtual void updateHopSize(const int new_overlap);
+
+ virtual void updateWindow(const int new_window_type);
+
+ private:
+
+ virtual void modification(const int channel);
+
+ virtual void analysis(const int channel);
+
+ virtual void synthesis(const int channel);
+
+ protected:
+ int num_channels;
+ int num_samples;
+
+ int fft_size;
+ std::unique_ptr<juce::dsp::FFT> fft;
+
+ int input_buffer_length;
+ juce::AudioSampleBuffer input_buffer;
+
+ int output_buffer_length;
+ juce::AudioSampleBuffer output_buffer;
+
+ juce::HeapBlock<float> fft_window;
+ juce::HeapBlock<std::complex<float>> time_domain_buffer;
+ juce::HeapBlock<std::complex<float>> frequency_domain_buffer;
+
+ int overlap;
+ int hop_size;
+ int window_type;
+ float window_scale_factor;
+
+ int input_buffer_write_position;
+ int output_buffer_write_position;
+ int output_buffer_read_position;
+ int samples_since_last_FFT;
+
+ int current_input_buffer_write_position;
+ int current_output_buffer_write_position;
+ int current_output_buffer_read_position;
+ int current_samples_since_last_FFT;
+ };
+}
+
+#endif
diff --git a/src/audio_effects/Whisperization.cpp b/src/audio_effects/Whisperization.cpp
new file mode 100644
index 00000000..7c235e26
--- /dev/null
+++ b/src/audio_effects/Whisperization.cpp
@@ -0,0 +1,200 @@
+/**
+ * @file
+ * @brief Source file for Whisperization audio effect class
+ * @author
+ *
+ * @ref License
+ */
+
+/* LICENSE
+ *
+ * Copyright (c) 2008-2019 OpenShot Studios, LLC
+ * <http://www.openshotstudios.com/>. This file is part of
+ * OpenShot Library (libopenshot), an open-source project dedicated to
+ * delivering high quality video editing and animation solutions to the
+ * world. For more information visit <http://www.openshot.org/>.
+ *
+ * OpenShot Library (libopenshot) is free software: you can redistribute it
+ * and/or modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * OpenShot Library (libopenshot) is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "Whisperization.h"
+#include "Exceptions.h"
+
+using namespace openshot;
+
+/// Blank constructor, useful when using Json to load the effect properties
+Whisperization::Whisperization() : fft_size(FFT_SIZE_512), hop_size(HOP_SIZE_8), window_type(RECTANGULAR), stft(*this) {
+ // Init effect properties
+ init_effect_details();
+}
+
+// Default constructor
+Whisperization::Whisperization(openshot::FFTSize new_fft_size, openshot::HopSize new_hop_size, openshot::WindowType new_window_type) :
+ fft_size(new_fft_size), hop_size(new_hop_size), window_type(new_window_type), stft(*this)
+{
+ // Init effect properties
+ init_effect_details();
+}
+
+// Init effect settings
+void Whisperization::init_effect_details()
+{
+ /// Initialize the values of the EffectInfo struct.
+ InitEffectInfo();
+
+ /// Set the effect info
+ info.class_name = "Whisperization";
+ info.name = "Whisperization";
+ info.description = "Transform the voice present in an audio track into a whispering voice effect.";
+ info.has_audio = true;
+ info.has_video = false;
+}
+
+// This method is required for all derived classes of EffectBase, and returns a
+// modified openshot::Frame object
+std::shared_ptr<openshot::Frame> Whisperization::GetFrame(std::shared_ptr<openshot::Frame> frame, int64_t frame_number)
+{
+ const ScopedLock sl (lock);
+ ScopedNoDenormals noDenormals;
+
+ const int num_input_channels = frame->audio->getNumChannels();
+ const int num_output_channels = frame->audio->getNumChannels();
+ const int num_samples = frame->audio->getNumSamples();
+ const int hop_size_value = 1 << ((int)hop_size + 1);
+ const int fft_size_value = 1 << ((int)fft_size + 5);
+
+ stft.setup(num_output_channels);
+ stft.updateParameters((int)fft_size_value,
+ (int)hop_size_value,
+ (int)window_type);
+
+ stft.process(*frame->audio);
+
+ // return the modified frame
+ return frame;
+}
+
+void Whisperization::WhisperizationEffect::modification(const int channel)
+{
+ fft->perform(time_domain_buffer, frequency_domain_buffer, false);
+
+ for (int index = 0; index < fft_size / 2 + 1; ++index) {
+ float magnitude = abs(frequency_domain_buffer[index]);
+ float phase = 2.0f * M_PI * (float)rand() / (float)RAND_MAX;
+
+ frequency_domain_buffer[index].real(magnitude * cosf(phase));
+ frequency_domain_buffer[index].imag(magnitude * sinf(phase));
+
+ if (index > 0 && index < fft_size / 2) {
+ frequency_domain_buffer[fft_size - index].real(magnitude * cosf (phase));
+ frequency_domain_buffer[fft_size - index].imag(magnitude * sinf (-phase));
+ }
+ }
+
+ fft->perform(frequency_domain_buffer, time_domain_buffer, true);
+}
+
+
+// Generate JSON string of this object
+std::string Whisperization::Json() const {
+
+ // Return formatted string
+ return JsonValue().toStyledString();
+}
+
+// Generate Json::Value for this object
+Json::Value Whisperization::JsonValue() const {
+
+ // Create root json object
+ Json::Value root = EffectBase::JsonValue(); // get parent properties
+ root["type"] = info.class_name;
+ root["fft_size"] = fft_size;
+ root["hop_size"] = hop_size;
+ root["window_type"] = window_type;
+
+ // return JsonValue
+ return root;
+}
+
+// Load JSON string into this object
+void Whisperization::SetJson(const std::string value) {
+
+ // Parse JSON string into JSON objects
+ try
+ {
+ const Json::Value root = openshot::stringToJson(value);
+ // Set all values that match
+ SetJsonValue(root);
+ }
+ catch (const std::exception& e)
+ {
+ // Error parsing JSON (or missing keys)
+ throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
+ }
+}
+
+// Load Json::Value into this object
+void Whisperization::SetJsonValue(const Json::Value root) {
+
+ // Set parent data
+ EffectBase::SetJsonValue(root);
+
+ if (!root["fft_size"].isNull())
+ fft_size = (FFTSize)root["fft_size"].asInt();
+
+ if (!root["hop_size"].isNull())
+ hop_size = (HopSize)root["hop_size"].asInt();
+
+ if (!root["window_type"].isNull())
+ window_type = (WindowType)root["window_type"].asInt();
+}
+
+// Get all properties for a specific frame
+std::string Whisperization::PropertiesJSON(int64_t requested_frame) const {
+
+ // Generate JSON properties list
+ Json::Value root;
+ root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
+ root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
+ root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
+ root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
+ root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
+
+ // Keyframes
+ root["fft_size"] = add_property_json("FFT Size", fft_size, "int", "", NULL, 0, 8, false, requested_frame);
+ root["hop_size"] = add_property_json("Hop Size", hop_size, "int", "", NULL, 0, 2, false, requested_frame);
+ root["window_type"] = add_property_json("Window Type", window_type, "int", "", NULL, 0, 3, false, requested_frame);
+
+ // Add fft_size choices (dropdown style)
+ root["fft_size"]["choices"].append(add_property_choice_json("128", FFT_SIZE_128, fft_size));
+ root["fft_size"]["choices"].append(add_property_choice_json("256", FFT_SIZE_256, fft_size));
+ root["fft_size"]["choices"].append(add_property_choice_json("512", FFT_SIZE_512, fft_size));
+ root["fft_size"]["choices"].append(add_property_choice_json("1024", FFT_SIZE_1024, fft_size));
+ root["fft_size"]["choices"].append(add_property_choice_json("2048", FFT_SIZE_2048, fft_size));
+
+ // Add hop_size choices (dropdown style)
+ root["hop_size"]["choices"].append(add_property_choice_json("1/2", HOP_SIZE_2, hop_size));
+ root["hop_size"]["choices"].append(add_property_choice_json("1/4", HOP_SIZE_4, hop_size));
+ root["hop_size"]["choices"].append(add_property_choice_json("1/8", HOP_SIZE_8, hop_size));
+
+ // Add window_type choices (dropdown style)
+ root["window_type"]["choices"].append(add_property_choice_json("Rectangular", RECTANGULAR, window_type));
+ root["window_type"]["choices"].append(add_property_choice_json("Bart Lett", BART_LETT, window_type));
+ root["window_type"]["choices"].append(add_property_choice_json("Hann", HANN, window_type));
+ root["window_type"]["choices"].append(add_property_choice_json("Hamming", HAMMING, window_type));
+
+
+ // Return formatted string
+ return root.toStyledString();
+}
diff --git a/src/audio_effects/Whisperization.h b/src/audio_effects/Whisperization.h
new file mode 100644
index 00000000..5659a861
--- /dev/null
+++ b/src/audio_effects/Whisperization.h
@@ -0,0 +1,125 @@
+/**
+ * @file
+ * @brief Header file for whisperization audio effect class
+ * @author
+ *
+ * @ref License
+ */
+
+/* LICENSE
+ *
+ * Copyright (c) 2008-2019 OpenShot Studios, LLC
+ * <http://www.openshotstudios.com/>. This file is part of
+ * OpenShot Library (libopenshot), an open-source project dedicated to
+ * delivering high quality video editing and animation solutions to the
+ * world. For more information visit <http://www.openshot.org/>.
+ *
+ * OpenShot Library (libopenshot) is free software: you can redistribute it
+ * and/or modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * OpenShot Library (libopenshot) is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef OPENSHOT_WHISPERIZATION_AUDIO_EFFECT_H
+#define OPENSHOT_WHISPERIZATION_AUDIO_EFFECT_H
+#define _USE_MATH_DEFINES
+
+#include "../EffectBase.h"
+
+#include "../Frame.h"
+#include "../Json.h"
+#include "../KeyFrame.h"
+#include "../Enums.h"
+#include "STFT.h"
+
+#include <memory>
+#include <string>
+#include <math.h>
+#include <cmath>
+
+
+namespace openshot
+{
+
+ /**
+ * @brief This class adds a whisperization effect into the audio
+ *
+ */
+ class Whisperization : public EffectBase
+ {
+ private:
+ /// Init effect settings
+ void init_effect_details();
+
+ public:
+ openshot::FFTSize fft_size;
+ openshot::HopSize hop_size;
+ openshot::WindowType window_type;
+
+ /// Blank constructor, useful when using Json to load the effect properties
+ Whisperization();
+
+ /// Default constructor
+ ///
+ /// @param new_level The audio default Whisperization level (between 1 and 100)
+ Whisperization(openshot::FFTSize new_fft_size, openshot::HopSize new_hop_size, openshot::WindowType new_window_type);
+
+ /// @brief This method is required for all derived classes of ClipBase, and returns a
+ /// new openshot::Frame object. All Clip keyframes and effects are resolved into
+ /// pixels.
+ ///
+ /// @returns A new openshot::Frame object
+ /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline.
+ std::shared_ptr<openshot::Frame> GetFrame(int64_t frame_number) override {
+ return GetFrame(std::make_shared<openshot::Frame>(), frame_number);
+ }
+
+ /// @brief This method is required for all derived classes of ClipBase, and returns a
+ /// modified openshot::Frame object
+ ///
+ /// The frame object is passed into this method and used as a starting point (pixels and audio).
+ /// All Clip keyframes and effects are resolved into pixels.
+ ///
+ /// @returns The modified openshot::Frame object
+ /// @param frame The frame object that needs the clip or effect applied to it
+ /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline.
+ std::shared_ptr<openshot::Frame> GetFrame(std::shared_ptr<openshot::Frame> frame, int64_t frame_number) override;
+
+ // Get and Set JSON methods
+ std::string Json() const override; ///< Generate JSON string of this object
+ void SetJson(const std::string value) override; ///< Load JSON string into this object
+ Json::Value JsonValue() const override; ///< Generate Json::Value for this object
+ void SetJsonValue(const Json::Value root) override; ///< Load Json::Value into this object
+
+ /// Get all properties for a specific frame (perfect for a UI to display the current state
+ /// of all properties at any time)
+ std::string PropertiesJSON(int64_t requested_frame) const override;
+
+
+ class WhisperizationEffect : public STFT
+ {
+ public:
+ WhisperizationEffect(Whisperization& p) : parent (p) { }
+
+ private:
+ void modification(const int channel) override;
+
+ Whisperization &parent;
+ };
+
+ juce::CriticalSection lock;
+ WhisperizationEffect stft;
+ std::unique_ptr<juce::dsp::FFT> fft;
+ };
+
+}
+
+#endif
diff --git a/src/effects/Caption.cpp b/src/effects/Caption.cpp
index 8d9bec92..a19b8e2d 100644
--- a/src/effects/Caption.cpp
+++ b/src/effects/Caption.cpp
@@ -129,7 +129,7 @@ std::shared_ptr<Frame> Caption::GetFrame(std::shared_ptr<Fram
- if (clip->ParentTimeline() != NULL) {
+ if (clip && clip->ParentTimeline() != NULL) {
timeline = (Timeline*) clip->ParentTimeline();
} else if (this->ParentTimeline() != NULL) {
timeline = (Timeline*) this->ParentTimeline();
diff --git a/src/effects/Crop.cpp b/src/effects/Crop.cpp
index bf89b257..5ef78520 100644
--- a/src/effects/Crop.cpp
+++ b/src/effects/Crop.cpp
@@ -1,6 +1,6 @@
/**
* @file
- * @brief Source file for Crop effect class
+ * @brief Source file for Crop effect class (cropping any side, with x/y offsets)
* @author Jonathan Thomas
*
* @ref License
@@ -34,14 +34,14 @@
using namespace openshot;
/// Blank constructor, useful when using Json to load the effect properties
-Crop::Crop() : left(0.1), top(0.1), right(0.1), bottom(0.1) {
+Crop::Crop() : left(0.0), top(0.0), right(0.0), bottom(0.0), x(0.0), y(0.0) {
// Init effect properties
init_effect_details();
}
// Default constructor
Crop::Crop(Keyframe left, Keyframe top, Keyframe right, Keyframe bottom) :
- left(left), top(top), right(right), bottom(bottom)
+ left(left), top(top), right(right), bottom(bottom), x(0.0), y(0.0)
{
// Init effect properties
init_effect_details();
@@ -68,10 +68,10 @@ std::shared_ptr<Frame> Crop::GetFrame(std::shared_ptr<Frame>
// Get the frame's image
std::shared_ptr frame_image = frame->GetImage();
- // Get transparent color (and create small transparent image)
- auto tempColor = std::make_shared<QImage>(
- frame_image->width(), 1, QImage::Format_RGBA8888_Premultiplied);
- tempColor->fill(QColor(QString::fromStdString("transparent")));
+ // Get transparent color target image (which will become the cropped image)
+ auto cropped_image = std::make_shared<QImage>(
+ frame_image->width(), frame_image->height(), QImage::Format_RGBA8888_Premultiplied);
+ cropped_image->fill(QColor(QString::fromStdString("transparent")));
// Get current keyframe values
double left_value = left.GetValue(frame_number);
@@ -79,37 +79,69 @@ std::shared_ptr<Frame> Crop::GetFrame(std::shared_ptr<Frame>
double right_value = right.GetValue(frame_number);
double bottom_value = bottom.GetValue(frame_number);
+ // Get the current shift amount (if any... to slide the image around in the cropped area)
+ double x_shift = x.GetValue(frame_number);
+ double y_shift = y.GetValue(frame_number);
+
// Get pixel array pointers
unsigned char *pixels = (unsigned char *) frame_image->bits();
- unsigned char *color_pixels = (unsigned char *) tempColor->bits();
+ unsigned char *cropped_pixels = (unsigned char *) cropped_image->bits();
// Get pixels sizes of all crop sides
int top_bar_height = top_value * frame_image->height();
int bottom_bar_height = bottom_value * frame_image->height();
int left_bar_width = left_value * frame_image->width();
int right_bar_width = right_value * frame_image->width();
+ int column_offset = x_shift * frame_image->width();
+ int row_offset = y_shift * frame_image->height();
- // Loop through rows
+ // Image copy variables
+ int image_width = frame_image->width();
+ int src_start = left_bar_width;
+ int dst_start = left_bar_width;
+ int copy_length = frame_image->width() - right_bar_width - left_bar_width;
+
+ // Adjust for x offset
+ int copy_offset = 0;
+
+ if (column_offset < 0) {
+ // dest to the right
+ src_start += column_offset;
+ if (src_start < 0) {
+ int diff = 0 - src_start; // how far under 0 are we?
+ src_start = 0;
+ dst_start += diff;
+ copy_offset = -diff;
+ } else {
+ copy_offset = 0;
+ }
+
+ } else {
+ // dest to the left
+ src_start += column_offset;
+ if (image_width - src_start >= copy_length) {
+ // We have plenty pixels, use original copy-length
+ copy_offset = 0;
+ } else {
+ // We don't have enough pixels, shorten copy-length
+ copy_offset = (image_width - src_start) - copy_length;
+ }
+ }
+
+ // Loop through rows of pixels
for (int row = 0; row < frame_image->height(); row++) {
-
- // Top & Bottom Crop
- if ((top_bar_height > 0.0 && row <= top_bar_height) || (bottom_bar_height > 0.0 && row >= frame_image->height() - bottom_bar_height)) {
- memcpy(&pixels[row * frame_image->width() * 4], color_pixels, sizeof(char) * frame_image->width() * 4);
- } else {
- // Left Crop
- if (left_bar_width > 0.0) {
- memcpy(&pixels[row * frame_image->width() * 4], color_pixels, sizeof(char) * left_bar_width * 4);
- }
-
- // Right Crop
- if (right_bar_width > 0.0) {
- memcpy(&pixels[((row * frame_image->width()) + (frame_image->width() - right_bar_width)) * 4], color_pixels, sizeof(char) * right_bar_width * 4);
- }
+ int adjusted_row = row - row_offset;
+ // Is this row visible?
+ if (adjusted_row >= top_bar_height && adjusted_row < (frame_image->height() - bottom_bar_height) && (copy_length + copy_offset > 0)) {
+ // Copy image (row by row, with offsets for x and y offset, and src/dst starting points for column filtering)
+ memcpy(&cropped_pixels[((adjusted_row * frame_image->width()) + dst_start) * 4],
+ &pixels[((row * frame_image->width()) + src_start) * 4],
+ sizeof(char) * (copy_length + copy_offset) * 4);
}
}
- // Cleanup colors and arrays
- tempColor.reset();
+ // Set frame image
+ frame->AddImage(cropped_image);
// return the modified frame
return frame;
@@ -132,6 +164,8 @@ Json::Value Crop::JsonValue() const {
root["top"] = top.JsonValue();
root["right"] = right.JsonValue();
root["bottom"] = bottom.JsonValue();
+ root["x"] = x.JsonValue();
+ root["y"] = y.JsonValue();
// return JsonValue
return root;
@@ -169,6 +203,10 @@ void Crop::SetJsonValue(const Json::Value root) {
right.SetJsonValue(root["right"]);
if (!root["bottom"].isNull())
bottom.SetJsonValue(root["bottom"]);
+ if (!root["x"].isNull())
+ x.SetJsonValue(root["x"]);
+ if (!root["y"].isNull())
+ y.SetJsonValue(root["y"]);
}
// Get all properties for a specific frame
@@ -188,6 +226,8 @@ std::string Crop::PropertiesJSON(int64_t requested_frame) const {
root["top"] = add_property_json("Top Size", top.GetValue(requested_frame), "float", "", &top, 0.0, 1.0, false, requested_frame);
root["right"] = add_property_json("Right Size", right.GetValue(requested_frame), "float", "", &right, 0.0, 1.0, false, requested_frame);
root["bottom"] = add_property_json("Bottom Size", bottom.GetValue(requested_frame), "float", "", &bottom, 0.0, 1.0, false, requested_frame);
+ root["x"] = add_property_json("X Offset", x.GetValue(requested_frame), "float", "", &x, -1.0, 1.0, false, requested_frame);
+ root["y"] = add_property_json("Y Offset", y.GetValue(requested_frame), "float", "", &y, -1.0, 1.0, false, requested_frame);
// Set the parent effect which properties this effect will inherit
root["parent_effect_id"] = add_property_json("Parent", 0.0, "string", info.parent_effect_id, NULL, -1, -1, false, requested_frame);
diff --git a/src/effects/Crop.h b/src/effects/Crop.h
index f43f549c..a09c1257 100644
--- a/src/effects/Crop.h
+++ b/src/effects/Crop.h
@@ -60,11 +60,12 @@ namespace openshot
public:
- Color color; ///< Color of bars
Keyframe left; ///< Size of left bar
Keyframe top; ///< Size of top bar
Keyframe right; ///< Size of right bar
Keyframe bottom; ///< Size of bottom bar
+ Keyframe x; ///< X-offset
+ Keyframe y; ///< Y-offset
/// Blank constructor, useful when using Json to load the effect properties
Crop();
diff --git a/src/effects/ObjectDetection.cpp b/src/effects/ObjectDetection.cpp
index 13093475..b279a6fc 100644
--- a/src/effects/ObjectDetection.cpp
+++ b/src/effects/ObjectDetection.cpp
@@ -146,13 +146,22 @@ std::shared_ptr<Frame> ObjectDetection::GetFrame(std::shared_ptr<Frame> frame, i
// DrawRectangleRGBA(cv_image, box, bg_rgba, bg_alpha, 1, true);
// DrawRectangleRGBA(cv_image, box, stroke_rgba, stroke_alpha, stroke_width, false);
-
+
+
cv::Rect2d box(
(int)( (trackedBox.cx-trackedBox.width/2)*fw),
(int)( (trackedBox.cy-trackedBox.height/2)*fh),
(int)( trackedBox.width*fw),
(int)( trackedBox.height*fh)
);
+
+ // If the Draw Box property is off, then make the box invisible
+ if (trackedObject->draw_box.GetValue(frame_number) == 0)
+ {
+ bg_alpha = 1.0;
+ stroke_alpha = 1.0;
+ }
+
drawPred(detections.classIds.at(i), detections.confidences.at(i),
box, cv_image, detections.objectIds.at(i), bg_rgba, bg_alpha, 1, true, draw_text);
drawPred(detections.classIds.at(i), detections.confidences.at(i),
@@ -166,6 +175,7 @@ std::shared_ptr<Frame> ObjectDetection::GetFrame(std::shared_ptr<Frame> frame, i
if (parentTimeline){
// Get the Tracked Object's child clip
Clip* childClip = parentTimeline->GetClip(trackedObject->ChildClipId());
+
if (childClip){
+ std::shared_ptr<Frame> f(new Frame(1, frame->GetWidth(), frame->GetHeight(), "#00000000"));
// Get the image of the child clip for this frame
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 55504a49..822c4362 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -95,6 +95,7 @@ endif()
# Create object library for test executable main(),
# to avoid recompiling for every test
add_library(catch-main OBJECT catch_main.cpp)
+target_link_libraries(catch-main PUBLIC Catch2::Catch2)
foreach(tname ${OPENSHOT_TESTS})
add_executable(openshot-${tname}-test ${tname}.cpp $<TARGET_OBJECTS:catch-main>)
diff --git a/tests/CVTracker.cpp b/tests/CVTracker.cpp
index bb447065..8415ecef 100644
--- a/tests/CVTracker.cpp
+++ b/tests/CVTracker.cpp
@@ -40,8 +40,46 @@
using namespace openshot;
-// Just for the tracker constructor, it won't be used
-ProcessingController tracker_pc;
+TEST_CASE( "initialization", "[libopenshot][opencv][tracker]" )
+{
+ std::string bad_json = R"proto(
+ }
+ [1, 2, 3, "a"]
+ } )proto";
+ ProcessingController badPC;
+ CVTracker* badTracker;
+ CHECK_THROWS_AS(
+ badTracker = new CVTracker(bad_json, badPC),
+ openshot::InvalidJSON
+ );
+
+ std::string json1 = R"proto(
+ {
+ "tracker-type": "KCF"
+ } )proto";
+
+ ProcessingController pc1;
+ CVTracker tracker1(json1, pc1);
+ CHECK(pc1.GetError() == true);
+ CHECK(pc1.GetErrorMessage() == "No initial bounding box selected");
+
+ std::string json2 = R"proto(
+ {
+ "tracker-type": "KCF",
+ "region": {
+ "normalized_x": 0.459375,
+ "normalized_y": 0.28333,
+ "normalized_width": -0.28125,
+ "normalized_height": -0.461111
+ }
+ } )proto";
+
+ // Create tracker
+ ProcessingController pc2;
+ CVTracker tracker2(json2, pc2);
+ CHECK(pc2.GetError() == true);
+ CHECK(pc2.GetErrorMessage() == "No first-frame");
+}
TEST_CASE( "Track_Video", "[libopenshot][opencv][tracker]" )
{
@@ -57,10 +95,17 @@ TEST_CASE( "Track_Video", "[libopenshot][opencv][tracker]" )
{
"protobuf_data_path": "kcf_tracker.data",
"tracker-type": "KCF",
- "region": {"normalized_x": 0.459375, "normalized_y": 0.28333, "normalized_width": 0.28125, "normalized_height": 0.461111, "first-frame": 1}
+ "region": {
+ "normalized_x": 0.459375,
+ "normalized_y": 0.28333,
+ "normalized_width": 0.28125,
+ "normalized_height": 0.461111,
+ "first-frame": 1
+ }
} )proto";
// Create tracker
+ ProcessingController tracker_pc;
CVTracker kcfTracker(json_data, tracker_pc);
// Track clip for frames 0-20
@@ -73,10 +118,10 @@ TEST_CASE( "Track_Video", "[libopenshot][opencv][tracker]" )
int height = ((float)fd.y2*360) - y;
// Compare if tracked data is equal to pre-tested ones
- CHECK(x >= 255); CHECK(x <= 257);
- CHECK(y >= 133); CHECK(y <= 135);
- CHECK(width >= 179); CHECK(width <= 181);
- CHECK(height >= 165); CHECK(height <= 168);
+ CHECK(x == Approx(256).margin(1));
+ CHECK(y == Approx(134).margin(1));
+ CHECK(width == Approx(180).margin(1));
+ CHECK(height == Approx(166).margin(2));
}
@@ -95,11 +140,17 @@ TEST_CASE( "SaveLoad_Protobuf", "[libopenshot][opencv][tracker]" )
{
"protobuf_data_path": "kcf_tracker.data",
"tracker-type": "KCF",
- "region": {"x": 294, "y": 102, "width": 180, "height": 166, "first-frame": 1}
+ "region": {
+ "normalized_x": 0.46,
+ "normalized_y": 0.28,
+ "normalized_width": 0.28,
+ "normalized_height": 0.46,
+ "first-frame": 1
+ }
} )proto";
-
// Create first tracker
+ ProcessingController tracker_pc;
CVTracker kcfTracker_1(json_data, tracker_pc);
// Track clip for frames 0-20
@@ -120,7 +171,13 @@ TEST_CASE( "SaveLoad_Protobuf", "[libopenshot][opencv][tracker]" )
{
"protobuf_data_path": "kcf_tracker.data",
"tracker_type": "",
- "region": {"x": -1, "y": -1, "width": -1, "height": -1, "first-frame": 1}
+ "region": {
+ "normalized_x": 0.1,
+ "normalized_y": 0.1,
+ "normalized_width": -0.5,
+ "normalized_height": -0.5,
+ "first-frame": 1
+ }
} )proto";
// Create second tracker
@@ -138,8 +195,9 @@ TEST_CASE( "SaveLoad_Protobuf", "[libopenshot][opencv][tracker]" )
float height_2 = fd_2.y2 - y_2;
// Compare first tracker data with second tracker data
- CHECK((int)(x_1 * 640) == (int)(x_2 * 640));
- CHECK((int)(y_1 * 360) == (int)(y_2 * 360));
- CHECK((int)(width_1 * 640) == (int)(width_2 * 640));
- CHECK((int)(height_1 * 360) == (int)(height_2 * 360));
+ CHECK(x_1 == Approx(x_2).margin(0.01));
+ CHECK(y_1 == Approx(y_2).margin(0.01));
+ CHECK(width_1 == Approx(width_2).margin(0.01));
+ CHECK(height_1 == Approx(height_2).margin(0.01));
+
}
diff --git a/tests/FFmpegReader.cpp b/tests/FFmpegReader.cpp
index 217d601c..c5696b76 100644
--- a/tests/FFmpegReader.cpp
+++ b/tests/FFmpegReader.cpp
@@ -39,7 +39,6 @@
#include "Timeline.h"
#include "Json.h"
-using namespace std;
using namespace openshot;
TEST_CASE( "Invalid_Path", "[libopenshot][ffmpegreader]" )
@@ -51,7 +50,7 @@ TEST_CASE( "Invalid_Path", "[libopenshot][ffmpegreader]" )
TEST_CASE( "GetFrame_Before_Opening", "[libopenshot][ffmpegreader]" )
{
// Create a reader
- stringstream path;
+ std::stringstream path;
path << TEST_MEDIA_PATH << "piano.wav";
FFmpegReader r(path.str());
@@ -62,7 +61,7 @@ TEST_CASE( "GetFrame_Before_Opening", "[libopenshot][ffmpegreader]" )
TEST_CASE( "Check_Audio_File", "[libopenshot][ffmpegreader]" )
{
// Create a reader
- stringstream path;
+ std::stringstream path;
path << TEST_MEDIA_PATH << "piano.wav";
FFmpegReader r(path.str());
r.Open();
@@ -92,7 +91,7 @@ TEST_CASE( "Check_Audio_File", "[libopenshot][ffmpegreader]" )
TEST_CASE( "Check_Video_File", "[libopenshot][ffmpegreader]" )
{
// Create a reader
- stringstream path;
+ std::stringstream path;
path << TEST_MEDIA_PATH << "test.mp4";
FFmpegReader r(path.str());
r.Open();
@@ -138,7 +137,7 @@ TEST_CASE( "Check_Video_File", "[libopenshot][ffmpegreader]" )
TEST_CASE( "Seek", "[libopenshot][ffmpegreader]" )
{
// Create a reader
- stringstream path;
+ std::stringstream path;
path << TEST_MEDIA_PATH << "sintel_trailer-720p.mp4";
FFmpegReader r(path.str());
r.Open();
@@ -195,7 +194,7 @@ TEST_CASE( "Seek", "[libopenshot][ffmpegreader]" )
TEST_CASE( "Frame_Rate", "[libopenshot][ffmpegreader]" )
{
// Create a reader
- stringstream path;
+ std::stringstream path;
path << TEST_MEDIA_PATH << "sintel_trailer-720p.mp4";
FFmpegReader r(path.str());
r.Open();
@@ -211,7 +210,7 @@ TEST_CASE( "Frame_Rate", "[libopenshot][ffmpegreader]" )
TEST_CASE( "Multiple_Open_and_Close", "[libopenshot][ffmpegreader]" )
{
// Create a reader
- stringstream path;
+ std::stringstream path;
path << TEST_MEDIA_PATH << "sintel_trailer-720p.mp4";
FFmpegReader r(path.str());
r.Open();
@@ -247,7 +246,7 @@ TEST_CASE( "Multiple_Open_and_Close", "[libopenshot][ffmpegreader]" )
TEST_CASE( "verify parent Timeline", "[libopenshot][ffmpegreader]" )
{
// Create a reader
- stringstream path;
+ std::stringstream path;
path << TEST_MEDIA_PATH << "sintel_trailer-720p.mp4";
FFmpegReader r(path.str());
r.Open();
@@ -273,3 +272,33 @@ TEST_CASE( "verify parent Timeline", "[libopenshot][ffmpegreader]" )
CHECK(r.GetFrame(1)->GetImage()->width() == 640);
CHECK(r.GetFrame(1)->GetImage()->height() == 360);
}
+
+TEST_CASE( "DisplayInfo", "[libopenshot][ffmpegreader]" )
+{
+ // Create a reader
+ std::stringstream path;
+ path << TEST_MEDIA_PATH << "sintel_trailer-720p.mp4";
+ FFmpegReader r(path.str());
+ r.Open();
+
+ std::string expected(R"(----------------------------
+----- File Information -----
+----------------------------
+--> Has Video: true
+--> Has Audio: true
+--> Has Single Image: false
+--> Duration: 51.95 Seconds
+--> File Size: 7.26 MB
+----------------------------
+----- Video Attributes -----
+----------------------------
+--> Width: 1280
+--> Height: 720)");
+
+ // Store the DisplayInfo() text in 'output'
+ std::stringstream output;
+ r.DisplayInfo(&output);
+
+ // Compare a [0, expected.size()) substring of output to expected
+ CHECK(output.str().substr(0, expected.size()) == expected);
+}
diff --git a/tests/FFmpegWriter.cpp b/tests/FFmpegWriter.cpp
index 059bbb4d..6a9a0211 100644
--- a/tests/FFmpegWriter.cpp
+++ b/tests/FFmpegWriter.cpp
@@ -45,7 +45,7 @@ using namespace openshot;
TEST_CASE( "Webm", "[libopenshot][ffmpegwriter]" )
{
// Reader
- stringstream path;
+ std::stringstream path;
path << TEST_MEDIA_PATH << "sintel_trailer-720p.mp4";
FFmpegReader r(path.str());
r.Open();
@@ -92,7 +92,7 @@ TEST_CASE( "Webm", "[libopenshot][ffmpegwriter]" )
TEST_CASE( "Options_Overloads", "[libopenshot][ffmpegwriter]" )
{
// Reader
- stringstream path;
+ std::stringstream path;
path << TEST_MEDIA_PATH << "sintel_trailer-720p.mp4";
FFmpegReader r(path.str());
r.Open();
@@ -129,3 +129,75 @@ TEST_CASE( "Options_Overloads", "[libopenshot][ffmpegwriter]" )
CHECK_FALSE(r1.info.interlaced_frame);
CHECK(r1.info.top_field_first == true);
}
+
+
+TEST_CASE( "DisplayInfo", "[libopenshot][ffmpegwriter]" )
+{
+ // Reader
+ std::stringstream path;
+ path << TEST_MEDIA_PATH << "sintel_trailer-720p.mp4";
+ FFmpegReader r(path.str());
+ r.Open();
+
+ /* WRITER ---------------- */
+ FFmpegWriter w("output1.webm");
+
+ // Set options
+ w.SetAudioOptions(true, "libvorbis", 44100, 2, LAYOUT_STEREO, 188000);
+ w.SetVideoOptions(
+ true, "libvpx",
+ Fraction(24,1),
+ 1280, 720,
+ Fraction(1,1),
+ false, false,
+ 30000000);
+
+ // Open writer
+ w.Open();
+
+ std::string expected(
+ R"(----------------------------
+----- File Information -----
+----------------------------
+--> Has Video: true
+--> Has Audio: true
+--> Has Single Image: false
+--> Duration: 0.00 Seconds
+--> File Size: 0.00 MB
+----------------------------
+----- Video Attributes -----
+----------------------------
+--> Width: 1280
+--> Height: 720
+--> Pixel Format: -1
+--> Frames Per Second: 24.00 (24/1)
+--> Video Bit Rate: 30000 kb/s
+--> Pixel Ratio: 1.00 (1/1)
+--> Display Aspect Ratio: 1.78 (16/9)
+--> Video Codec: libvpx
+--> Video Length: 0 Frames
+--> Video Stream Index: -1
+--> Video Timebase: 0.04 (1/24)
+--> Interlaced: false
+--> Interlaced: Top Field First: false
+----------------------------
+----- Audio Attributes -----
+----------------------------
+--> Audio Codec: libvorbis
+--> Audio Bit Rate: 188 kb/s
+--> Sample Rate: 44100 Hz
+--> # of Channels: 2
+--> Channel Layout: 3
+--> Audio Stream Index: -1
+--> Audio Timebase: 1.00 (1/1)
+----------------------------)");
+
+ // Store the DisplayInfo() text in 'output'
+ std::stringstream output;
+ w.DisplayInfo(&output);
+
+ w.Close();
+
+ // Compare a [0, expected.size()) substring of output to expected
+ CHECK(output.str().substr(0, expected.size()) == expected);
+}
diff --git a/tests/Fraction.cpp b/tests/Fraction.cpp
index 8736abaf..57d37a79 100644
--- a/tests/Fraction.cpp
+++ b/tests/Fraction.cpp
@@ -32,6 +32,10 @@
#include "Fraction.h"
+#include