Merge pull request #604 from ferdnyc/opencv_build_config

CMake: Adjustments to build config with OpenCV/Protobuf/Boost
This commit is contained in:
Brenno A. C. Caldato
2021-01-13 14:30:38 -03:00
committed by GitHub
53 changed files with 4564 additions and 343 deletions

9
.github/dependabot.yml vendored Normal file
View File

@@ -0,0 +1,9 @@
# Set update schedule for GitHub Actions
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"

View File

@@ -1,19 +0,0 @@
# This workflow will triage pull requests and apply a label based on the
# paths that are modified in the pull request.
#
# To use this workflow, you will need to set up a .github/labeler.yml
# file with configuration. For more information, see:
# https://github.com/actions/labeler/blob/master/README.md
name: Labeler
on: [pull_request]
jobs:
label:
runs-on: ubuntu-latest
steps:
- uses: actions/labeler@v2
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"

View File

@@ -79,6 +79,7 @@ option(ENABLE_COVERAGE "Scan test coverage using gcov and report" OFF)
option(ENABLE_DOCS "Build API documentation (requires Doxygen)" ON)
option(APPIMAGE_BUILD "Build to install in an AppImage (Linux only)" OFF)
option(ENABLE_MAGICK "Use ImageMagick, if available" ON)
option(ENABLE_OPENCV "Build with OpenCV algorithms (requires Boost, Protobuf 3)" ON)
# Legacy commandline override
if (DISABLE_TESTS)

View File

@@ -54,20 +54,30 @@ if (CMAKE_VERSION VERSION_LESS 3.12)
"${PROJECT_BINARY_DIR}/src")
endif()
################ OPENCV ##################
find_package( OpenCV 4 )
if (OpenCV_FOUND)
message("\nCOMPILING WITH OPENCV\n")
set(CMAKE_SWIG_FLAGS "-DUSE_OPENCV=1")
add_definitions( -DUSE_OPENCV=1 )
else()
message("\nOPENCV NOT FOUND, SOME FUNCTIONALITIES WILL BE DISABLED\n")
endif()
### Enable C++ in SWIG
set_property(SOURCE openshot.i PROPERTY CPLUSPLUS ON)
set_property(SOURCE openshot.i PROPERTY SWIG_MODULE_NAME openshot)
# Set the SWIG_FLAGS from the library target, IFF its
# COMPILE_DEFINITIONS property is set (in practice, always true)
if(CMAKE_VERSION VERSION_GREATER 3.15)
set(_defs
$<REMOVE_DUPLICATES:$<TARGET_PROPERTY:openshot,COMPILE_DEFINITIONS>>)
elseif(CMAKE_VERSION VERSION_GREATER 3.12)
set(_defs $<TARGET_PROPERTY:openshot,COMPILE_DEFINITIONS>)
endif()
if(DEFINED _defs)
set_property(SOURCE openshot.i PROPERTY
COMPILE_DEFINITIONS ${_defs})
else()
get_property(_defs TARGET openshot PROPERTY COMPILE_DEFINITIONS)
foreach(_d ${_defs})
list(APPEND _flags -D${_d})
endforeach()
set_property(SOURCE openshot.i PROPERTY
SWIG_FLAGS ${_flags})
endif()
### Suppress a ton of warnings in the generated SWIG C++ code
set(SWIG_CXX_FLAGS "-Wno-unused-variable -Wno-unused-function \
-Wno-deprecated-copy -Wno-class-memaccess -Wno-cast-function-type \
@@ -75,13 +85,15 @@ set(SWIG_CXX_FLAGS "-Wno-unused-variable -Wno-unused-function \
separate_arguments(sw_flags UNIX_COMMAND ${SWIG_CXX_FLAGS})
set_property(SOURCE openshot.i PROPERTY GENERATED_COMPILE_OPTIONS ${sw_flags})
### Take include dirs from target, automatically if possible
if (CMAKE_VERSION VERSION_GREATER 3.13)
set_property(SOURCE openshot.i PROPERTY USE_TARGET_INCLUDE_DIRECTORIES True)
elseif (CMAKE_VERSION VERSION_GREATER 3.12)
set_property(SOURCE openshot.i PROPERTY
INCLUDE_DIRECTORIES $<TARGET_PROPERTY:openshot,INCLUDE_DIRECTORIES>)
endif ()
### Take include dirs from target
if(CMAKE_VERSION VERSION_GREATER 3.15)
set(_inc $<REMOVE_DUPLICATES:$<TARGET_PROPERTY:openshot,INCLUDE_DIRECTORIES>>)
elseif(CMAKE_VERSION VERSION_GREATER 3.12)
set(_inc $<TARGET_PROPERTY:openshot,INCLUDE_DIRECTORIES>)
endif()
if (DEFINED _inc)
set_property(SOURCE openshot.i PROPERTY INCLUDE_DIRECTORIES ${_inc})
endif()
### Add the SWIG interface file (which defines all the SWIG methods)
if (CMAKE_VERSION VERSION_LESS 3.8.0)

View File

@@ -74,6 +74,26 @@ endif()
set_property(SOURCE openshot.i PROPERTY CPLUSPLUS ON)
set_property(SOURCE openshot.i PROPERTY SWIG_MODULE_NAME openshot)
# Set the SWIG_FLAGS from the library target, IFF its
# COMPILE_DEFINITIONS property is set (in practice, always true)
if(CMAKE_VERSION VERSION_GREATER 3.15)
set(_defs
$<REMOVE_DUPLICATES:$<TARGET_PROPERTY:openshot,COMPILE_DEFINITIONS>>)
elseif(CMAKE_VERSION VERSION_GREATER 3.12)
set(_defs $<TARGET_PROPERTY:openshot,COMPILE_DEFINITIONS>)
endif()
if(DEFINED _defs)
set_property(SOURCE openshot.i PROPERTY
COMPILE_DEFINITIONS ${_defs})
else()
get_property(_defs TARGET openshot PROPERTY COMPILE_DEFINITIONS)
foreach(_d ${_defs})
list(APPEND _flags -D${_d})
endforeach()
set_property(SOURCE openshot.i PROPERTY
SWIG_FLAGS ${_flags})
endif()
### Suppress a ton of warnings in the generated SWIG C++ code
set(SWIG_CXX_FLAGS "-Wno-unused-variable -Wno-unused-function \
-Wno-deprecated-copy -Wno-class-memaccess -Wno-cast-function-type \
@@ -81,13 +101,15 @@ set(SWIG_CXX_FLAGS "-Wno-unused-variable -Wno-unused-function \
separate_arguments(sw_flags UNIX_COMMAND ${SWIG_CXX_FLAGS})
set_property(SOURCE openshot.i PROPERTY GENERATED_COMPILE_OPTIONS ${sw_flags})
### Take include dirs from target, automatically if possible
if (CMAKE_VERSION VERSION_GREATER 3.13)
set_property(SOURCE openshot.i PROPERTY USE_TARGET_INCLUDE_DIRECTORIES True)
else ()
set_property(SOURCE openshot.i PROPERTY
INCLUDE_DIRECTORIES $<TARGET_PROPERTY:openshot,INCLUDE_DIRECTORIES>)
endif ()
### Take include dirs from target
if(CMAKE_VERSION VERSION_GREATER 3.15)
set(_inc $<REMOVE_DUPLICATES:$<TARGET_PROPERTY:openshot,INCLUDE_DIRECTORIES>>)
elseif(CMAKE_VERSION VERSION_GREATER 3.12)
set(_inc $<TARGET_PROPERTY:openshot,INCLUDE_DIRECTORIES>)
endif()
if (DEFINED _inc)
set_property(SOURCE openshot.i PROPERTY INCLUDE_DIRECTORIES ${_inc})
endif()
### Add the SWIG interface file (which defines all the SWIG methods)
if (CMAKE_VERSION VERSION_LESS 3.8.0)

View File

@@ -50,6 +50,7 @@ endif()
find_path(Resvg_INCLUDE_DIRS
ResvgQt.h
PATHS
${Resvg_ROOT}
${RESVGDIR}
${RESVGDIR}/include
$ENV{RESVGDIR}
@@ -65,6 +66,7 @@ find_path(Resvg_INCLUDE_DIRS
find_library(Resvg_LIBRARIES
NAMES resvg
PATHS
${Resvg_ROOT}
${RESVGDIR}
${RESVGDIR}/lib
$ENV{RESVGDIR}

View File

@@ -1,10 +1,8 @@
codecov:
branch: default
coverage:
status:
project:
default:
base: pr # Only post a status to pull requests
only_pulls: true # Only post a status to pull requests
informational: true # Don't block PRs based on coverage stats (yet?)
ignore:
- "examples"

4174
iwyu.txt Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -97,8 +97,6 @@ void AudioReaderSource::GetMoreSamplesFromReader()
} catch (const ReaderClosed & e) {
break;
} catch (const TooManySeeks & e) {
break;
} catch (const OutOfBoundsFrame & e) {
break;
}

View File

@@ -225,7 +225,6 @@ if(ImageMagick_FOUND)
# define a preprocessor macro (used in the C++ source)
target_compile_definitions(openshot PUBLIC USE_IMAGEMAGICK=1)
list(APPEND CMAKE_SWIG_FLAGS -DUSE_IMAGEMAGICK=1)
# Link with ImageMagick library
target_link_libraries(openshot PUBLIC ImageMagick::Magick++)
@@ -359,8 +358,7 @@ if (TARGET Resvg::Resvg)
#include_directories(${Resvg_INCLUDE_DIRS})
target_link_libraries(openshot PUBLIC Resvg::Resvg)
target_compile_definitions(openshot PUBLIC -DUSE_RESVG=1)
list(APPEND CMAKE_SWIG_FLAGS -DUSE_RESVG=1)
target_compile_definitions(openshot PUBLIC USE_RESVG=1)
set(HAVE_RESVG TRUE CACHE BOOL "Building with Resvg support" FORCE)
mark_as_advanced(HAVE_RESVG)
@@ -386,42 +384,36 @@ if (ENABLE_BLACKMAGIC)
target_link_libraries(openshot PUBLIC ${BLACKMAGIC_LIBRARY_DIR})
# define a preprocessor macro (used in the C++)
target_compile_definitions(openshot PUBLIC -DUSE_BLACKMAGIC=1)
list(APPEND CMAKE_SWIG_FLAGS -DUSE_BLACKMAGIC=1)
target_compile_definitions(openshot PUBLIC USE_BLACKMAGIC=1)
endif()
endif()
################## OPENCV ###################
find_package( OpenCV 4 )
if (OpenCV_FOUND)
message("\nCOMPILING WITH OPENCV\n")
list(APPEND CMAKE_SWIG_FLAGS -DUSE_OPENCV=1)
target_compile_definitions(openshot PUBLIC USE_OPENCV=1)
else()
message("\nOPENCV NOT FOUND, SOME FUNCTIONALITIES WILL BE DISABLED\n")
endif()
################## PROTOBUF ##################
if (OpenCV_FOUND)
find_package(Protobuf 3)
if (NOT Protobuf_FOUND)
# Protobuf is required when compiling with opencv
message(FATAL_ERROR "\nPLEASE INSTALL PROTOBUF. Protobuf is required when compiling with opencv.\n")
if(ENABLE_OPENCV)
find_package(OpenCV 4)
if(NOT OpenCV_FOUND)
set(ENABLE_OPENCV FALSE CACHE BOOL
"Build with OpenCV algorithms (requires Boost, Protobuf 3)" FORCE)
else()
add_subdirectory(protobuf_messages)
# Add OpenCV source files
target_sources(openshot PRIVATE
${OPENSHOT_CV_SOURCES}
)
target_compile_definitions(openshot PUBLIC USE_OPENCV=1)
target_link_libraries(openshot PUBLIC
opencv_core
opencv_video
opencv_highgui
opencv_dnn
opencv_tracking
openshot_protobuf
)
set(HAVE_OPENCV TRUE CACHE BOOL "Building with OpenCV effects" FORCE)
mark_as_advanced(HAVE_OPENCV)
endif()
add_subdirectory(protobuf_messages)
target_include_directories(openshot PUBLIC ${ProtobufMessagePath})
# Add OpenCV target sources
target_sources(openshot PRIVATE
${OPENSHOT_CV_SOURCES})
# Link libopenshot with OpenCV libs
target_link_libraries(openshot PUBLIC openshot_protobuf ${OpenCV_LIBS} ${PROTOBUF_LIBRARY} ${Protobuf_INCLUDE_DIRS})
endif()
add_feature_info("OpenCV algorithms" ENABLE_OPENCV "Use OpenCV algorithms")
############### LINK LIBRARY #################
# Link remaining dependency libraries
@@ -450,6 +442,9 @@ install(
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/libopenshot
FILES_MATCHING PATTERN "*.h"
)
install(FILES ${ProtobufHeaders}
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/libopenshot
)
############### CPACK PACKAGING ##############
if(MINGW)
@@ -464,5 +459,3 @@ endif()
set(CPACK_DEBIAN_PACKAGE_MAINTAINER "Jonathan Thomas") #required
include(CPack)

View File

@@ -30,6 +30,7 @@
#include "CVObjectDetection.h"
using namespace std;
using namespace openshot;
CVObjectDetection::CVObjectDetection(std::string processInfoJson, ProcessingController &processingController)
@@ -58,9 +59,9 @@ void CVObjectDetection::detectObjectsClip(openshot::Clip &video, size_t _start,
if(error){
return;
}
processingController->SetError(false, "");
// Load names of classes
std::ifstream ifs(classesFile.c_str());
std::string line;
@@ -90,7 +91,7 @@ void CVObjectDetection::detectObjectsClip(openshot::Clip &video, size_t _start,
}
std::shared_ptr<openshot::Frame> f = video.GetFrame(frame_number);
// Grab OpenCV Mat image
cv::Mat cvimage = f->GetImageCV();
@@ -104,7 +105,7 @@ void CVObjectDetection::detectObjectsClip(openshot::Clip &video, size_t _start,
}
void CVObjectDetection::DetectObjects(const cv::Mat &frame, size_t frameId){
// Get frame as OpenCV Mat
// Get frame as OpenCV Mat
cv::Mat blob;
// Create a 4D blob from the frame.
@@ -112,10 +113,10 @@ void CVObjectDetection::DetectObjects(const cv::Mat &frame, size_t frameId){
inpWidth = inpHeight = 416;
cv::dnn::blobFromImage(frame, blob, 1/255.0, cv::Size(inpWidth, inpHeight), cv::Scalar(0,0,0), true, false);
//Sets the input to the network
net.setInput(blob);
// Runs the forward pass to get output of the output layers
std::vector<cv::Mat> outs;
net.forward(outs, getOutputsNames(net));
@@ -132,7 +133,7 @@ void CVObjectDetection::postprocess(const cv::Size &frameDims, const std::vector
std::vector<int> classIds;
std::vector<float> confidences;
std::vector<cv::Rect> boxes;
for (size_t i = 0; i < outs.size(); ++i)
{
// Scan through all the bounding boxes output from the network and keep only the
@@ -154,14 +155,14 @@ void CVObjectDetection::postprocess(const cv::Size &frameDims, const std::vector
int height = (int)(data[3] * frameDims.height);
int left = centerX - width / 2;
int top = centerY - height / 2;
classIds.push_back(classIdPoint.x);
confidences.push_back((float)confidence);
boxes.push_back(cv::Rect(left, top, width, height));
}
}
}
// Perform non maximum suppression to eliminate redundant overlapping boxes with
// lower confidences
std::vector<int> indices;
@@ -189,7 +190,7 @@ void CVObjectDetection::postprocess(const cv::Size &frameDims, const std::vector
for(uint j = i+1; j<boxes.size(); j++){
int xc_1 = boxes[i].x + (int)(boxes[i].width/2), yc_1 = boxes[i].y + (int)(boxes[i].width/2);
int xc_2 = boxes[j].x + (int)(boxes[j].width/2), yc_2 = boxes[j].y + (int)(boxes[j].width/2);
if(fabs(xc_1 - xc_2) < 10 && fabs(yc_1 - yc_2) < 10){
if(classIds[i] == classIds[j]){
if(confidences[i] >= confidences[j]){
@@ -213,7 +214,7 @@ void CVObjectDetection::postprocess(const cv::Size &frameDims, const std::vector
// Remove boxes based in IOU score
for(uint i = 0; i<boxes.size(); i++){
for(uint j = i+1; j<boxes.size(); j++){
if( iou(boxes[i], boxes[j])){
if(classIds[i] == classIds[j]){
if(confidences[i] >= confidences[j]){
@@ -233,7 +234,7 @@ void CVObjectDetection::postprocess(const cv::Size &frameDims, const std::vector
}
}
}
// Normalize boxes coordinates
std::vector<cv::Rect_<float>> normalized_boxes;
for(auto box : boxes){
@@ -244,7 +245,7 @@ void CVObjectDetection::postprocess(const cv::Size &frameDims, const std::vector
normalized_box.height = (box.height)/(float)frameDims.height;
normalized_boxes.push_back(normalized_box);
}
detectionsData[frameId] = CVDetectionData(classIds, confidences, normalized_boxes, frameId);
}
@@ -276,13 +277,13 @@ bool CVObjectDetection::iou(cv::Rect pred_box, cv::Rect sort_box){
std::vector<cv::String> CVObjectDetection::getOutputsNames(const cv::dnn::Net& net)
{
static std::vector<cv::String> names;
//Get the indices of the output layers, i.e. the layers with unconnected outputs
std::vector<int> outLayers = net.getUnconnectedOutLayers();
//get the names of all the layers in the network
std::vector<cv::String> layersNames = net.getLayerNames();
// Get the names of the output layers in names
names.resize(outLayers.size());
for (size_t i = 0; i < outLayers.size(); ++i)
@@ -293,10 +294,10 @@ std::vector<cv::String> CVObjectDetection::getOutputsNames(const cv::dnn::Net& n
CVDetectionData CVObjectDetection::GetDetectionData(size_t frameId){
// Check if the stabilizer info for the requested frame exists
if ( detectionsData.find(frameId) == detectionsData.end() ) {
return CVDetectionData();
} else {
return detectionsData[frameId];
}
}
@@ -386,13 +387,13 @@ void CVObjectDetection::SetJsonValue(const Json::Value root) {
processingDevice = (root["processing-device"].asString());
}
if (!root["model-config"].isNull()){
modelConfiguration = (root["model-config"].asString());
modelConfiguration = (root["model-config"].asString());
std::ifstream infile(modelConfiguration);
if(!infile.good()){
processingController->SetError(true, "Incorrect path to model config file");
error = true;
}
}
if (!root["model-weights"].isNull()){
modelWeights= (root["model-weights"].asString());
@@ -424,7 +425,7 @@ void CVObjectDetection::SetJsonValue(const Json::Value root) {
// Load protobuf data file
bool CVObjectDetection::_LoadObjDetectdData(){
// Create tracker message
libopenshotobjdetect::ObjDetect objMessage;
libopenshotobjdetect::ObjDetect objMessage;
{
// Read the existing tracker message.
@@ -453,7 +454,7 @@ bool CVObjectDetection::_LoadObjDetectdData(){
// Load bounding box data
const google::protobuf::RepeatedPtrField<libopenshotobjdetect::Frame_Box > &pBox = pbFrameData.bounding_box();
// Construct data vectors related to detections in the current frame
std::vector<int> classIds; std::vector<float> confidences; std::vector<cv::Rect_<float>> boxes;
@@ -475,8 +476,8 @@ bool CVObjectDetection::_LoadObjDetectdData(){
detectionsData[id] = CVDetectionData(classIds, confidences, boxes, id);
}
// Show the time stamp from the last update in object detector data file
if (objMessage.has_last_updated())
// Show the time stamp from the last update in object detector data file
if (objMessage.has_last_updated())
cout << " Loaded Data. Saved Time Stamp: " << TimeUtil::ToString(objMessage.last_updated()) << endl;
// Delete all global objects allocated by libprotobuf.

View File

@@ -42,9 +42,9 @@
#include "Json.h"
#include "ProcessingController.h"
#include "Clip.h"
#include "objdetectdata.pb.h"
#include "protobuf_messages/objdetectdata.pb.h"
#include "../src/sort_filter/sort.hpp"
#include "sort_filter/sort.hpp"
namespace openshot
{
@@ -67,13 +67,13 @@ namespace openshot
/**
 * @brief This class runs through a clip to detect objects and returns the bounding boxes and their properties.
*
* Object detection is performed using YoloV3 model with OpenCV DNN module
*
* Object detection is performed using YoloV3 model with OpenCV DNN module
*/
class CVObjectDetection{
private:
cv::dnn::Net net;
std::vector<std::string> classNames;
float confThreshold, nmsThreshold;
@@ -97,7 +97,7 @@ namespace openshot
ProcessingController *processingController;
void setProcessingDevice();
// Detect objects on a single frame
void DetectObjects(const cv::Mat &frame, size_t frame_number);

View File

@@ -30,10 +30,11 @@
#include "CVStabilization.h"
using namespace std;
using namespace openshot;
// Set default smoothing window value to compute stabilization
// Set default smoothing window value to compute stabilization
CVStabilization::CVStabilization(std::string processInfoJson, ProcessingController &processingController)
: processingController(&processingController){
SetJson(processInfoJson);
@@ -52,7 +53,7 @@ void CVStabilization::stabilizeClip(openshot::Clip& video, size_t _start, size_t
avr_dx=0; avr_dy=0; avr_da=0; max_dx=0; max_dy=0; max_da=0;
video.Open();
// Save original video width and height
// Save original video width and height
cv::Size readerDims(video.Reader()->info.width, video.Reader()->info.height);
size_t frame_number;
@@ -71,7 +72,7 @@ void CVStabilization::stabilizeClip(openshot::Clip& video, size_t _start, size_t
}
std::shared_ptr<openshot::Frame> f = video.GetFrame(frame_number);
// Grab OpenCV Mat image
cv::Mat cvimage = f->GetImageCV();
// Resize frame to original video width and height if they differ
@@ -174,7 +175,7 @@ bool CVStabilization::TrackFrameFeatures(cv::Mat frame, size_t frameNum){
return false;
}
// Keep computing average and max transformation parameters
// Keep computing average and max transformation parameters
avr_dx+=fabs(dx);
avr_dy+=fabs(dy);
avr_da+=fabs(da);
@@ -184,7 +185,7 @@ bool CVStabilization::TrackFrameFeatures(cv::Mat frame, size_t frameNum){
max_dy = dy;
if(fabs(da) > max_da)
max_da = da;
T.copyTo(last_T);
prev_to_cur_transform.push_back(TransformParam(dx, dy, da));
@@ -204,8 +205,8 @@ std::vector<CamTrajectory> CVStabilization::ComputeFramesTrajectory(){
double y = 0;
vector <CamTrajectory> trajectory; // trajectory at all frames
// Compute global camera trajectory. First frame is the origin
// Compute global camera trajectory. First frame is the origin
for(size_t i=0; i < prev_to_cur_transform.size(); i++) {
x += prev_to_cur_transform[i].dx;
y += prev_to_cur_transform[i].dy;
@@ -307,7 +308,7 @@ bool CVStabilization::SaveStabilizedData(){
// Add frame stabilization data into protobuf message
void CVStabilization::AddFrameDataToProto(libopenshotstabilize::Frame* pbFrameData, CamTrajectory& trajData, TransformParam& transData, size_t frame_number){
// Save frame number
// Save frame number
pbFrameData->set_id(frame_number);
// Save camera trajectory data
@@ -325,10 +326,10 @@ TransformParam CVStabilization::GetTransformParamData(size_t frameId){
// Check if the stabilizer info for the requested frame exists
if ( transformationData.find(frameId) == transformationData.end() ) {
return TransformParam();
} else {
return transformationData[frameId];
}
}
@@ -337,10 +338,10 @@ CamTrajectory CVStabilization::GetCamTrajectoryTrackedData(size_t frameId){
// Check if the stabilizer info for the requested frame exists
if ( trajectoryData.find(frameId) == trajectoryData.end() ) {
return CamTrajectory();
} else {
return trajectoryData[frameId];
}
}
@@ -395,11 +396,11 @@ bool CVStabilization::_LoadStabilizedData(){
transformationData.clear();
trajectoryData.clear();
// Iterate over all frames of the saved message and assign to the data maps
// Iterate over all frames of the saved message and assign to the data maps
for (size_t i = 0; i < stabilizationMessage.frame_size(); i++) {
const libopenshotstabilize::Frame& pbFrameData = stabilizationMessage.frame(i);
// Load frame number
// Load frame number
size_t id = pbFrameData.id();
// Load camera trajectory data
@@ -419,7 +420,7 @@ bool CVStabilization::_LoadStabilizedData(){
transformationData[id] = TransformParam(dx,dy,da);
}
// Show the time stamp from the last update in stabilization data file
// Show the time stamp from the last update in stabilization data file
if (stabilizationMessage.has_last_updated()) {
cout << " Loaded Data. Saved Time Stamp: " << TimeUtil::ToString(stabilizationMessage.last_updated()) << endl;
}
@@ -428,4 +429,4 @@ bool CVStabilization::_LoadStabilizedData(){
google::protobuf::ShutdownProtobufLibrary();
return true;
}
}

View File

@@ -40,12 +40,11 @@
#undef uint64
#undef int64
#include <cmath>
#include "stabilizedata.pb.h"
#include "protobuf_messages/stabilizedata.pb.h"
#include "ProcessingController.h"
#include "Clip.h"
#include "Json.h"
using namespace std;
using google::protobuf::util::TimeUtil;
// Store the relative transformation parameters between consecutive frames
@@ -82,22 +81,22 @@ struct CamTrajectory
/**
* @brief This class stabilizes a video frame using optical flow
*
* The relative motion between two consecutive frames is computed to obtain the global camera trajectory.
* The relative motion between two consecutive frames is computed to obtain the global camera trajectory.
* The camera trajectory is then smoothed to reduce jittering.
*/
class CVStabilization {
class CVStabilization {
private:
int smoothingWindow; // In frames. The larger the more stable the video, but less reactive to sudden panning
size_t start;
size_t end;
double avr_dx, avr_dy, avr_da, max_dx, max_dy, max_da;
cv::Mat last_T;
cv::Mat prev_grey;
std::vector <TransformParam> prev_to_cur_transform; // Previous to current
std::vector <TransformParam> prev_to_cur_transform; // Previous to current
std::string protobuf_data_path;
uint progress;
@@ -108,7 +107,7 @@ class CVStabilization {
// Track current frame features and find the relative transformation
bool TrackFrameFeatures(cv::Mat frame, size_t frameNum);
std::vector<CamTrajectory> ComputeFramesTrajectory();
std::map<size_t,CamTrajectory> SmoothTrajectory(std::vector <CamTrajectory> &trajectory);
@@ -120,12 +119,12 @@ class CVStabilization {
std::map <size_t,CamTrajectory> trajectoryData; // Save camera trajectory data
std::map <size_t,TransformParam> transformationData; // Save transformation data
// Set default smoothing window value to compute stabilization
// Set default smoothing window value to compute stabilization
CVStabilization(std::string processInfoJson, ProcessingController &processingController);
// Process clip and store necessary stabilization data
void stabilizeClip(openshot::Clip& video, size_t _start=0, size_t _end=0, bool process_interval=false);
/// Protobuf Save and Load methods
// Save stabilization data to protobuf file
bool SaveStabilizedData();
@@ -144,4 +143,4 @@ class CVStabilization {
bool _LoadStabilizedData();
};
#endif
#endif

View File

@@ -30,12 +30,13 @@
#include "CVTracker.h"
using namespace std;
using namespace openshot;
// Constructor
CVTracker::CVTracker(std::string processInfoJson, ProcessingController &processingController)
: processingController(&processingController), json_interval(false){
: processingController(&processingController), json_interval(false){
SetJson(processInfoJson);
}
@@ -78,11 +79,11 @@ void CVTracker::trackClip(openshot::Clip& video, size_t _start, size_t _end, boo
start = start + video.Start() * video.Reader()->info.fps.ToInt();
end = video.End() * video.Reader()->info.fps.ToInt();
}
if(error){
return;
}
}
processingController->SetError(false, "");
bool trackerInit = false;
@@ -99,22 +100,22 @@ void CVTracker::trackClip(openshot::Clip& video, size_t _start, size_t _end, boo
size_t frame_number = frame;
// Get current frame
std::shared_ptr<openshot::Frame> f = video.GetFrame(frame_number);
// Grab OpenCV Mat image
cv::Mat cvimage = f->GetImageCV();
// Pass the first frame to initialize the tracker
if(!trackerInit){
// Initialize the tracker
initTracker(cvimage, frame_number);
trackerInit = true;
}
else{
// Update the object tracker according to frame
// Update the object tracker according to frame
trackerInit = trackFrame(cvimage, frame_number);
// Draw box on image
FrameData fd = GetTrackedData(frame_number);
@@ -155,7 +156,7 @@ bool CVTracker::initTracker(cv::Mat &frame, size_t frameId){
return true;
}
// Update the object tracker according to frame
// Update the object tracker according to frame
bool CVTracker::trackFrame(cv::Mat &frame, size_t frameId){
// Update the tracking result
bool ok = tracker->update(frame, bbox);
@@ -170,7 +171,7 @@ bool CVTracker::trackFrame(cv::Mat &frame, size_t frameId){
std::vector<cv::Rect> bboxes = {bbox};
std::vector<float> confidence = {1.0};
std::vector<int> classId = {1};
sort.update(bboxes, frameId, sqrt(pow(frame.rows, 2) + pow(frame.cols, 2)), confidence, classId);
for(auto TBox : sort.frameTrackingResult)
@@ -236,15 +237,15 @@ void CVTracker::AddFrameDataToProto(libopenshottracker::Frame* pbFrameData, Fram
box->set_y2(fData.y2);
}
// Get tracker info for the desired frame
// Get tracker info for the desired frame
FrameData CVTracker::GetTrackedData(size_t frameId){
// Check if the tracker info for the requested frame exists
if ( trackedDataById.find(frameId) == trackedDataById.end() ) {
return FrameData();
} else {
return trackedDataById[frameId];
}
@@ -269,7 +270,7 @@ void CVTracker::SetJson(const std::string value) {
// Load Json::Value into this object
void CVTracker::SetJsonValue(const Json::Value root) {
// Set data from Json (if key is found)
if (!root["protobuf_data_path"].isNull()){
protobuf_data_path = (root["protobuf_data_path"].asString());
@@ -277,7 +278,7 @@ void CVTracker::SetJsonValue(const Json::Value root) {
if (!root["tracker-type"].isNull()){
trackerType = (root["tracker-type"].asString());
}
if (!root["region"].isNull()){
double x = root["region"]["x"].asDouble();
double y = root["region"]["y"].asDouble();
@@ -343,7 +344,7 @@ bool CVTracker::_LoadTrackedData(){
trackedDataById[id] = FrameData(id, rotation, x1, y1, x2, y2);
}
// Show the time stamp from the last update in tracker data file
// Show the time stamp from the last update in tracker data file
if (trackerMessage.has_last_updated()) {
cout << " Loaded Data. Saved Time Stamp: " << TimeUtil::ToString(trackerMessage.last_updated()) << endl;
}

View File

@@ -47,16 +47,15 @@
#include "Frame.h"
#include "Json.h"
#include "ProcessingController.h"
#include "trackerdata.pb.h"
#include "protobuf_messages/trackerdata.pb.h"
#include "../src/sort_filter/sort.hpp"
#include "sort_filter/sort.hpp"
using namespace std;
using google::protobuf::util::TimeUtil;
namespace openshot
{
// Store the tracked object information for one frame
struct FrameData{
size_t frame_id = -1;
@@ -88,13 +87,13 @@ namespace openshot
* @brief The tracker class will receive one bounding box provided by the user and then iterate over the clip frames
* to return the object position in all the frames.
*/
class CVTracker {
class CVTracker {
private:
std::map<size_t, FrameData> trackedDataById; // Save tracked data
std::map<size_t, FrameData> trackedDataById; // Save tracked data
std::string trackerType; // Name of the chosen tracker
cv::Ptr<cv::Tracker> tracker; // Pointer of the selected tracker
cv::Rect2d bbox; // Bounding box coords
cv::Rect2d bbox; // Bounding box coords
SortTracker sort;
std::string protobuf_data_path; // Path to protobuf data file
@@ -103,34 +102,34 @@ namespace openshot
/// Handles thread-safe communication between ClipProcessingJobs and the processing effect classes
ProcessingController *processingController;
bool json_interval;
size_t start;
size_t end;
bool error = false;
// Initialize the tracker
bool initTracker(cv::Mat &frame, size_t frameId);
// Update the object tracker according to frame
// Update the object tracker according to frame
bool trackFrame(cv::Mat &frame, size_t frameId);
public:
// Constructor
CVTracker(std::string processInfoJson, ProcessingController &processingController);
// Set desirable tracker method
cv::Ptr<cv::Tracker> selectTracker(std::string trackerType);
// Track object in the whole clip or in a given interval
// If start, end and process_interval are passed as argument, clip will be processed in [start,end)
// If start, end and process_interval are passed as argument, clip will be processed in [start,end)
void trackClip(openshot::Clip& video, size_t _start=0, size_t _end=0, bool process_interval=false);
// Get tracked data for a given frame
FrameData GetTrackedData(size_t frameId);
/// Protobuf Save and Load methods
// Save protobuf file
bool SaveTrackedData();
@@ -146,4 +145,4 @@ namespace openshot
};
}
#endif
#endif

View File

@@ -717,8 +717,6 @@ std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number)
} catch (const ReaderClosed & e) {
// ...
} catch (const TooManySeeks & e) {
// ...
} catch (const OutOfBoundsFrame & e) {
// ...
}

View File

@@ -365,22 +365,6 @@ namespace openshot {
virtual ~ResampleError() noexcept {}
};
/// Exception when too many seek attempts happen
class TooManySeeks : public ExceptionBase
{
public:
std::string file_path;
/**
* @brief Constructor
*
* @param message A message to accompany the exception
* @param file_path (optional) The input file being processed
*/
TooManySeeks(std::string message, std::string file_path="")
: ExceptionBase(message), file_path(file_path) { }
virtual ~TooManySeeks() noexcept {}
};
/// Exception when a writer is closed, and a frame is requested
class WriterClosed : public ExceptionBase
{

View File

@@ -1355,7 +1355,13 @@ void FFmpegReader::ProcessVideoPacket(int64_t requested_frame) {
std::shared_ptr<Frame> f = CreateFrame(current_frame);
// Add Image data to frame
f->AddImage(width, height, 4, QImage::Format_RGBA8888_Premultiplied, buffer);
if (!ffmpeg_has_alpha(AV_GET_CODEC_PIXEL_FORMAT(pStream, pCodecCtx))) {
// Add image with no alpha channel, Speed optimization
f->AddImage(width, height, 4, QImage::Format_RGBA8888_Premultiplied, buffer);
} else {
// Add image with alpha channel (this will be converted to premultiplied when needed, but is slower)
f->AddImage(width, height, 4, QImage::Format_RGBA8888, buffer);
}
// Update working cache
working_cache.Add(f);

View File

@@ -126,6 +126,16 @@
#define PIX_FMT_YUV444P AV_PIX_FMT_YUV444P
#endif
// Does ffmpeg pixel format contain an alpha channel?
inline static const bool ffmpeg_has_alpha(PixelFormat pix_fmt)
{
if (pix_fmt == AV_PIX_FMT_ARGB || pix_fmt == AV_PIX_FMT_RGBA || pix_fmt == AV_PIX_FMT_ABGR || pix_fmt == AV_PIX_FMT_BGRA || pix_fmt == AV_PIX_FMT_YUVA420P) {
return true;
} else {
return false;
}
}
// FFmpeg's libavutil/common.h defines an RSHIFT incompatible with Ruby's
// definition in ruby/config.h, so we move it to FF_RSHIFT
#ifdef RSHIFT

Some files were not shown because too many files have changed in this diff Show More