Merge pull request #585 from OpenShot/opencv

3 New OpenCV Effects (Tracker, Stabilization, and Object Detection)
This commit is contained in:
Jonathan Thomas
2021-01-28 17:28:15 -06:00
committed by GitHub
52 changed files with 5269 additions and 45 deletions

1
.gitignore vendored
View File

@@ -8,3 +8,4 @@
/.metadata/
tags
*~

View File

@@ -70,7 +70,7 @@ windows-builder-x64:
- $env:LIBOPENSHOT_AUDIO_DIR = "$CI_PROJECT_DIR\build\install-x64"
- $env:UNITTEST_DIR = "C:\msys64\usr"
- $env:RESVGDIR = "C:\msys64\usr"
- $env:Path = "C:\msys64\mingw64\bin;C:\msys64\mingw64\lib;C:\msys64\usr\lib\cmake\UnitTest++;C:\msys64\home\jonathan\depot_tools;C:\msys64\usr;C:\msys64\usr\lib;" + $env:Path;
- $env:Path = "C:\msys64\mingw64\bin;C:\msys64\mingw64\lib;C:\msys64\usr\lib\cmake\UnitTest++;C:\msys64\home\jonathan\depot_tools;C:\msys64\usr;C:\msys64\usr\lib;C:\msys64\usr\local\x64\mingw\bin;C:\msys64\usr\local;" + $env:Path;
- New-Item -ItemType Directory -Force -Path build
- cd build
- cmake -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON -D"CMAKE_INSTALL_PREFIX:PATH=$CI_PROJECT_DIR\build\install-x64" -D"PYTHON_MODULE_PATH=python" -D"RUBY_MODULE_PATH=ruby" -G "MSYS Makefiles" -DCMAKE_MAKE_PROGRAM=mingw32-make -D"CMAKE_BUILD_TYPE:STRING=Release" ../
@@ -97,7 +97,7 @@ windows-builder-x86:
- $env:LIBOPENSHOT_AUDIO_DIR = "$CI_PROJECT_DIR\build\install-x86"
- $env:UNITTEST_DIR = "C:\msys32\usr"
- $env:RESVGDIR = "C:\msys32\usr"
- $env:Path = "C:\msys32\mingw32\bin;C:\msys32\mingw32\lib;C:\msys32\usr\lib\cmake\UnitTest++;C:\msys32\home\jonathan\depot_tools;C:\msys32\usr;C:\msys32\usr\lib;" + $env:Path;
- $env:Path = "C:\msys32\mingw32\bin;C:\msys32\mingw32\lib;C:\msys32\usr\lib\cmake\UnitTest++;C:\msys32\home\jonathan\depot_tools;C:\msys32\usr;C:\msys32\usr\lib;C:\msys32\usr\local\x64\mingw\bin;C:\msys32\usr\local;" + $env:Path;
- New-Item -ItemType Directory -Force -Path build
- cd build
- cmake -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON -D"CMAKE_INSTALL_PREFIX:PATH=$CI_PROJECT_DIR\build\install-x86" -D"PYTHON_MODULE_PATH=python" -D"RUBY_MODULE_PATH=ruby" -G "MSYS Makefiles" -DCMAKE_MAKE_PROGRAM=mingw32-make -D"CMAKE_BUILD_TYPE:STRING=Release" -D"CMAKE_CXX_FLAGS=-m32" -D"CMAKE_EXE_LINKER_FLAGS=-Wl,--large-address-aware" -D"CMAKE_C_FLAGS=-m32" ../

View File

@@ -79,6 +79,7 @@ option(ENABLE_COVERAGE "Scan test coverage using gcov and report" OFF)
option(ENABLE_DOCS "Build API documentation (requires Doxygen)" ON)
option(APPIMAGE_BUILD "Build to install in an AppImage (Linux only)" OFF)
option(ENABLE_MAGICK "Use ImageMagick, if available" ON)
option(ENABLE_OPENCV "Build with OpenCV algorithms (requires Boost, Protobuf 3)" ON)
# Legacy commandline override
if (DISABLE_TESTS)

View File

@@ -114,6 +114,15 @@
%}
#endif
#ifdef USE_OPENCV
%{
#include "ClipProcessingJobs.h"
#include "effects/Stabilizer.h"
#include "effects/Tracker.h"
#include "effects/ObjectDetection.h"
%}
#endif
/* Generic language independent exception handler. */
%include "exception.i"
%exception {
@@ -269,6 +278,10 @@
%include "ZmqLogger.h"
%include "AudioDeviceInfo.h"
#ifdef USE_OPENCV
%include "ClipProcessingJobs.h"
#endif
#ifdef USE_IMAGEMAGICK
%include "ImageReader.h"
%include "ImageWriter.h"
@@ -291,6 +304,11 @@
%include "effects/Saturation.h"
%include "effects/Shift.h"
%include "effects/Wave.h"
#ifdef USE_OPENCV
%include "effects/Stabilizer.h"
%include "effects/Tracker.h"
%include "effects/ObjectDetection.h"
#endif
/* Wrap std templates (list, vector, etc...) */

297
examples/Example_opencv.cpp Normal file
View File

@@ -0,0 +1,297 @@
/**
* @file
* @brief Source file for Example Executable (example app for libopenshot)
* @author Jonathan Thomas <jonathan@openshot.org>
*
* @ref License
*/
/* LICENSE
*
* Copyright (c) 2008-2019 OpenShot Studios, LLC
* <http://www.openshotstudios.com/>. This file is part of
* OpenShot Library (libopenshot), an open-source project dedicated to
* delivering high quality video editing and animation solutions to the
* world. For more information visit <http://www.openshot.org/>.
*
* OpenShot Library (libopenshot) is free software: you can redistribute it
* and/or modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* OpenShot Library (libopenshot) is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
*/
#include <fstream>
#include <iostream>
#include <memory>
#include "../../include/CVTracker.h"
#include "../../include/CVStabilization.h"
#include "../../include/CVObjectDetection.h"
#include "../../include/OpenShot.h"
#include "../../include/CrashHandler.h"
using namespace openshot;
using namespace std;
/*
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
The following methods are just for getting JSON info to the pre-processing effects
*/
string jsonFormat(string key, string value, string type="string"); // Format variables to the needed JSON format
string trackerJson(cv::Rect2d r, bool onlyProtoPath); // Set variable values for tracker effect
string stabilizerJson(bool onlyProtoPath); // Set variable values for stabilizer effect
string objectDetectionJson(bool onlyProtoPath); // Set variable values for object detector effect
/*
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
*/
// Show the pre-processed clip on the screen.
// Plays every frame with any applied effects; ESC aborts playback early.
void displayClip(openshot::Clip &r9){

    // OpenCV display window
    cv::namedWindow("Display Image", cv::WINDOW_NORMAL );

    // Get video length (in frames)
    int videoLength = r9.Reader()->info.video_length;

    // Loop through the clip and show it with the effects, if any.
    // NOTE: openshot frame numbers are 1-based; the original loop ran
    // 0 .. length-1, requesting an invalid frame 0 and skipping the
    // final frame.
    for (long int frame = 1; frame <= videoLength; frame++)
    {
        // Get the frame
        std::shared_ptr<openshot::Frame> f = r9.GetFrame(frame);

        // Grab OpenCV::Mat image
        cv::Mat cvimage = f->GetImageCV();

        // Display the frame
        cv::imshow("Display Image", cvimage);

        // Press ESC on keyboard to exit
        char c = (char)cv::waitKey(25);
        if (c == 27)
            break;
    }

    // Destroy all remaining windows
    cv::destroyAllWindows();
}
// Demo entry point: runs one of the three OpenCV pre-processing
// pipelines (tracker / stabilizer / object detector) over a sample
// clip, attaches the matching effect, then plays the clip back.
int main(int argc, char* argv[]) {

    // Set pre-processing effects (enable the one(s) to demo)
    bool TRACK_DATA = true;
    bool SMOOTH_VIDEO = false;
    bool OBJECT_DETECTION_DATA = false;

    // Get media path
    std::stringstream path;
    path << TEST_MEDIA_PATH << ((OBJECT_DETECTION_DATA) ? "run.mp4" : "test.avi");
    // run.mp4  --> Used for object detector
    // test.avi --> Used for tracker and stabilizer

    // Thread controller just for the pre-processing constructors, it won't be used
    ProcessingController processingController;

    // Open clip
    openshot::Clip r9(path.str());
    r9.Open();

    // Apply tracking effect on the clip
    if(TRACK_DATA){

        // Take the bounding box coordinates
        // NOTE(review): openshot frame numbers are normally 1-based —
        // confirm GetFrame(0) is intentional here.
        cv::Mat roi = r9.GetFrame(0)->GetImageCV();
        cv::Rect2d r = cv::selectROI(roi);
        cv::destroyAllWindows();

        // Create a tracker object by passing a JSON string and a thread controller, this last one won't be used
        // JSON info: path to save the tracked data, type of tracker and bbox coordinates
        CVTracker tracker(trackerJson(r, false), processingController);

        // Start the tracking (frames 0-100 only, hard-coded for the demo)
        tracker.trackClip(r9, 0, 100, true);
        // Save the tracked data
        tracker.SaveTrackedData();

        // Create a tracker effect
        EffectBase* e = EffectInfo().CreateEffect("Tracker");

        // Pass a JSON string with the saved tracked data
        // The effect will read and save the tracking in a map::<frame,data_struct>
        e->SetJson(trackerJson(r, true));
        // Add the effect to the clip
        r9.AddEffect(e);
    }

    // Apply stabilizer effect on the clip
    if(SMOOTH_VIDEO){

        // Create a stabilizer object by passing a JSON string and a thread controller, this last one won't be used
        // JSON info: path to save the stabilized data and smoothing window value
        CVStabilization stabilizer(stabilizerJson(false), processingController);

        // Start the stabilization (frames 0-100 only, hard-coded for the demo)
        stabilizer.stabilizeClip(r9, 0, 100, true);
        // Save the stabilization data
        stabilizer.SaveStabilizedData();

        // Create a stabilizer effect
        EffectBase* e = EffectInfo().CreateEffect("Stabilizer");

        // Pass a JSON string with the saved stabilized data
        // The effect will read and save the stabilization in a map::<frame,data_struct>
        e->SetJson(stabilizerJson(true));
        // Add the effect to the clip
        r9.AddEffect(e);
    }

    // Apply object detection effect on the clip
    if(OBJECT_DETECTION_DATA){

        // Create a object detection object by passing a JSON string and a thread controller, this last one won't be used
        // JSON info: path to save the detection data, processing device, model weights, model configuration and class names
        CVObjectDetection objectDetection(objectDetectionJson(false), processingController);

        // Start the object detection (frames 0-100 only, hard-coded for the demo)
        objectDetection.detectObjectsClip(r9, 0, 100, true);
        // Save the object detection data
        objectDetection.SaveObjDetectedData();

        // Create a object detector effect
        EffectBase* e = EffectInfo().CreateEffect("Object Detector");

        // Pass a JSON string with the saved detections data
        // The effect will read and save the detections in a map::<frame,data_struct>
        e->SetJson(objectDetectionJson(true));
        // Add the effect to the clip
        r9.AddEffect(e);
    }

    // Show the pre-processed clip on the screen
    displayClip(r9);

    // Close timeline
    r9.Close();

    std::cout << "Completed successfully!" << std::endl;

    return 0;
}
/*
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
The following methods are just for getting JSON info to the pre-processing effects
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
*/
// Build one `"key": value` JSON fragment.
// `type` controls how `value` is emitted:
//   "string"  - quoted literal (the default, set on the prototype)
//   "rstring" - emitted raw (value is already valid JSON)
//   "int" / "float" / "double" - parsed and emitted as a number
//   "bool"    - emitted as true/false ("true" or "1" count as true)
string jsonFormat(string key, string value, string type){
    stringstream jsonFormatMessage;
    jsonFormatMessage << ( "\"" + key + "\": " );

    if (type == "string")
        jsonFormatMessage << ( "\"" + value + "\"" );
    else if (type == "rstring")
        jsonFormatMessage << value;
    else if (type == "int")
        jsonFormatMessage << stoi(value);
    else if (type == "float")
        jsonFormatMessage << stof(value);
    else if (type == "double")
        // stod keeps full double precision (the original used stof,
        // silently truncating "double" values to float precision)
        jsonFormatMessage << stod(value);
    else if (type == "bool")
        jsonFormatMessage << ((value == "true" || value == "1") ? "true" : "false");

    return jsonFormatMessage.str();
}
// Return JSON string for the tracker effect
string trackerJson(cv::Rect2d r, bool onlyProtoPath){

    // Output file for the tracked data
    const string dataPath = "kcf_tracker.data";
    // Tracking algorithm to use
    const string algorithm = "KCF";

    // Assemble the individual JSON key/value fragments
    const string pathField = jsonFormat("protobuf_data_path", dataPath);
    const string typeField = jsonFormat("tracker_type", algorithm);
    const string bboxField = jsonFormat(
        "bbox",
        "{" + jsonFormat("x", to_string(r.x), "int")
            + "," + jsonFormat("y", to_string(r.y), "int")
            + "," + jsonFormat("w", to_string(r.width), "int")
            + "," + jsonFormat("h", to_string(r.height), "int")
            + "}",
        "rstring");

    // The effect only needs the protobuf path; the pre-processing
    // step needs every parameter.
    if (onlyProtoPath)
        return "{" + pathField + "}";
    return "{" + pathField + "," + typeField + "," + bboxField + "}";
}
// Return JSON string for the stabilizer effect
string stabilizerJson(bool onlyProtoPath){

    // Output file for the stabilization data
    const string dataPath = "example_stabilizer.data";
    // Smoothing window size (in frames)
    const string windowSize = "30";

    // Assemble the individual JSON key/value fragments
    const string pathField = jsonFormat("protobuf_data_path", dataPath);
    const string windowField = jsonFormat("smoothing_window", windowSize, "int");

    // The effect only needs the protobuf path; the pre-processing
    // step needs every parameter.
    if (onlyProtoPath)
        return "{" + pathField + "}";
    return "{" + pathField + "," + windowField + "}";
}
// Return JSON string for the object detector effect
string objectDetectionJson(bool onlyProtoPath){

    // Output file for the object detection data
    const string dataPath = "example_object_detection.data";
    // Inference device
    const string device = "GPU";
    // Model configuration, weights and class-name files
    const string configPath = "yolov3.cfg";
    const string weightsPath = "yolov3.weights";
    const string namesPath = "obj.names";

    // Protobuf path fragment is needed in both cases
    const string pathField = jsonFormat("protobuf_data_path", dataPath);

    // The effect only needs the protobuf path
    if (onlyProtoPath)
        return "{" + pathField + "}";

    // The pre-processing step needs every parameter
    return "{" + pathField
        + "," + jsonFormat("processing_device", device)
        + "," + jsonFormat("model_configuration", configPath)
        + "," + jsonFormat("model_weights", weightsPath)
        + "," + jsonFormat("classes_file", namesPath)
        + "}";
}

BIN
examples/run.mp4 Normal file

Binary file not shown.

BIN
examples/test.avi Normal file

Binary file not shown.

BIN
examples/test_video.mp4 Normal file

Binary file not shown.

View File

@@ -99,6 +99,19 @@ set(OPENSHOT_SOURCES
TimelineBase.cpp
Timeline.cpp)
# OpenCV related classes
set(OPENSHOT_CV_SOURCES
CVTracker.cpp
CVStabilization.cpp
ClipProcessingJobs.cpp
CVObjectDetection.cpp
effects/Stabilizer.cpp
effects/Tracker.cpp
effects/ObjectDetection.cpp
./sort_filter/sort.cpp
./sort_filter/Hungarian.cpp
./sort_filter/KalmanTracker.cpp)
# Video effects
set(EFFECTS_SOURCES
effects/Bars.cpp
@@ -375,6 +388,33 @@ if (ENABLE_BLACKMAGIC)
endif()
endif()
################## OPENCV ###################
if(ENABLE_OPENCV)
find_package(OpenCV 4)
if(NOT OpenCV_FOUND)
set(ENABLE_OPENCV FALSE CACHE BOOL
"Build with OpenCV algorithms (requires Boost, Protobuf 3)" FORCE)
else()
add_subdirectory(protobuf_messages)
# Add OpenCV source files
target_sources(openshot PRIVATE
${OPENSHOT_CV_SOURCES}
)
target_compile_definitions(openshot PUBLIC USE_OPENCV=1)
target_link_libraries(openshot PUBLIC
opencv_core
opencv_video
opencv_highgui
opencv_dnn
opencv_tracking
openshot_protobuf
)
set(HAVE_OPENCV TRUE CACHE BOOL "Building with OpenCV effects" FORCE)
mark_as_advanced(HAVE_OPENCV)
endif()
endif()
add_feature_info("OpenCV algorithms" ENABLE_OPENCV "Use OpenCV algorithms")
############### LINK LIBRARY #################
# Link remaining dependency libraries
if(DEFINED PROFILER)
@@ -402,6 +442,9 @@ install(
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/libopenshot
FILES_MATCHING PATTERN "*.h"
)
install(FILES ${ProtobufHeaders}
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/libopenshot
)
############### CPACK PACKAGING ##############
if(MINGW)

488
src/CVObjectDetection.cpp Normal file
View File

@@ -0,0 +1,488 @@
/**
* @file
* @brief Source file for CVObjectDetection class
* @author Jonathan Thomas <jonathan@openshot.org>
*
* @ref License
*/
/* LICENSE
*
* Copyright (c) 2008-2019 OpenShot Studios, LLC
* <http://www.openshotstudios.com/>. This file is part of
* OpenShot Library (libopenshot), an open-source project dedicated to
* delivering high quality video editing and animation solutions to the
* world. For more information visit <http://www.openshot.org/>.
*
* OpenShot Library (libopenshot) is free software: you can redistribute it
* and/or modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* OpenShot Library (libopenshot) is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
*/
#include "CVObjectDetection.h"
#include <google/protobuf/util/time_util.h>
using namespace std;
using namespace openshot;
using google::protobuf::util::TimeUtil;
// Constructor: parses the JSON config (paths to model/class files, device)
// and sets the default detection thresholds. `processingDevice` starts as
// "CPU"; SetJson() may override it. The controller is only used for
// progress/error reporting.
CVObjectDetection::CVObjectDetection(std::string processInfoJson, ProcessingController &processingController)
: processingController(&processingController), processingDevice("CPU"){
    // SetJson may flag `error` if any configured file path is invalid
    SetJson(processInfoJson);
    // Default confidence cutoff and non-maximum-suppression threshold
    confThreshold = 0.5;
    nmsThreshold = 0.1;
}
// Point the DNN backend/target at the configured device:
// "GPU" -> CUDA backend/target, "CPU" -> OpenCV backend + CPU target.
// Any other value leaves the network's defaults untouched.
void CVObjectDetection::setProcessingDevice(){
    if (processingDevice == "CPU") {
        net.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV);
        net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
        return;
    }
    if (processingDevice == "GPU") {
        net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA);
        net.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA);
    }
}
// Iterate over the clip and run object detection on every frame in the
// requested interval. If no (valid) interval is given, the whole clip is
// processed. Progress and stop requests go through processingController.
void CVObjectDetection::detectObjectsClip(openshot::Clip &video, size_t _start, size_t _end, bool process_interval)
{
    start = _start; end = _end;

    video.Open();

    // Bail out if the JSON config reported bad model/class file paths
    if(error){
        return;
    }
    processingController->SetError(false, "");

    // Load names of classes
    std::ifstream ifs(classesFile.c_str());
    std::string line;
    while (std::getline(ifs, line)) classNames.push_back(line);

    // Load the network (all three model files are required)
    if(classesFile == "" || modelConfiguration == "" || modelWeights == "")
        return;
    net = cv::dnn::readNetFromDarknet(modelConfiguration, modelWeights);
    setProcessingDevice();

    size_t frame_number;
    if(!process_interval || end == 0 || end-start == 0){
        // No valid interval given: process the whole clip
        start = video.Start() * video.Reader()->info.fps.ToInt();
        end = video.End() * video.Reader()->info.fps.ToInt();
    }

    for (frame_number = start; frame_number <= end; frame_number++)
    {
        // Stop the feature tracker process
        if(processingController->ShouldStop()){
            return;
        }

        std::shared_ptr<openshot::Frame> f = video.GetFrame(frame_number);

        // Grab OpenCV Mat image
        cv::Mat cvimage = f->GetImageCV();

        DetectObjects(cvimage, frame_number);

        // Update progress. Guard against a zero-length interval — the
        // original divided by (end-start) unconditionally, which is a
        // division by zero when the recomputed interval is empty.
        if (end > start)
            processingController->SetProgress(uint(100*(frame_number-start)/(end-start)));
        else
            processingController->SetProgress(100);
    }
}
// Run one inference pass over a single frame and post-process the result.
void CVObjectDetection::DetectObjects(const cv::Mat &frame, size_t frameId){
    // YOLO network input resolution (square)
    const int inputSize = 416;

    // Create a 4D blob from the frame: scale pixels to [0,1],
    // resize to inputSize x inputSize, swap R/B channels, no crop.
    cv::Mat blob;
    cv::dnn::blobFromImage(frame, blob, 1/255.0, cv::Size(inputSize, inputSize), cv::Scalar(0,0,0), true, false);

    // Set the input to the network
    net.setInput(blob);

    // Run the forward pass and collect the output layers
    std::vector<cv::Mat> outs;
    net.forward(outs, getOutputsNames(net));

    // Remove the bounding boxes with low confidence
    postprocess(frame.size(), outs, frameId);
}
// Remove the bounding boxes with low confidence using non-maxima suppression,
// run the surviving boxes through the SORT tracker, de-duplicate nearly
// identical boxes, normalize coordinates and store the result in
// detectionsData[frameId].
void CVObjectDetection::postprocess(const cv::Size &frameDims, const std::vector<cv::Mat>& outs, size_t frameId)
{
    std::vector<int> classIds;
    std::vector<float> confidences;
    std::vector<cv::Rect> boxes;

    for (size_t i = 0; i < outs.size(); ++i)
    {
        // Scan through all the bounding boxes output from the network and keep only the
        // ones with high confidence scores. Assign the box's class label as the class
        // with the highest score for the box.
        float* data = (float*)outs[i].data;
        for (int j = 0; j < outs[i].rows; ++j, data += outs[i].cols)
        {
            // Class scores start at column 5 (columns 0-4 hold box geometry
            // and objectness)
            cv::Mat scores = outs[i].row(j).colRange(5, outs[i].cols);
            cv::Point classIdPoint;
            double confidence;
            // Get the value and location of the maximum score
            cv::minMaxLoc(scores, 0, &confidence, 0, &classIdPoint);
            if (confidence > confThreshold)
            {
                // Outputs are center/size fractions of the frame; convert to
                // pixel-space corner coordinates
                int centerX = (int)(data[0] * frameDims.width);
                int centerY = (int)(data[1] * frameDims.height);
                int width = (int)(data[2] * frameDims.width);
                int height = (int)(data[3] * frameDims.height);
                int left = centerX - width / 2;
                int top = centerY - height / 2;

                classIds.push_back(classIdPoint.x);
                confidences.push_back((float)confidence);
                boxes.push_back(cv::Rect(left, top, width, height));
            }
        }
    }

    // Perform non maximum suppression to eliminate redundant overlapping boxes with
    // lower confidences.
    // NOTE(review): `indices` (the NMS survivors) is never used below — the
    // full `boxes` vector is handed to SORT, so this call has no effect.
    // Confirm whether the SORT input should be filtered by `indices`.
    std::vector<int> indices;
    cv::dnn::NMSBoxes(boxes, confidences, confThreshold, nmsThreshold, indices);

    // Pass boxes to SORT algorithm (diagonal length is used as the scale)
    std::vector<cv::Rect> sortBoxes;
    for(auto box : boxes)
        sortBoxes.push_back(box);
    sort.update(sortBoxes, frameId, sqrt(pow(frameDims.width,2) + pow(frameDims.height, 2)), confidences, classIds);

    // Clear data vectors
    boxes.clear(); confidences.clear(); classIds.clear();
    // Get SORT predicted boxes
    for(auto TBox : sort.frameTrackingResult){
        if(TBox.frame == frameId){
            boxes.push_back(TBox.box);
            confidences.push_back(TBox.confidence);
            classIds.push_back(TBox.classId);
        }
    }

    // Remove boxes based on centroids distance: two same-class boxes whose
    // centers are within 10px of each other keep only the higher confidence.
    for(uint i = 0; i<boxes.size(); i++){
        for(uint j = i+1; j<boxes.size(); j++){
            // NOTE(review): both yc computations use boxes[.].width, not
            // height — looks like a copy-paste slip in the center-Y; confirm.
            int xc_1 = boxes[i].x + (int)(boxes[i].width/2), yc_1 = boxes[i].y + (int)(boxes[i].width/2);
            int xc_2 = boxes[j].x + (int)(boxes[j].width/2), yc_2 = boxes[j].y + (int)(boxes[j].width/2);

            if(fabs(xc_1 - xc_2) < 10 && fabs(yc_1 - yc_2) < 10){
                if(classIds[i] == classIds[j]){
                    if(confidences[i] >= confidences[j]){
                        boxes.erase(boxes.begin() + j);
                        classIds.erase(classIds.begin() + j);
                        confidences.erase(confidences.begin() + j);
                        break;
                    }
                    else{
                        // NOTE(review): after erasing index i the code sets
                        // i = 0 and breaks, but the outer loop's i++ makes
                        // the next pass start at i == 1, so index 0 is never
                        // re-checked — likely an off-by-one in the restart
                        // logic (same pattern in the IOU loop below).
                        boxes.erase(boxes.begin() + i);
                        classIds.erase(classIds.begin() + i);
                        confidences.erase(confidences.begin() + i);
                        i = 0;
                        break;
                    }
                }
            }
        }
    }

    // Remove boxes based in IOU score: same-class boxes overlapping more
    // than 50% keep only the higher confidence.
    for(uint i = 0; i<boxes.size(); i++){
        for(uint j = i+1; j<boxes.size(); j++){
            if( iou(boxes[i], boxes[j])){
                if(classIds[i] == classIds[j]){
                    if(confidences[i] >= confidences[j]){
                        boxes.erase(boxes.begin() + j);
                        classIds.erase(classIds.begin() + j);
                        confidences.erase(confidences.begin() + j);
                        break;
                    }
                    else{
                        boxes.erase(boxes.begin() + i);
                        classIds.erase(classIds.begin() + i);
                        confidences.erase(confidences.begin() + i);
                        i = 0;
                        break;
                    }
                }
            }
        }
    }

    // Normalize boxes coordinates to the [0,1] range before storing
    std::vector<cv::Rect_<float>> normalized_boxes;
    for(auto box : boxes){
        cv::Rect_<float> normalized_box;
        normalized_box.x = (box.x)/(float)frameDims.width;
        normalized_box.y = (box.y)/(float)frameDims.height;
        normalized_box.width = (box.width)/(float)frameDims.width;
        normalized_box.height = (box.height)/(float)frameDims.height;
        normalized_boxes.push_back(normalized_box);
    }

    detectionsData[frameId] = CVDetectionData(classIds, confidences, normalized_boxes, frameId);
}
// Compute IOU between 2 boxes; returns true when the overlap exceeds 0.5,
// i.e. the boxes are very likely variations of the same detection.
bool CVObjectDetection::iou(cv::Rect pred_box, cv::Rect sort_box){
    // Corners of the intersection rectangle
    int left   = std::max(pred_box.x, sort_box.x);
    int top    = std::max(pred_box.y, sort_box.y);
    int right  = std::min(pred_box.x + pred_box.width,  sort_box.x + sort_box.width);
    int bottom = std::min(pred_box.y + pred_box.height, sort_box.y + sort_box.height);

    // Intersection area (inclusive-pixel convention, hence the +1)
    int interArea = std::max(0, right - left + 1) * std::max(0, bottom - top + 1);

    // Areas of both rectangles under the same convention
    int predArea = (pred_box.width + 1) * (pred_box.height + 1);
    int sortArea = (sort_box.width + 1) * (sort_box.height + 1);

    // Intersection over union
    float overlap = interArea / (float)(predArea + sortArea - interArea);

    return overlap > 0.5;
}
// Get the names of the network's output layers (the layers with
// unconnected outputs).
std::vector<cv::String> CVObjectDetection::getOutputsNames(const cv::dnn::Net& net)
{
    // Indices of the output layers
    std::vector<int> outLayers = net.getUnconnectedOutLayers();

    // Names of all the layers in the network
    std::vector<cv::String> layersNames = net.getLayerNames();

    // Map output-layer indices (1-based) to their names.
    // NOTE: the original kept `names` in a `static` vector, but it was
    // recomputed on every call anyway — the cache bought nothing and was
    // a data race if two detectors ran concurrently, so it is removed.
    std::vector<cv::String> names(outLayers.size());
    for (size_t i = 0; i < outLayers.size(); ++i)
        names[i] = layersNames[outLayers[i] - 1];

    return names;
}
// Return the detection data for a frame, or a default-constructed (empty)
// CVDetectionData when the frame was never processed.
CVDetectionData CVObjectDetection::GetDetectionData(size_t frameId){
    auto it = detectionsData.find(frameId);
    if (it != detectionsData.end())
        return it->second;
    return CVDetectionData();
}
// Serialize all collected detection data (class names, per-frame boxes,
// timestamp) into the configured protobuf file.
// Returns false when the message cannot be written to disk.
bool CVObjectDetection::SaveObjDetectedData(){
    // Create tracker message
    pb_objdetect::ObjDetect objMessage;

    // Save class names in protobuf message
    // (size_t index avoids the original's signed/unsigned comparison)
    for(size_t i = 0; i < classNames.size(); i++){
        std::string* className = objMessage.add_classnames();
        className->assign(classNames.at(i));
    }

    // Iterate over all frames data and save in protobuf message.
    // The original also declared an unused, uninitialized
    // `pb_objdetect::Frame* pbFrameData` inside this loop — removed.
    for(std::map<size_t,CVDetectionData>::iterator it=detectionsData.begin(); it!=detectionsData.end(); ++it){
        // Take a reference instead of copying the whole detection record
        CVDetectionData &dData = it->second;
        AddFrameDataToProto(objMessage.add_frame(), dData);
    }

    // Add timestamp
    *objMessage.mutable_last_updated() = TimeUtil::SecondsToTimestamp(time(NULL));

    {
        // Write the new message to disk.
        std::fstream output(protobuf_data_path, ios::out | ios::trunc | ios::binary);
        if (!objMessage.SerializeToOstream(&output)) {
            cerr << "Failed to write protobuf message." << endl;
            return false;
        }
    }

    // Delete all global objects allocated by libprotobuf.
    google::protobuf::ShutdownProtobufLibrary();

    return true;
}
// Copy one frame's detection results into a protobuf Frame message.
void CVObjectDetection::AddFrameDataToProto(pb_objdetect::Frame* pbFrameData, CVDetectionData& dData) {

    // Save frame number
    pbFrameData->set_id(dData.frameId);

    // Save every bounding box together with its class id and confidence;
    // the three vectors in dData are parallel, indexed by `idx`.
    for (size_t idx = 0; idx < dData.boxes.size(); idx++) {
        pb_objdetect::Frame_Box* box = pbFrameData->add_bounding_box();
        const cv::Rect_<float>& rect = dData.boxes.at(idx);

        // Save bounding box data
        box->set_x(rect.x);
        box->set_y(rect.y);
        box->set_w(rect.width);
        box->set_h(rect.height);
        box->set_classid(dData.classIds.at(idx));
        box->set_confidence(dData.confidences.at(idx));
    }
}
// Load JSON string into this object
void CVObjectDetection::SetJson(const std::string value) {
    // Parse JSON string into JSON objects
    try
    {
        const Json::Value root = openshot::stringToJson(value);
        // Set all values that match
        SetJsonValue(root);
    }
    catch (const std::exception& e)
    {
        // Error parsing JSON (or missing keys).
        // NOTE(review): the InvalidJSON throw is commented out, so a bad
        // config is only reported on stdout and processing continues with
        // whatever fields were set — confirm this best-effort behavior is
        // intended before re-enabling the throw.
        // throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
        std::cout<<"JSON is invalid (missing keys or invalid data types)"<<std::endl;
    }
}
// Load Json::Value into this object.
// Accepts both the hyphenated keys the original read ("model-config",
// "processing-device", ...) and the underscore variants emitted by
// examples/Example_opencv.cpp ("model_configuration", "processing_device",
// ...), since the two spellings were inconsistent between the sources;
// the hyphenated key wins when both are present. For every configured
// file path, a failed existence check flags `error` and reports it on
// the processing controller.
void CVObjectDetection::SetJsonValue(const Json::Value root) {

    // Return root[primary] if present, otherwise root[fallback]
    // (which may itself be null)
    auto pick = [&root](const char* primary, const char* fallback) -> Json::Value {
        if (!root[primary].isNull())
            return root[primary];
        return root[fallback];
    };

    // Verify a configured path exists; flag an error otherwise
    auto checkFile = [this](const std::string &path, const char* message) {
        std::ifstream infile(path);
        if (!infile.good()) {
            processingController->SetError(true, message);
            error = true;
        }
    };

    // Set data from Json (if key is found)
    if (!root["protobuf_data_path"].isNull()){
        protobuf_data_path = (root["protobuf_data_path"].asString());
    }

    Json::Value device = pick("processing-device", "processing_device");
    if (!device.isNull()){
        processingDevice = device.asString();
    }

    Json::Value config = pick("model-config", "model_configuration");
    if (!config.isNull()){
        modelConfiguration = config.asString();
        checkFile(modelConfiguration, "Incorrect path to model config file");
    }

    Json::Value weights = pick("model-weights", "model_weights");
    if (!weights.isNull()){
        modelWeights = weights.asString();
        checkFile(modelWeights, "Incorrect path to model weight file");
    }

    Json::Value classes = pick("class-names", "classes_file");
    if (!classes.isNull()){
        classesFile = classes.asString();
        checkFile(classesFile, "Incorrect path to class name file");
    }
}
/*
||||||||||||||||||||||||||||||||||||||||||||||||||
                ONLY FOR MAKE TEST
||||||||||||||||||||||||||||||||||||||||||||||||||
*/

// Load protobuf data file back into classNames / detectionsData.
// Returns false when the file cannot be parsed.
bool CVObjectDetection::_LoadObjDetectdData(){
    // Create tracker message
    pb_objdetect::ObjDetect objMessage;

    {
        // Read the existing tracker message.
        fstream input(protobuf_data_path, ios::in | ios::binary);
        if (!objMessage.ParseFromIstream(&input)) {
            cerr << "Failed to parse protobuf message." << endl;
            return false;
        }
    }

    // Make sure classNames and detectionsData are empty
    classNames.clear(); detectionsData.clear();

    // Get all classes names
    for(int i = 0; i < objMessage.classnames_size(); i++){
        classNames.push_back(objMessage.classnames(i));
    }

    // Iterate over all frames of the saved message.
    // frame_size() returns int, so the loop index is int as well
    // (the original used size_t, a signed/unsigned mismatch).
    for (int i = 0; i < objMessage.frame_size(); i++) {
        // Create protobuf message reader
        const pb_objdetect::Frame& pbFrameData = objMessage.frame(i);

        // Get frame Id
        size_t id = pbFrameData.id();

        // Load bounding box data
        const google::protobuf::RepeatedPtrField<pb_objdetect::Frame_Box > &pBox = pbFrameData.bounding_box();

        // Construct data vectors related to detections in the current frame
        std::vector<int> classIds; std::vector<float> confidences; std::vector<cv::Rect_<float>> boxes;

        // Inner index renamed to `j` — the original reused `i`, shadowing
        // the outer frame index.
        for(int j = 0; j < pbFrameData.bounding_box_size(); j++){
            // Get bounding box coordinates
            float x = pBox.Get(j).x(); float y = pBox.Get(j).y();
            float w = pBox.Get(j).w(); float h = pBox.Get(j).h();
            // Create OpenCV rectangle with the bounding box info
            cv::Rect_<float> box(x, y, w, h);
            // Get class Id (which maps to a class name) and prediction confidence
            int classId = pBox.Get(j).classid(); float confidence = pBox.Get(j).confidence();
            // Push back data into vectors
            boxes.push_back(box); classIds.push_back(classId); confidences.push_back(confidence);
        }

        // Assign data to object detector map
        detectionsData[id] = CVDetectionData(classIds, confidences, boxes, id);
    }

    // Show the time stamp from the last update in object detector data file
    if (objMessage.has_last_updated())
        cout << " Loaded Data. Saved Time Stamp: " << TimeUtil::ToString(objMessage.last_updated()) << endl;

    // Delete all global objects allocated by libprotobuf.
    google::protobuf::ShutdownProtobufLibrary();

    return true;
}

133
src/CVObjectDetection.h Normal file
View File

@@ -0,0 +1,133 @@
/**
* @file
* @brief Header file for CVObjectDetection class
* @author Jonathan Thomas <jonathan@openshot.org>
*
* @ref License
*/
/* LICENSE
*
* Copyright (c) 2008-2019 OpenShot Studios, LLC
* <http://www.openshotstudios.com/>. This file is part of
* OpenShot Library (libopenshot), an open-source project dedicated to
* delivering high quality video editing and animation solutions to the
* world. For more information visit <http://www.openshot.org/>.
*
* OpenShot Library (libopenshot) is free software: you can redistribute it
* and/or modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* OpenShot Library (libopenshot) is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#define int64 opencv_broken_int
#define uint64 opencv_broken_uint
#include <opencv2/dnn.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
#undef uint64
#undef int64
#include "Json.h"
#include "ProcessingController.h"
#include "Clip.h"
#include "protobuf_messages/objdetectdata.pb.h"
#include "sort_filter/sort.hpp"
namespace openshot
{
    // Stores the detected object bounding boxes and their properties
    // for a single frame.
    struct CVDetectionData{
        // Default constructor (empty detection set; frameId uninitialized)
        CVDetectionData(){}
        // Construct from parallel vectors: classIds[i], confidences[i]
        // and boxes[i] all describe the same detection.
        CVDetectionData(std::vector<int> _classIds, std::vector<float> _confidences, std::vector<cv::Rect_<float>> _boxes, size_t _frameId){
            classIds = _classIds;
            confidences = _confidences;
            boxes = _boxes;
            frameId = _frameId;
        }
        size_t frameId;                       // Frame these detections belong to
        std::vector<int> classIds;            // Per-box index into the class-name list
        std::vector<float> confidences;       // Per-box prediction confidence
        std::vector<cv::Rect_<float>> boxes;  // Per-box rectangle (normalized to 0-1 by postprocess())
    };

    /**
     * @brief This class runs through a clip to detect objects and returns the bounding boxes and their properties.
     *
     * Object detection is performed using YoloV3 model with OpenCV DNN module
     */
    class CVObjectDetection{

        private:

        cv::dnn::Net net;                     // Loaded Darknet/YOLO network
        std::vector<std::string> classNames;  // Class labels read from classesFile
        float confThreshold, nmsThreshold;    // Confidence / NMS thresholds (set in ctor)

        std::string classesFile;              // Path to the class-names file
        std::string modelConfiguration;       // Path to the model .cfg file
        std::string modelWeights;             // Path to the model .weights file
        std::string processingDevice;         // "CPU" or "GPU"
        std::string protobuf_data_path;       // Where detection data is saved/loaded

        SortTracker sort;                     // SORT tracker used to associate detections across frames

        uint progress;                        // Last reported progress percentage

        size_t start;                         // First frame to process
        size_t end;                           // Last frame to process

        bool error = false;                   // Set when the JSON config points at missing files

        /// Will handle a Thread safely communication between ClipProcessingJobs and the processing effect classes
        ProcessingController *processingController;

        // Configure the DNN backend/target for the chosen device
        void setProcessingDevice();

        // Detect objects on a single frame
        void DetectObjects(const cv::Mat &frame, size_t frame_number);

        // True when two boxes overlap enough to be the same detection
        bool iou(cv::Rect pred_box, cv::Rect sort_box);

        // Remove the bounding boxes with low confidence using non-maxima suppression
        void postprocess(const cv::Size &frameDims, const std::vector<cv::Mat>& out, size_t frame_number);

        // Get the names of the output layers
        std::vector<cv::String> getOutputsNames(const cv::dnn::Net& net);

        public:

        // Detection results keyed by frame number
        std::map<size_t, CVDetectionData> detectionsData;

        // Parse the JSON config; the controller receives progress/error updates
        CVObjectDetection(std::string processInfoJson, ProcessingController &processingController);

        // Iterate over a clip object and run inference for each video frame
        void detectObjectsClip(openshot::Clip &video, size_t start=0, size_t end=0, bool process_interval=false);

        // Detections for one frame (empty CVDetectionData when not processed)
        CVDetectionData GetDetectionData(size_t frameId);

        /// Protobuf Save and Load methods
        // Save protobuf file
        bool SaveObjDetectedData();

        // Add frame object detection data into protobuf message.
        void AddFrameDataToProto(pb_objdetect::Frame* pbFrameData, CVDetectionData& dData);

        /// Get and Set JSON methods
        void SetJson(const std::string value); ///< Load JSON string into this object
        void SetJsonValue(const Json::Value root); ///< Load Json::Value into this object

        // Load protobuf file (ONLY FOR MAKE TEST)
        bool _LoadObjDetectdData();
    };

}

435
src/CVStabilization.cpp Normal file
View File

@@ -0,0 +1,435 @@
/**
* @file
* @brief Source file for CVStabilization class
* @author Jonathan Thomas <jonathan@openshot.org>
*
* @ref License
*/
/* LICENSE
*
* Copyright (c) 2008-2019 OpenShot Studios, LLC
* <http://www.openshotstudios.com/>. This file is part of
* OpenShot Library (libopenshot), an open-source project dedicated to
* delivering high quality video editing and animation solutions to the
* world. For more information visit <http://www.openshot.org/>.
*
* OpenShot Library (libopenshot) is free software: you can redistribute it
* and/or modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* OpenShot Library (libopenshot) is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
*/
#include "CVStabilization.h"
#include <google/protobuf/util/time_util.h>
using namespace std;
using namespace openshot;
using google::protobuf::util::TimeUtil;
// Construct the stabilizer from a JSON settings string and attach the
// thread-safe progress controller shared with ClipProcessingJobs.
CVStabilization::CVStabilization(std::string processInfoJson, ProcessingController &processingController)
: processingController(&processingController){
    // Parse settings (protobuf output path, smoothing window)
    SetJson(processInfoJson);
    // Interval defaults to "whole clip" until stabilizeClip() computes it
    start = end = 0;
}
// Process clip and store necessary stabilization data.
//
// Walks frames [start, end], tracks optical-flow features between consecutive
// frames, integrates the deltas into a camera trajectory, smooths it, and
// stores normalized trajectory/transformation maps for later use.
void CVStabilization::stabilizeClip(openshot::Clip& video, size_t _start, size_t _end, bool process_interval){
    // Configuration failed earlier (bad JSON) — nothing to do
    if(error){
        return;
    }
    processingController->SetError(false, "");
    start = _start; end = _end;
    // Compute max and average transformation parameters
    avr_dx=0; avr_dy=0; avr_da=0; max_dx=0; max_dy=0; max_da=0;
    video.Open();
    // Save original video width and height
    cv::Size readerDims(video.Reader()->info.width, video.Reader()->info.height);
    size_t frame_number;
    if(!process_interval || end == 0 || end-start == 0){
        // No explicit interval requested: process the whole clip
        start = video.Start() * video.Reader()->info.fps.ToInt();
        end = video.End() * video.Reader()->info.fps.ToInt();
    }
    // Extract and track opticalflow features for each frame
    for (frame_number = start; frame_number <= end; frame_number++)
    {
        // Stop the feature tracker process when the UI/user cancels
        if(processingController->ShouldStop()){
            return;
        }
        std::shared_ptr<openshot::Frame> f = video.GetFrame(frame_number);
        // Grab OpenCV Mat image
        cv::Mat cvimage = f->GetImageCV();
        // Resize frame to original video width and height if they differ
        if(cvimage.size().width != readerDims.width || cvimage.size().height != readerDims.height)
            cv::resize(cvimage, cvimage, cv::Size(readerDims.width, readerDims.height));
        cv::cvtColor(cvimage, cvimage, cv::COLOR_RGB2GRAY);
        // A failed track (black frame, no features, outlier motion) is
        // recorded as an identity transform so the trajectory stays aligned
        if(!TrackFrameFeatures(cvimage, frame_number)){
            prev_to_cur_transform.push_back(TransformParam(0, 0, 0));
        }
        // Update progress
        // NOTE(review): end == start would divide by zero here — confirm
        // callers always produce a non-empty interval.
        processingController->SetProgress(uint(100*(frame_number-start)/(end-start)));
    }
    // Show average and max transformation parameters (debug output)
    std::cout<<"\nAVERAGE DX: "<<avr_dx/(frame_number-1)<<" AVERAGE DY: "<<avr_dy/(frame_number-1)<<" AVERAGE A: "<<avr_da/(frame_number-1)<<"\n";
    std::cout<<"MAX X: "<<max_dx<<" MAX Y: "<<max_dy<<" MAX A: "<<max_da<<"\n\n";
    // Calculate trajectory data
    std::vector <CamTrajectory> trajectory = ComputeFramesTrajectory();
    // Calculate and save smoothed trajectory data
    trajectoryData = SmoothTrajectory(trajectory);
    // Calculate and save transformation data
    transformationData = GenNewCamPosition(trajectoryData);
    // Normalize smoothed trajectory data to [0,1]-relative coordinates
    for(auto &dataToNormalize : trajectoryData){
        dataToNormalize.second.x/=readerDims.width;
        dataToNormalize.second.y/=readerDims.height;
    }
    // Normalize transformation data the same way
    for(auto &dataToNormalize : transformationData){
        dataToNormalize.second.dx/=readerDims.width;
        dataToNormalize.second.dy/=readerDims.height;
    }
}
// Track current frame features and find the relative transformation.
//
// Returns true when a valid frame-to-frame transform was appended to
// prev_to_cur_transform; false when the frame is unusable (black frame, no
// features, empty transform, outlier motion) so the caller can insert an
// identity transform instead.
bool CVStabilization::TrackFrameFeatures(cv::Mat frame, size_t frameNum){
    // Black frames carry no information to track
    if(cv::countNonZero(frame) < 1){
        return false;
    }
    // The first usable frame only seeds the optical-flow reference image
    if(prev_grey.empty()){
        prev_grey = frame;
        return true;
    }
    // OpticalFlow features vector
    std::vector <cv::Point2f> prev_corner, cur_corner;
    std::vector <cv::Point2f> prev_corner2, cur_corner2;
    std::vector <uchar> status;
    std::vector <float> err;
    // Extract new image features
    cv::goodFeaturesToTrack(prev_grey, prev_corner, 200, 0.01, 30);
    // Track features
    cv::calcOpticalFlowPyrLK(prev_grey, frame, prev_corner, cur_corner, status, err);
    // Remove untracked features
    for(size_t i=0; i < status.size(); i++) {
        if(status[i]) {
            prev_corner2.push_back(prev_corner[i]);
            cur_corner2.push_back(cur_corner[i]);
        }
    }
    // In case no feature was detected
    if(prev_corner2.empty() || cur_corner2.empty()){
        last_T = cv::Mat();
        return false;
    }
    // Partial affine fit: translation + rotation (+ uniform scale) only
    cv::Mat T = cv::estimateAffinePartial2D(prev_corner2, cur_corner2);
    double da, dx, dy;
    // If T has nothing inside return (probably a segment where there is nothing to stabilize)
    if(T.size().width == 0 || T.size().height == 0){
        return false;
    }
    else{
        // If no transformation is found, just use the last known good transform
        if(T.data == NULL){
            if(!last_T.empty())
                last_T.copyTo(T);
            else
                return false;
        }
        // Decompose T into translation (dx, dy) and rotation angle (da)
        dx = T.at<double>(0,2);
        dy = T.at<double>(1,2);
        da = atan2(T.at<double>(1,0), T.at<double>(0,0));
    }
    // Reject outlier transforms by magnitude. The original compared the raw
    // signed values, so large *negative* pans/rotations slipped through.
    if(fabs(dx) > 200 || fabs(dy) > 200 || fabs(da) > 0.1){
        return false;
    }
    // Keep computing average and max transformation parameters
    avr_dx+=fabs(dx);
    avr_dy+=fabs(dy);
    avr_da+=fabs(da);
    // Track the maximum *magnitudes*. The original stored the signed value,
    // which could leave a negative maximum and corrupt later comparisons.
    if(fabs(dx) > max_dx)
        max_dx = fabs(dx);
    if(fabs(dy) > max_dy)
        max_dy = fabs(dy);
    if(fabs(da) > max_da)
        max_da = fabs(da);
    T.copyTo(last_T);
    prev_to_cur_transform.push_back(TransformParam(dx, dy, da));
    frame.copyTo(prev_grey);
    // Show processing info (debug output)
    cout << "Frame: " << frameNum << " - good optical flow: " << prev_corner2.size() << endl;
    return true;
}
std::vector<CamTrajectory> CVStabilization::ComputeFramesTrajectory(){
// Accumulated frame to frame transform
double a = 0;
double x = 0;
double y = 0;
vector <CamTrajectory> trajectory; // trajectory at all frames
// Compute global camera trajectory. First frame is the origin
for(size_t i=0; i < prev_to_cur_transform.size(); i++) {
x += prev_to_cur_transform[i].dx;
y += prev_to_cur_transform[i].dy;
a += prev_to_cur_transform[i].da;
// Save trajectory data to vector
trajectory.push_back(CamTrajectory(x,y,a));
}
return trajectory;
}
std::map<size_t,CamTrajectory> CVStabilization::SmoothTrajectory(std::vector <CamTrajectory> &trajectory){
std::map <size_t,CamTrajectory> smoothed_trajectory; // trajectory at all frames
for(size_t i=0; i < trajectory.size(); i++) {
double sum_x = 0;
double sum_y = 0;
double sum_a = 0;
int count = 0;
for(int j=-smoothingWindow; j <= smoothingWindow; j++) {
if(i+j < trajectory.size()) {
sum_x += trajectory[i+j].x;
sum_y += trajectory[i+j].y;
sum_a += trajectory[i+j].a;
count++;
}
}
double avg_a = sum_a / count;
double avg_x = sum_x / count;
double avg_y = sum_y / count;
// Add smoothed trajectory data to map
smoothed_trajectory[i + start] = CamTrajectory(avg_x, avg_y, avg_a);
}
return smoothed_trajectory;
}
// Generate new transformations parameters for each frame to follow the smoothed trajectory
std::map<size_t,TransformParam> CVStabilization::GenNewCamPosition(std::map <size_t,CamTrajectory> &smoothed_trajectory){
std::map <size_t,TransformParam> new_prev_to_cur_transform;
// Accumulated frame to frame transform
double a = 0;
double x = 0;
double y = 0;
for(size_t i=0; i < prev_to_cur_transform.size(); i++) {
x += prev_to_cur_transform[i].dx;
y += prev_to_cur_transform[i].dy;
a += prev_to_cur_transform[i].da;
// target - current
double diff_x = smoothed_trajectory[i + start].x - x;
double diff_y = smoothed_trajectory[i + start].y - y;
double diff_a = smoothed_trajectory[i + start].a - a;
double dx = prev_to_cur_transform[i].dx + diff_x;
double dy = prev_to_cur_transform[i].dy + diff_y;
double da = prev_to_cur_transform[i].da + diff_a;
// Add transformation data to map
new_prev_to_cur_transform[i + start] = TransformParam(dx, dy, da);
}
return new_prev_to_cur_transform;
}
// Save stabilization data to protobuf file
bool CVStabilization::SaveStabilizedData(){
// Create stabilization message
pb_stabilize::Stabilization stabilizationMessage;
std::map<size_t,CamTrajectory>::iterator trajData = trajectoryData.begin();
std::map<size_t,TransformParam>::iterator transData = transformationData.begin();
// Iterate over all frames data and save in protobuf message
for(; trajData != trajectoryData.end(); ++trajData, ++transData){
AddFrameDataToProto(stabilizationMessage.add_frame(), trajData->second, transData->second, trajData->first);
}
// Add timestamp
*stabilizationMessage.mutable_last_updated() = TimeUtil::SecondsToTimestamp(time(NULL));
// Write the new message to disk.
std::fstream output(protobuf_data_path, ios::out | ios::trunc | ios::binary);
if (!stabilizationMessage.SerializeToOstream(&output)) {
cerr << "Failed to write protobuf message." << endl;
return false;
}
// Delete all global objects allocated by libprotobuf.
google::protobuf::ShutdownProtobufLibrary();
return true;
}
// Copy one frame's stabilization results into a protobuf Frame message:
// the frame id, its smoothed camera trajectory, and its corrective transform.
void CVStabilization::AddFrameDataToProto(pb_stabilize::Frame* pbFrameData, CamTrajectory& trajData, TransformParam& transData, size_t frame_number){
    // Frame number first, so partial messages are still identifiable
    pbFrameData->set_id(frame_number);
    // Camera trajectory (position + angle)
    pbFrameData->set_x(trajData.x);
    pbFrameData->set_y(trajData.y);
    pbFrameData->set_a(trajData.a);
    // Corrective transformation (deltas + angle delta)
    pbFrameData->set_dx(transData.dx);
    pbFrameData->set_dy(transData.dy);
    pbFrameData->set_da(transData.da);
}
// Return the transformation data for the given frame, or a default-constructed
// TransformParam when the frame was never stabilized.
TransformParam CVStabilization::GetTransformParamData(size_t frameId){
    // Single lookup: the original did find() followed by operator[], which
    // searches the map twice for every hit.
    const auto it = transformationData.find(frameId);
    if (it == transformationData.end())
        return TransformParam();
    return it->second;
}
// Return the smoothed camera trajectory for the given frame, or a
// default-constructed CamTrajectory when the frame was never stabilized.
CamTrajectory CVStabilization::GetCamTrajectoryTrackedData(size_t frameId){
    // Single lookup: the original did find() followed by operator[], which
    // searches the map twice for every hit.
    const auto it = trajectoryData.find(frameId);
    if (it == trajectoryData.end())
        return CamTrajectory();
    return it->second;
}
// Load JSON string into this object.
// Throws openshot::InvalidJSON when the string cannot be parsed.
void CVStabilization::SetJson(const std::string value) {
    try
    {
        // Parse the raw string and apply every recognized key
        SetJsonValue(openshot::stringToJson(value));
    }
    catch (const std::exception& e)
    {
        // Error parsing JSON (or missing keys)
        throw openshot::InvalidJSON("JSON is invalid (missing keys or invalid data types)");
    }
}
// Load Json::Value into this object.
// Only keys that are present are applied; absent keys leave members untouched.
void CVStabilization::SetJsonValue(const Json::Value root) {
    const Json::Value &path = root["protobuf_data_path"];
    if (!path.isNull())
        protobuf_data_path = path.asString();
    const Json::Value &window = root["smoothing-window"];
    if (!window.isNull())
        smoothingWindow = window.asInt();
}
/*
||||||||||||||||||||||||||||||||||||||||||||||||||
ONLY FOR MAKE TEST
||||||||||||||||||||||||||||||||||||||||||||||||||
*/
// Load protobuf data file into trajectoryData / transformationData.
// Returns false when the file cannot be parsed. (ONLY FOR MAKE TEST)
bool CVStabilization::_LoadStabilizedData(){
    // Create stabilization message
    pb_stabilize::Stabilization stabilizationMessage;
    // Read the existing stabilization message.
    fstream input(protobuf_data_path, ios::in | ios::binary);
    if (!stabilizationMessage.ParseFromIstream(&input)) {
        cerr << "Failed to parse protobuf message." << endl;
        return false;
    }
    // Make sure the data maps are empty
    transformationData.clear();
    trajectoryData.clear();
    // Iterate over all frames of the saved message and assign to the data maps.
    // frame_size() returns int, so use a signed counter (the original size_t
    // counter caused a signed/unsigned comparison).
    for (int i = 0; i < stabilizationMessage.frame_size(); i++) {
        const pb_stabilize::Frame& pbFrameData = stabilizationMessage.frame(i);
        // Load frame number
        size_t id = pbFrameData.id();
        // Load camera trajectory data
        float x = pbFrameData.x();
        float y = pbFrameData.y();
        float a = pbFrameData.a();
        // Assign data to trajectory map
        trajectoryData[id] = CamTrajectory(x,y,a);
        // Load transformation data
        float dx = pbFrameData.dx();
        float dy = pbFrameData.dy();
        float da = pbFrameData.da();
        // Assign data to transformation map
        transformationData[id] = TransformParam(dx,dy,da);
    }
    // Show the time stamp from the last update in stabilization data file
    if (stabilizationMessage.has_last_updated()) {
        cout << " Loaded Data. Saved Time Stamp: " << TimeUtil::ToString(stabilizationMessage.last_updated()) << endl;
    }
    // Delete all global objects allocated by libprotobuf.
    google::protobuf::ShutdownProtobufLibrary();
    return true;
}

142
src/CVStabilization.h Normal file
View File

@@ -0,0 +1,142 @@
/**
* @file
* @brief Header file for CVStabilization class
* @author Jonathan Thomas <jonathan@openshot.org>
*
* @ref License
*/
/* LICENSE
*
* Copyright (c) 2008-2019 OpenShot Studios, LLC
* <http://www.openshotstudios.com/>. This file is part of
* OpenShot Library (libopenshot), an open-source project dedicated to
* delivering high quality video editing and animation solutions to the
* world. For more information visit <http://www.openshot.org/>.
*
* OpenShot Library (libopenshot) is free software: you can redistribute it
* and/or modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* OpenShot Library (libopenshot) is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef OPENSHOT_STABILIZATION_H
#define OPENSHOT_STABILIZATION_H
#define int64 opencv_broken_int
#define uint64 opencv_broken_uint
#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
#undef uint64
#undef int64
#include <cmath>
#include "protobuf_messages/stabilizedata.pb.h"
#include "ProcessingController.h"
#include "Clip.h"
#include "Json.h"
// Store the relative transformation parameters between consecutive frames.
// Members are zero-initialized: a default-constructed instance (which
// CVStabilization::GetTransformParamData returns for unknown frames) is an
// identity transform instead of uninitialized doubles.
struct TransformParam
{
    TransformParam() {}
    TransformParam(double _dx, double _dy, double _da) {
        dx = _dx;
        dy = _dy;
        da = _da;
    }
    double dx = 0; // X translation delta
    double dy = 0; // Y translation delta
    double da = 0; // Rotation angle delta (radians)
};
// Stores the global camera trajectory for one frame.
// Members are zero-initialized: a default-constructed instance (which
// CVStabilization::GetCamTrajectoryTrackedData returns for unknown frames)
// sits at the origin instead of holding uninitialized doubles.
struct CamTrajectory
{
    CamTrajectory() {}
    CamTrajectory(double _x, double _y, double _a) {
        x = _x;
        y = _y;
        a = _a;
    }
    double x = 0; // Accumulated X position
    double y = 0; // Accumulated Y position
    double a = 0; // Accumulated rotation angle (radians)
};
/**
 * @brief This class stabilizes a video clip using optical flow
 *
 * The relative motion between two consecutive frames is computed to obtain the global camera trajectory.
 * The camera trajectory is then smoothed to reduce jittering.
 */
class CVStabilization {
    private:
    int smoothingWindow; // In frames. The larger the more stable the video, but less reactive to sudden panning
    size_t start;        // First frame of the processing interval
    size_t end;          // Last frame of the processing interval
    double avr_dx, avr_dy, avr_da, max_dx, max_dy, max_da; // Accumulated motion statistics (printed after processing)
    cv::Mat last_T;      // Last valid frame-to-frame transform (fallback when estimation fails)
    cv::Mat prev_grey;   // Previous frame in greyscale (optical-flow reference)
    std::vector <TransformParam> prev_to_cur_transform; // Previous to current
    std::string protobuf_data_path; // Output path for the stabilization protobuf file
    uint progress;       // Pre-processing progress (0-100)
    bool error = false;  // Set when JSON configuration failed
    /// Will handle a Thread safely communication between ClipProcessingJobs and the processing effect classes
    ProcessingController *processingController;
    // Track current frame features and find the relative transformation
    bool TrackFrameFeatures(cv::Mat frame, size_t frameNum);
    // Integrate frame-to-frame deltas into the global camera trajectory
    std::vector<CamTrajectory> ComputeFramesTrajectory();
    // Smooth the trajectory with a sliding average of +/- smoothingWindow frames
    std::map<size_t,CamTrajectory> SmoothTrajectory(std::vector <CamTrajectory> &trajectory);
    // Generate new transformations parameters for each frame to follow the smoothed trajectory
    std::map<size_t,TransformParam> GenNewCamPosition(std::map <size_t,CamTrajectory> &smoothed_trajectory);
    public:
    std::map <size_t,CamTrajectory> trajectoryData; // Save camera trajectory data
    std::map <size_t,TransformParam> transformationData; // Save transformation data
    // Set default smoothing window value to compute stabilization
    CVStabilization(std::string processInfoJson, ProcessingController &processingController);
    // Process clip and store necessary stabilization data
    void stabilizeClip(openshot::Clip& video, size_t _start=0, size_t _end=0, bool process_interval=false);
    /// Protobuf Save and Load methods
    // Save stabilization data to protobuf file
    bool SaveStabilizedData();
    // Add frame stabilization data into protobuf message
    void AddFrameDataToProto(pb_stabilize::Frame* pbFrameData, CamTrajectory& trajData, TransformParam& transData, size_t frame_number);
    // Return requested struct info for a given frame (default-constructed when absent)
    TransformParam GetTransformParamData(size_t frameId);
    CamTrajectory GetCamTrajectoryTrackedData(size_t frameId);
    /// Get and Set JSON methods
    void SetJson(const std::string value); ///< Load JSON string into this object
    void SetJsonValue(const Json::Value root); ///< Load Json::Value into this object
    // Load protobuf data file (ONLY FOR MAKE TEST)
    bool _LoadStabilizedData();
};
#endif

361
src/CVTracker.cpp Normal file
View File

@@ -0,0 +1,361 @@
/**
* @file
* @brief Track an object selected by the user
* @author Jonathan Thomas <jonathan@openshot.org>
*
* @ref License
*/
/* LICENSE
*
* Copyright (c) 2008-2019 OpenShot Studios, LLC
* <http://www.openshotstudios.com/>. This file is part of
* OpenShot Library (libopenshot), an open-source project dedicated to
* delivering high quality video editing and animation solutions to the
* world. For more information visit <http://www.openshot.org/>.
*
* OpenShot Library (libopenshot) is free software: you can redistribute it
* and/or modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* OpenShot Library (libopenshot) is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
*/
#include "CVTracker.h"
#include <google/protobuf/util/time_util.h>
using namespace std;
using namespace openshot;
using google::protobuf::util::TimeUtil;
// Construct the tracker from a JSON settings string and attach the
// thread-safe progress controller shared with ClipProcessingJobs.
CVTracker::CVTracker(std::string processInfoJson, ProcessingController &processingController)
: processingController(&processingController), json_interval(false){
    // Parse settings (protobuf path, tracker type, initial region)
    SetJson(processInfoJson);
    // Interval defaults to "whole clip" until trackClip() computes it
    start = end = 0;
}
// Create a tracker instance of the requested OpenCV tracker type.
// Returns an empty cv::Ptr for an unrecognized name (the original chain of
// independent `if`s had the same fallthrough behavior, but ran every string
// comparison even after a match; early returns stop at the first hit).
cv::Ptr<cv::Tracker> CVTracker::selectTracker(std::string trackerType){
    if (trackerType == "BOOSTING")
        return cv::TrackerBoosting::create();
    if (trackerType == "MIL")
        return cv::TrackerMIL::create();
    if (trackerType == "KCF")
        return cv::TrackerKCF::create();
    if (trackerType == "TLD")
        return cv::TrackerTLD::create();
    if (trackerType == "MEDIANFLOW")
        return cv::TrackerMedianFlow::create();
    if (trackerType == "MOSSE")
        return cv::TrackerMOSSE::create();
    if (trackerType == "CSRT")
        return cv::TrackerCSRT::create();
    // Unknown tracker name: empty Ptr. NOTE(review): callers dereference the
    // result in initTracker() — confirm only valid names reach this point.
    return cv::Ptr<cv::Tracker>();
}
// Track object in the whole clip or in a given interval.
//
// If the JSON config supplied a "first-frame" (json_interval is true), the
// interval comes from the config; otherwise it comes from _start/_end, falling
// back to the whole clip when no explicit interval was requested.
void CVTracker::trackClip(openshot::Clip& video, size_t _start, size_t _end, bool process_interval){
    video.Open();
    if(!json_interval){
        start = _start; end = _end;
        if(!process_interval || end <= 0 || end-start == 0){
            // Get total number of frames in video
            start = video.Start() * video.Reader()->info.fps.ToInt();
            end = video.End() * video.Reader()->info.fps.ToInt();
        }
    }
    else{
        // Offset the configured start frame by the clip's own start position
        start = start + video.Start() * video.Reader()->info.fps.ToInt();
        end = video.End() * video.Reader()->info.fps.ToInt();
    }
    // Configuration failed earlier (e.g. no initial bounding box) — abort
    if(error){
        return;
    }
    processingController->SetError(false, "");
    bool trackerInit = false;
    size_t frame;
    // Loop through video
    for (frame = start; frame <= end; frame++)
    {
        // Stop the feature tracker process when the UI/user cancels
        if(processingController->ShouldStop()){
            return;
        }
        size_t frame_number = frame;
        // Get current frame
        std::shared_ptr<openshot::Frame> f = video.GetFrame(frame_number);
        // Grab OpenCV Mat image
        cv::Mat cvimage = f->GetImageCV();
        // Pass the first frame to initialize the tracker
        if(!trackerInit){
            // Initialize the tracker with the configured bounding box
            initTracker(cvimage, frame_number);
            trackerInit = true;
        }
        else{
            // Update the object tracker according to frame
            // (a lost track resets trackerInit so the next frame re-initializes)
            trackerInit = trackFrame(cvimage, frame_number);
            // Draw box on image
            FrameData fd = GetTrackedData(frame_number);
        }
        // Update progress
        // NOTE(review): end == start would divide by zero here — confirm
        // callers always produce a non-empty interval.
        processingController->SetProgress(uint(100*(frame_number-start)/(end-start)));
    }
}
// Initialize the tracker on the first frame using the configured bounding box.
// Always returns true (the frame is assumed trackable at this point).
bool CVTracker::initTracker(cv::Mat &frame, size_t frameId){
    // Create a fresh tracker instance of the configured type
    tracker = selectTracker(trackerType);
    // Normalize the bounding box so width/height are positive (the user may
    // have dragged the region in any direction). Computed without abs(): the
    // original's unqualified abs() on cv::Rect2d's double fields could bind to
    // the integer C abs() and silently truncate fractional coordinates.
    if(bbox.width < 0){
        bbox.x += bbox.width;       // x - |width|
        bbox.width = -bbox.width;
    }
    if(bbox.height < 0){
        bbox.y += bbox.height;      // y - |height|
        bbox.height = -bbox.height;
    }
    // Initialize tracker with the first frame and the selected region
    tracker->init(frame, bbox);
    float fw = frame.size().width;
    float fh = frame.size().height;
    // Store corner coordinates normalized to [0,1] for this frame
    trackedDataById[frameId] = FrameData(frameId, 0, (bbox.x)/fw,
                                                     (bbox.y)/fh,
                                                     (bbox.x+bbox.width)/fw,
                                                     (bbox.y+bbox.height)/fh);
    return true;
}
// Update the object tracker according to frame.
// Returns true while the tracker still finds the object; false once lost
// (the caller re-initializes the tracker on the next frame).
bool CVTracker::trackFrame(cv::Mat &frame, size_t frameId){
    // Update the tracking result
    bool ok = tracker->update(frame, bbox);
    // Add frame number and box coords if tracker finds the object
    // Otherwise add only frame number
    if (ok)
    {
        float fw = frame.size().width;
        float fh = frame.size().height;
        // Run the raw tracker box through the SORT filter to smooth it.
        // NOTE(review): bbox is a cv::Rect2d; building std::vector<cv::Rect>
        // truncates the coordinates to int before SORT sees them — confirm
        // this precision loss is intended.
        std::vector<cv::Rect> bboxes = {bbox};
        std::vector<float> confidence = {1.0};
        std::vector<int> classId = {1};
        // The frame diagonal is passed as the image scale for SORT
        sort.update(bboxes, frameId, sqrt(pow(frame.rows, 2) + pow(frame.cols, 2)), confidence, classId);
        // Keep the last filtered box as the current bounding box
        for(auto TBox : sort.frameTrackingResult)
            bbox = TBox.box;
        // Add new frame data (corner coordinates normalized to [0,1])
        trackedDataById[frameId] = FrameData(frameId, 0, (bbox.x)/fw,
                                                         (bbox.y)/fh,
                                                         (bbox.x+bbox.width)/fw,
                                                         (bbox.y+bbox.height)/fh);
    }
    else
    {
        // Tracker lost the object: record the frame with sentinel coords (-1)
        trackedDataById[frameId] = FrameData(frameId);
    }
    return ok;
}
// Save all tracked frame data to the protobuf file.
// Returns false when serialization to disk fails.
bool CVTracker::SaveTrackedData(){
    // Create tracker message
    pb_tracker::Tracker trackerMessage;
    // Iterate over all frames data and save in protobuf message.
    // (The original declared an unused, uninitialized pb_tracker::Frame*
    // inside this loop and copied each FrameData by value — both removed.)
    for(std::map<size_t,FrameData>::iterator it=trackedDataById.begin(); it!=trackedDataById.end(); ++it){
        AddFrameDataToProto(trackerMessage.add_frame(), it->second);
    }
    // Add timestamp
    *trackerMessage.mutable_last_updated() = TimeUtil::SecondsToTimestamp(time(NULL));
    {
        // Write the new message to disk.
        std::fstream output(protobuf_data_path, ios::out | ios::trunc | ios::binary);
        if (!trackerMessage.SerializeToOstream(&output)) {
            cerr << "Failed to write protobuf message." << endl;
            return false;
        }
    }
    // Delete all global objects allocated by libprotobuf.
    google::protobuf::ShutdownProtobufLibrary();
    return true;
}
// Copy one frame's tracking result into a protobuf Frame message:
// frame id, rotation (currently always 0), and the normalized bounding box.
void CVTracker::AddFrameDataToProto(pb_tracker::Frame* pbFrameData, FrameData& fData) {
    // Frame number and rotation
    pbFrameData->set_id(fData.frame_id);
    pbFrameData->set_rotation(0);
    // Bounding box corner coordinates (normalized [0,1])
    pb_tracker::Frame::Box* box = pbFrameData->mutable_bounding_box();
    box->set_x1(fData.x1);
    box->set_y1(fData.y1);
    box->set_x2(fData.x2);
    box->set_y2(fData.y2);
}
// Return the tracked data for the given frame, or a default-constructed
// FrameData (sentinel -1 coordinates) when the frame was never tracked.
FrameData CVTracker::GetTrackedData(size_t frameId){
    // Single lookup: the original did find() followed by operator[], which
    // searches the map twice for every hit.
    const auto it = trackedDataById.find(frameId);
    if (it == trackedDataById.end())
        return FrameData();
    return it->second;
}
// Load JSON string into this object.
// Throws openshot::InvalidJSON when the string cannot be parsed.
void CVTracker::SetJson(const std::string value) {
    try
    {
        // Parse the raw string and apply every recognized key
        SetJsonValue(openshot::stringToJson(value));
    }
    catch (const std::exception& e)
    {
        // Error parsing JSON (or missing keys)
        throw openshot::InvalidJSON("JSON is invalid (missing keys or invalid data types)");
    }
}
// Load Json::Value into this object.
// "region" (with "first-frame") is mandatory: its absence flags an error on
// the processing controller and sets the local error state.
void CVTracker::SetJsonValue(const Json::Value root) {
    if (!root["protobuf_data_path"].isNull())
        protobuf_data_path = root["protobuf_data_path"].asString();
    if (!root["tracker-type"].isNull())
        trackerType = root["tracker-type"].asString();
    const Json::Value &region = root["region"];
    if (!region.isNull()){
        // Initial bounding box selected by the user
        bbox = cv::Rect2d(region["x"].asDouble(),
                          region["y"].asDouble(),
                          region["width"].asDouble(),
                          region["height"].asDouble());
    }
    else{
        processingController->SetError(true, "No initial bounding box selected");
        error = true;
    }
    if (!region["first-frame"].isNull()){
        // Frame where the region was selected; enables config-driven interval
        start = region["first-frame"].asInt64();
        json_interval = true;
    }
    else{
        processingController->SetError(true, "No first-frame");
        error = true;
    }
}
/*
||||||||||||||||||||||||||||||||||||||||||||||||||
ONLY FOR MAKE TEST
||||||||||||||||||||||||||||||||||||||||||||||||||
*/
// Load protobuf data file into trackedDataById.
// Returns false when the file cannot be parsed. (ONLY FOR MAKE TEST)
bool CVTracker::_LoadTrackedData(){
    // Create tracker message
    pb_tracker::Tracker trackerMessage;
    {
        // Read the existing tracker message.
        fstream input(protobuf_data_path, ios::in | ios::binary);
        if (!trackerMessage.ParseFromIstream(&input)) {
            cerr << "Failed to parse protobuf message." << endl;
            return false;
        }
    }
    // Make sure the trackedData is empty
    trackedDataById.clear();
    // Iterate over all frames of the saved message.
    // frame_size() returns int, so use a signed counter (the original size_t
    // counter caused a signed/unsigned comparison).
    for (int i = 0; i < trackerMessage.frame_size(); i++) {
        const pb_tracker::Frame& pbFrameData = trackerMessage.frame(i);
        // Load frame and rotation data
        size_t id = pbFrameData.id();
        float rotation = pbFrameData.rotation();
        // Load bounding box data
        const pb_tracker::Frame::Box& box = pbFrameData.bounding_box();
        float x1 = box.x1();
        float y1 = box.y1();
        float x2 = box.x2();
        float y2 = box.y2();
        // Assign data to tracker map
        trackedDataById[id] = FrameData(id, rotation, x1, y1, x2, y2);
    }
    // Show the time stamp from the last update in tracker data file
    if (trackerMessage.has_last_updated()) {
        cout << " Loaded Data. Saved Time Stamp: " << TimeUtil::ToString(trackerMessage.last_updated()) << endl;
    }
    // Delete all global objects allocated by libprotobuf.
    google::protobuf::ShutdownProtobufLibrary();
    return true;
}

144
src/CVTracker.h Normal file
View File

@@ -0,0 +1,144 @@
/**
* @file
* @brief Track an object selected by the user
* @author Jonathan Thomas <jonathan@openshot.org>
*
* @ref License
*/
/* LICENSE
*
* Copyright (c) 2008-2019 OpenShot Studios, LLC
* <http://www.openshotstudios.com/>. This file is part of
* OpenShot Library (libopenshot), an open-source project dedicated to
* delivering high quality video editing and animation solutions to the
* world. For more information visit <http://www.openshot.org/>.
*
* OpenShot Library (libopenshot) is free software: you can redistribute it
* and/or modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* OpenShot Library (libopenshot) is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef OPENSHOT_CVTRACKER_H
#define OPENSHOT_CVTRACKER_H
#define int64 opencv_broken_int
#define uint64 opencv_broken_uint
#include <opencv2/opencv.hpp>
#include <opencv2/tracking.hpp>
#include <opencv2/core.hpp>
#undef uint64
#undef int64
#include <fstream>
#include "Clip.h"
#include "KeyFrame.h"
#include "Frame.h"
#include "Json.h"
#include "ProcessingController.h"
#include "protobuf_messages/trackerdata.pb.h"
#include "sort_filter/sort.hpp"
namespace openshot
{
// Holds the tracked bounding box for a single frame.
// Coordinate fields default to -1 as a "never set" sentinel; frame_id's -1
// wraps to SIZE_MAX for the same purpose.
struct FrameData{
    size_t frame_id = -1; // Frame number (SIZE_MAX when unset)
    float rotation = 0;   // Box rotation (currently always 0)
    float x1 = -1;        // Top-left corner, normalized [0,1]
    float y1 = -1;
    float x2 = -1;        // Bottom-right corner, normalized [0,1]
    float y2 = -1;
    // Constructors: the default member initializers above cover any field
    // a constructor does not set explicitly.
    FrameData() {}
    FrameData(size_t _frame_id) : frame_id(_frame_id) {}
    FrameData(size_t _frame_id, float _rotation, float _x1, float _y1, float _x2, float _y2)
        : frame_id(_frame_id), rotation(_rotation),
          x1(_x1), y1(_y1), x2(_x2), y2(_y2) {}
};
/**
 * @brief The tracker class will receive one bounding box provided by the user and then iterate over the clip frames
 * to return the object position in all the frames.
 */
class CVTracker {
    private:
    std::map<size_t, FrameData> trackedDataById; // Save tracked data
    std::string trackerType; // Name of the chosen tracker
    cv::Ptr<cv::Tracker> tracker; // Pointer of the selected tracker
    cv::Rect2d bbox; // Bounding box coords
    SortTracker sort; // SORT filter applied to raw tracker boxes
    std::string protobuf_data_path; // Path to protobuf data file
    uint progress; // Pre-processing effect progress
    /// Will handle a Thread safely communication between ClipProcessingJobs and the processing effect classes
    ProcessingController *processingController;
    bool json_interval; // True when the JSON config supplied a "first-frame"
    size_t start; // First frame of the processing interval
    size_t end;   // Last frame of the processing interval
    bool error = false; // Set when JSON configuration failed
    // Initialize the tracker
    bool initTracker(cv::Mat &frame, size_t frameId);
    // Update the object tracker according to frame
    bool trackFrame(cv::Mat &frame, size_t frameId);
    public:
    // Constructor
    CVTracker(std::string processInfoJson, ProcessingController &processingController);
    // Set desirable tracker method
    cv::Ptr<cv::Tracker> selectTracker(std::string trackerType);
    // Track object in the whole clip or in a given interval
    // If start, end and process_interval are passed as argument, clip will be processed in [start,end)
    void trackClip(openshot::Clip& video, size_t _start=0, size_t _end=0, bool process_interval=false);
    // Get tracked data for a given frame (default-constructed when absent)
    FrameData GetTrackedData(size_t frameId);
    /// Protobuf Save and Load methods
    // Save protobuf file
    bool SaveTrackedData();
    // Add frame tracked data into protobuf message.
    void AddFrameDataToProto(pb_tracker::Frame* pbFrameData, FrameData& fData);
    /// Get and Set JSON methods
    void SetJson(const std::string value); ///< Load JSON string into this object
    void SetJsonValue(const Json::Value root); ///< Load Json::Value into this object
    // Load protobuf file (ONLY FOR MAKE TEST)
    bool _LoadTrackedData();
};
}
#endif

View File

@@ -983,11 +983,11 @@ void Clip::SetJsonValue(const Json::Value root) {
for (const auto existing_effect : root["effects"]) {
// Create Effect
EffectBase *e = NULL;
if (!existing_effect["type"].isNull()) {
// Create instance of effect
if ( (e = EffectInfo().CreateEffect(existing_effect["type"].asString())) ) {
// Create instance of effect
if ( (e = EffectInfo().CreateEffect(existing_effect["type"].asString()))) {
// Load Json into Effect
e->SetJsonValue(existing_effect);

View File

@@ -31,6 +31,16 @@
#ifndef OPENSHOT_CLIP_H
#define OPENSHOT_CLIP_H
#ifdef USE_OPENCV
#define int64 opencv_broken_int
#define uint64 opencv_broken_uint
#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
#undef uint64
#undef int64
#endif
#include <memory>
#include <string>
#include <QtGui/QImage>
@@ -46,7 +56,9 @@
#include "ReaderBase.h"
#include "JuceHeader.h"
namespace openshot {
class EffectInfo;
/// Comparison method for sorting effect pointers (by Position, Layer, and Order). Effects are sorted
/// from lowest layer to top layer (since that is sequence clips are combined), and then by
@@ -148,6 +160,8 @@ namespace openshot {
/// Reverse an audio buffer
void reverse_buffer(juce::AudioSampleBuffer* buffer);
public:
openshot::GravityType gravity; ///< The gravity of a clip determines where it snaps to its parent
openshot::ScaleType scale; ///< The scale determines how a clip should be resized to fit its parent
@@ -155,6 +169,12 @@ namespace openshot {
openshot::FrameDisplayType display; ///< The format to display the frame number (if any)
openshot::VolumeMixType mixing; ///< What strategy should be followed when mixing audio with other clips
#ifdef USE_OPENCV
bool COMPILED_WITH_CV = true;
#else
bool COMPILED_WITH_CV = false;
#endif
/// Default Constructor
Clip();
@@ -178,8 +198,6 @@ namespace openshot {
/// Return the type name of the class
std::string Name() override { return "Clip"; };
/// @brief Add an effect to the clip
/// @param effect Add an effect to the clip. An effect can modify the audio or video of an openshot::Frame.
void AddEffect(openshot::EffectBase* effect);

115
src/ClipProcessingJobs.cpp Normal file
View File

@@ -0,0 +1,115 @@
#include "ClipProcessingJobs.h"
// Constructor: store which processing type to run and its JSON configuration.
// Members are listed in declaration order (processInfoJson first, matching the
// header) so initialization order is what it reads as and -Wreorder stays quiet;
// by-value params are moved instead of copied.
ClipProcessingJobs::ClipProcessingJobs(std::string processingType, std::string processInfoJson) :
	processInfoJson(std::move(processInfoJson)), processingType(std::move(processingType)){
}
// Launch the worker thread that matches processingType on the given clip.
// The JSON config is refreshed first so the worker sees the latest settings.
void ClipProcessingJobs::processClip(Clip& clip, std::string json){
	processInfoJson = json;

	// The three processing types are mutually exclusive, so dispatch with an
	// else-if chain; an unrecognized type simply starts no thread.
	if (processingType == "Stabilizer") {
		t = std::thread(&ClipProcessingJobs::stabilizeClip, this, std::ref(clip), std::ref(this->processingController));
	}
	else if (processingType == "Tracker") {
		t = std::thread(&ClipProcessingJobs::trackClip, this, std::ref(clip), std::ref(this->processingController));
	}
	else if (processingType == "Object Detector") {
		t = std::thread(&ClipProcessingJobs::detectObjectsClip, this, std::ref(clip), std::ref(this->processingController));
	}
}
// Worker body: run object tracking over the clip, save the results unless the
// user cancelled, and always signal the UI that the job finished.
void ClipProcessingJobs::trackClip(Clip& clip, ProcessingController& controller){
	// Build the tracker from this job's JSON configuration
	CVTracker tracker(processInfoJson, controller);

	// Iterate over the clip frames, tracking the object
	tracker.trackClip(clip);

	// Persist the tracked data only when the job was not cancelled mid-run
	if (!controller.ShouldStop())
		tracker.SaveTrackedData();

	// Tell the UI the processing finished (whether completed or cancelled)
	controller.SetFinished(true);
}
// Worker body: run object detection over the clip, save the results unless the
// user cancelled, and always signal the UI that the job finished.
void ClipProcessingJobs::detectObjectsClip(Clip& clip, ProcessingController& controller){
	// Build the detector from this job's JSON configuration
	CVObjectDetection objDetector(processInfoJson, controller);

	// Iterate over the clip frames, detecting objects
	objDetector.detectObjectsClip(clip);

	// Persist the detection data only when the job was not cancelled mid-run
	if (!controller.ShouldStop())
		objDetector.SaveObjDetectedData();

	// Tell the UI the processing finished (whether completed or cancelled)
	controller.SetFinished(true);
}
// Worker body: run stabilization over the clip, save the results unless the
// user cancelled, and always signal the UI that the job finished.
void ClipProcessingJobs::stabilizeClip(Clip& clip, ProcessingController& controller){
	// Build the stabilizer from this job's JSON configuration
	CVStabilization stabilizer(processInfoJson, controller);

	// Iterate over the clip frames, computing stabilization data
	stabilizer.stabilizeClip(clip);

	// Persist the stabilization data only when the job was not cancelled mid-run
	if (!controller.ShouldStop())
		stabilizer.SaveStabilizedData();

	// Tell the UI the processing finished (whether completed or cancelled)
	controller.SetFinished(true);
}
// Report the processing progress gathered from the worker thread's controller.
int ClipProcessingJobs::GetProgress(){
	// static_cast instead of a C-style cast: documents the intentional
	// narrowing of the controller's progress value to int.
	return static_cast<int>(processingController.GetProgress());
}
// Check if processing finished; joins the worker thread exactly once.
bool ClipProcessingJobs::IsDone(){
	if(processingController.GetFinished()){
		// Guard with joinable(): calling join() on a thread that was already
		// joined (this method may be polled repeatedly after completion) or
		// was never started (unknown processingType) throws std::system_error.
		if (t.joinable())
			t.join();
	}
	return processingController.GetFinished();
}
// Request early cancellation of the in-flight pre-processing job.
// The worker notices through its ProcessingController checks and stops itself.
void ClipProcessingJobs::CancelProcessing(){
	processingController.CancelProcessing();
}
// Return true when the worker reported a configuration/processing error.
bool ClipProcessingJobs::GetError(){
	return processingController.GetError();
}
// Return the human-readable error message set by the worker (if any).
std::string ClipProcessingJobs::GetErrorMessage(){
	return processingController.GetErrorMessage();
}

88
src/ClipProcessingJobs.h Normal file
View File

@@ -0,0 +1,88 @@
/**
* @file
* @brief Header for the ClipProcessingJobs class
* @author Jonathan Thomas <jonathan@openshot.org>
*
* @ref License
*/
/* LICENSE
*
* Copyright (c) 2008-2019 OpenShot Studios, LLC
* <http://www.openshotstudios.com/>. This file is part of
* OpenShot Library (libopenshot), an open-source project dedicated to
* delivering high quality video editing and animation solutions to the
* world. For more information visit <http://www.openshot.org/>.
*
* OpenShot Library (libopenshot) is free software: you can redistribute it
* and/or modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* OpenShot Library (libopenshot) is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
*/
#ifdef USE_OPENCV
#define int64 opencv_broken_int
#define uint64 opencv_broken_uint
#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
#undef uint64
#undef int64
#include "CVStabilization.h"
#include "CVTracker.h"
#include "CVObjectDetection.h"
#endif
#include <thread>
#include "ProcessingController.h"
#include "Clip.h"
using namespace openshot;
// Dispatches a clip pre-processing job (tracking, stabilization or object
// detection) onto a worker thread and exposes progress/cancel/error queries.
class ClipProcessingJobs{
private:
std::string processInfoJson; // JSON configuration handed to the CV effect class
std::string processingType; // One of: "Stabilizer", "Tracker", "Object Detector"
bool processingDone = false; // NOTE(review): appears unused here — status lives in processingController; confirm before removing
bool stopProcessing = false; // NOTE(review): appears unused here — cancellation goes through processingController; confirm before removing
uint processingProgress = 0; // NOTE(review): appears unused here — progress is read from processingController; confirm before removing
std::thread t; // Worker thread running the selected processing job
/// Handles thread-safe communication between ClipProcessingJobs and the processing effect classes
ProcessingController processingController;
// Apply object tracking to clip (runs on the worker thread)
void trackClip(Clip& clip, ProcessingController& controller);
// Apply stabilization to clip (runs on the worker thread)
void stabilizeClip(Clip& clip, ProcessingController& controller);
// Apply object detection to clip (runs on the worker thread)
void detectObjectsClip(Clip& clip, ProcessingController& controller);
public:
// Constructor: choose the processing type and store its JSON configuration
ClipProcessingJobs(std::string processingType, std::string processInfoJson);
// Start processing the clip according to processingType (spawns the thread)
void processClip(Clip& clip, std::string json);
// Thread related variables and methods
int GetProgress(); // Current progress reported by the worker
bool IsDone(); // True when the worker finished (joins the thread)
void CancelProcessing(); // Ask the worker to stop early
bool GetError(); // True when the worker reported an error
std::string GetErrorMessage(); // Error message set by the worker
};

View File

@@ -87,6 +87,18 @@ EffectBase* EffectInfo::CreateEffect(std::string effect_type) {
else if (effect_type == "Wave")
return new Wave();
#ifdef USE_OPENCV
else if(effect_type == "Stabilizer")
return new Stabilizer();
else if(effect_type == "Tracker")
return new Tracker();
else if(effect_type == "Object Detector")
return new ObjectDetection();
#endif
return NULL;
}
@@ -113,6 +125,12 @@ Json::Value EffectInfo::JsonValue() {
root.append(Shift().JsonInfo());
root.append(Wave().JsonInfo());
#ifdef USE_OPENCV
root.append(Stabilizer().JsonInfo());
root.append(Tracker().JsonInfo());
root.append(ObjectDetection().JsonInfo());
#endif
// return JsonValue
return root;

Some files were not shown because too many files have changed in this diff Show More