2013-09-11 17:32:40 -05:00
|
|
|
/**
|
|
|
|
|
* @file
|
|
|
|
|
* @brief Source file for Clip class
|
|
|
|
|
* @author Jonathan Thomas <jonathan@openshot.org>
|
|
|
|
|
*
|
2019-06-09 08:31:04 -04:00
|
|
|
* @ref License
|
|
|
|
|
*/
|
|
|
|
|
|
2021-10-16 01:26:26 -04:00
|
|
|
// Copyright (c) 2008-2019 OpenShot Studios, LLC
|
|
|
|
|
//
|
|
|
|
|
// SPDX-License-Identifier: LGPL-3.0-or-later
|
2013-09-11 17:32:40 -05:00
|
|
|
|
2020-10-18 07:43:37 -04:00
|
|
|
#include "Clip.h"
|
2021-10-27 14:34:05 -04:00
|
|
|
|
|
|
|
|
#include "AudioResampler.h"
|
2021-01-26 10:52:04 -05:00
|
|
|
#include "Exceptions.h"
|
2020-10-18 07:43:37 -04:00
|
|
|
#include "FFmpegReader.h"
|
|
|
|
|
#include "FrameMapper.h"
|
|
|
|
|
#include "QtImageReader.h"
|
|
|
|
|
#include "ChunkReader.h"
|
|
|
|
|
#include "DummyReader.h"
|
|
|
|
|
#include "Timeline.h"
|
2021-11-01 11:04:31 -04:00
|
|
|
#include "ZmqLogger.h"
|
|
|
|
|
|
|
|
|
|
#ifdef USE_IMAGEMAGICK
|
2023-02-27 22:11:13 -06:00
|
|
|
#include "MagickUtilities.h"
|
|
|
|
|
#include "ImageReader.h"
|
|
|
|
|
#include "TextReader.h"
|
2021-11-01 11:04:31 -04:00
|
|
|
#endif
|
2012-10-03 01:55:24 -05:00
|
|
|
|
2021-10-27 00:26:56 -04:00
|
|
|
#include <Qt>
|
2012-10-03 01:55:24 -05:00
|
|
|
|
|
|
|
|
using namespace openshot;
|
|
|
|
|
|
2012-10-04 01:34:45 -05:00
|
|
|
// Init default settings for a clip
//
// Called by every constructor before any reader is examined. Resets all
// positional, transform, audio and attachment state to neutral values,
// then delegates reader-derived state to init_reader_settings().
void Clip::init_settings()
{
	// Init clip settings (position on timeline, layer, and trim points)
	Position(0.0);
	Layer(0);
	Start(0.0);
	ClipBase::End(0.0);
	gravity = GRAVITY_CENTER;
	scale = SCALE_FIT;
	anchor = ANCHOR_CANVAS;
	display = FRAME_DISPLAY_NONE;
	mixing = VOLUME_MIX_NONE;
	waveform = false;
	previous_properties = "";
	parentObjectId = "";

	// Init scale curves (1.0 == 100%, i.e. no scaling)
	scale_x = Keyframe(1.0);
	scale_y = Keyframe(1.0);

	// Init location curves (0.0 == no offset)
	location_x = Keyframe(0.0);
	location_y = Keyframe(0.0);

	// Init alpha (1.0 == fully opaque)
	alpha = Keyframe(1.0);

	// Init time & volume (1.0 == normal speed / full volume)
	time = Keyframe(1.0);
	volume = Keyframe(1.0);

	// Init audio waveform color (RGBA; casts disambiguate the Color overload)
	wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255);

	// Init shear and perspective curves
	shear_x = Keyframe(0.0);
	shear_y = Keyframe(0.0);
	// Transform origin defaults to the center of the clip (0.5, 0.5)
	origin_x = Keyframe(0.5);
	origin_y = Keyframe(0.5);
	// Perspective corners: -1.0 appears to act as the "unset" sentinel
	// (consistent with the other -1.0 sentinels below) — TODO confirm
	perspective_c1_x = Keyframe(-1.0);
	perspective_c1_y = Keyframe(-1.0);
	perspective_c2_x = Keyframe(-1.0);
	perspective_c2_y = Keyframe(-1.0);
	perspective_c3_x = Keyframe(-1.0);
	perspective_c3_y = Keyframe(-1.0);
	perspective_c4_x = Keyframe(-1.0);
	perspective_c4_y = Keyframe(-1.0);

	// Init audio channel filter and mappings (-1.0 == disabled / pass-through)
	channel_filter = Keyframe(-1.0);
	channel_mapping = Keyframe(-1.0);

	// Init audio and video overrides (-1.0 == no override, use reader defaults)
	has_audio = Keyframe(-1.0);
	has_video = Keyframe(-1.0);

	// Initialize the attached object and attached clip as null pointers
	parentTrackedObject = nullptr;
	parentClipObject = NULL;

	// Init reader info struct (no-op until a reader is assigned)
	init_reader_settings();
}
|
|
|
|
|
|
|
|
|
|
// Init reader info details
//
// Copies reader-derived state into this Clip. Safe to call with no reader
// assigned (it simply does nothing).
void Clip::init_reader_settings() {
	if (reader) {
		// Init rotation (if any) from reader metadata
		init_reader_rotation();

		// Initialize info struct with a copy of the reader's stream info
		info = reader->info;

		// Init cache limit from the reader's frame geometry and audio layout
		// (the leading 8 presumably sizes the cache in frames — TODO confirm
		// against CacheMemory::SetMaxBytesFromInfo)
		final_cache.SetMaxBytesFromInfo(8, info.width, info.height, info.sample_rate, info.channels);
	}
}
|
|
|
|
|
|
2018-02-03 01:57:18 -06:00
|
|
|
// Init reader's rotation (if any)
|
|
|
|
|
void Clip::init_reader_rotation() {
|
2021-07-07 15:29:42 -05:00
|
|
|
// Dont init rotation if clip has keyframes
|
2021-07-01 12:08:27 -05:00
|
|
|
if (rotation.GetCount() > 0)
|
2018-02-03 01:57:18 -06:00
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
// Init rotation
|
|
|
|
|
if (reader && reader->info.metadata.count("rotate") > 0) {
|
|
|
|
|
// Use reader metadata rotation (if any)
|
|
|
|
|
// This is typical with cell phone videos filmed in different orientations
|
|
|
|
|
try {
|
|
|
|
|
float rotate_metadata = strtof(reader->info.metadata["rotate"].c_str(), 0);
|
|
|
|
|
rotation = Keyframe(rotate_metadata);
|
2019-07-03 12:58:02 -04:00
|
|
|
} catch (const std::exception& e) {}
|
2018-02-03 01:57:18 -06:00
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
// Default no rotation
|
|
|
|
|
rotation = Keyframe(0.0);
|
|
|
|
|
}
|
|
|
|
|
|
2012-10-04 01:34:45 -05:00
|
|
|
// Default Constructor for a clip
|
2020-08-26 13:12:42 -05:00
|
|
|
Clip::Clip() : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
|
2012-10-04 01:34:45 -05:00
|
|
|
{
|
|
|
|
|
// Init all default settings
|
|
|
|
|
init_settings();
|
|
|
|
|
}
|
|
|
|
|
|
2012-10-09 01:45:34 -05:00
|
|
|
// Constructor with reader
//
// Wraps an externally-owned reader. Note: allocated_reader stays NULL here,
// so the destructor will NOT delete new_reader — the caller retains ownership
// (contrast with the filepath constructor, which sets allocated_reader).
Clip::Clip(ReaderBase* new_reader) : resampler(NULL), reader(new_reader), allocated_reader(NULL), is_open(false)
{
	// Init all default settings
	init_settings();

	// Open and Close the reader (to set the duration of the clip)
	Open();
	Close();

	// Update duration and set parent
	if (reader) {
		ClipBase::End(reader->info.duration);
		reader->ParentClip(this);
		// Init reader info struct (copies reader info, rotation, cache limits)
		init_reader_settings();
	}
}
|
|
|
|
|
|
2012-10-04 01:34:45 -05:00
|
|
|
// Constructor with filepath
//
// Guesses an appropriate reader from the file extension, falling back
// through QtImageReader and FFmpegReader if the extension is unknown.
// Readers created here are owned by this Clip (allocated_reader is set,
// so the destructor deletes them). All reader-open failures are silently
// swallowed; on total failure the clip simply has no reader.
Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
{
	// Init all default settings
	init_settings();

	// Get file extension (and convert to lower case)
	std::string ext = get_file_extension(path);
	std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);

	// Determine if common video formats (or image sequences — a '%' in the
	// path indicates a printf-style frame-number pattern)
	if (ext=="avi" || ext=="mov" || ext=="mkv" || ext=="mpg" || ext=="mpeg" || ext=="mp3" || ext=="mp4" || ext=="mts" ||
		ext=="ogg" || ext=="wav" || ext=="wmv" || ext=="webm" || ext=="vob" || path.find("%") != std::string::npos)
	{
		try
		{
			// Open common video format
			reader = new openshot::FFmpegReader(path);

		} catch(...) { }
	}
	// OpenShot project files open as a nested Timeline
	if (ext=="osp")
	{
		try
		{
			// Open openshot project file
			reader = new openshot::Timeline(path, true);

		} catch(...) { }
	}

	// If no video found, try each reader
	if (!reader)
	{
		try
		{
			// Try an image reader first
			reader = new openshot::QtImageReader(path);

		} catch(...) {
			try
			{
				// Then fall back to a video reader
				reader = new openshot::FFmpegReader(path);

			} catch(...) { }
		}
	}

	// Update duration and set parent
	if (reader) {
		ClipBase::End(reader->info.duration);
		reader->ParentClip(this);
		// Record ownership: this reader was allocated here and must be
		// deleted by ~Clip()
		allocated_reader = reader;
		// Init reader info struct
		init_reader_settings();
	}
}
|
|
|
|
|
|
|
|
|
|
// Destructor
Clip::~Clip()
{
	// Delete the reader if clip created it (allocated_reader is only set by
	// the filepath constructor / Reader() setter). reader is nulled too so
	// the Close() call below cannot touch the freed object.
	if (allocated_reader) {
		delete allocated_reader;
		allocated_reader = NULL;
		reader = NULL;
	}

	// Close the resampler
	if (resampler) {
		delete resampler;
		resampler = NULL;
	}

	// Close clip — with reader already nulled this only clears the frame
	// cache and flips is_open
	Close();
}
|
|
|
|
|
|
2020-12-22 21:32:36 -03:00
|
|
|
// Attach clip to bounding box
|
2021-01-27 17:41:39 -03:00
|
|
|
void Clip::AttachToObject(std::string object_id)
|
2020-12-22 21:32:36 -03:00
|
|
|
{
|
|
|
|
|
// Search for the tracked object on the timeline
|
2023-03-10 01:15:14 -06:00
|
|
|
Timeline* parentTimeline = static_cast<Timeline *>(ParentTimeline());
|
2021-01-18 15:30:11 -03:00
|
|
|
|
2021-01-27 17:41:39 -03:00
|
|
|
if (parentTimeline) {
|
2021-01-18 15:30:11 -03:00
|
|
|
// Create a smart pointer to the tracked object from the timeline
|
2021-01-27 17:41:39 -03:00
|
|
|
std::shared_ptr<openshot::TrackedObjectBase> trackedObject = parentTimeline->GetTrackedObject(object_id);
|
|
|
|
|
Clip* clipObject = parentTimeline->GetClip(object_id);
|
|
|
|
|
|
2021-01-18 15:30:11 -03:00
|
|
|
// Check for valid tracked object
|
|
|
|
|
if (trackedObject){
|
|
|
|
|
SetAttachedObject(trackedObject);
|
|
|
|
|
}
|
2021-01-27 17:41:39 -03:00
|
|
|
else if (clipObject) {
|
|
|
|
|
SetAttachedClip(clipObject);
|
|
|
|
|
}
|
2020-12-22 21:32:36 -03:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Set the pointer to the trackedObject this clip is attached to
|
2021-01-18 14:52:01 -03:00
|
|
|
void Clip::SetAttachedObject(std::shared_ptr<openshot::TrackedObjectBase> trackedObject){
|
2021-01-27 17:54:49 -03:00
|
|
|
parentTrackedObject = trackedObject;
|
2020-12-22 21:32:36 -03:00
|
|
|
}
|
|
|
|
|
|
2021-01-27 17:41:39 -03:00
|
|
|
// Set the pointer to the clip this clip is attached to
|
|
|
|
|
void Clip::SetAttachedClip(Clip* clipObject){
|
2021-01-27 17:54:49 -03:00
|
|
|
parentClipObject = clipObject;
|
2021-01-27 17:41:39 -03:00
|
|
|
}
|
|
|
|
|
|
2012-10-09 01:45:34 -05:00
|
|
|
/// Set the current reader
|
2013-12-07 21:09:55 -06:00
|
|
|
void Clip::Reader(ReaderBase* new_reader)
|
2012-10-09 01:45:34 -05:00
|
|
|
{
|
2023-02-27 22:11:13 -06:00
|
|
|
// Delete previously allocated reader (if not related to new reader)
|
|
|
|
|
// FrameMappers that point to the same allocated reader are ignored
|
|
|
|
|
bool is_same_reader = false;
|
|
|
|
|
if (new_reader && allocated_reader) {
|
|
|
|
|
if (new_reader->Name() == "FrameMapper") {
|
|
|
|
|
// Determine if FrameMapper is pointing at the same allocated ready
|
2023-03-10 01:15:14 -06:00
|
|
|
FrameMapper* clip_mapped_reader = static_cast<FrameMapper*>(new_reader);
|
2023-02-27 22:11:13 -06:00
|
|
|
if (allocated_reader == clip_mapped_reader->Reader()) {
|
|
|
|
|
is_same_reader = true;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
// Clear existing allocated reader (if different)
|
|
|
|
|
if (allocated_reader && !is_same_reader) {
|
|
|
|
|
reader->Close();
|
|
|
|
|
allocated_reader->Close();
|
|
|
|
|
delete allocated_reader;
|
|
|
|
|
reader = NULL;
|
|
|
|
|
allocated_reader = NULL;
|
|
|
|
|
}
|
2022-10-28 11:00:47 -05:00
|
|
|
|
2012-10-09 01:45:34 -05:00
|
|
|
// set reader pointer
|
2013-12-07 21:09:55 -06:00
|
|
|
reader = new_reader;
|
2018-02-03 01:57:18 -06:00
|
|
|
|
2019-01-19 02:18:52 -06:00
|
|
|
// set parent
|
2022-10-28 11:00:47 -05:00
|
|
|
if (reader) {
|
2023-02-27 22:11:13 -06:00
|
|
|
reader->ParentClip(this);
|
2019-01-19 02:18:52 -06:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
// Init reader info struct
|
|
|
|
|
init_reader_settings();
|
|
|
|
|
}
|
2012-10-09 01:45:34 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Get the current reader
|
2017-10-26 18:44:35 -05:00
|
|
|
ReaderBase* Clip::Reader()
|
2012-10-09 01:45:34 -05:00
|
|
|
{
|
2013-12-07 21:09:55 -06:00
|
|
|
if (reader)
|
|
|
|
|
return reader;
|
2013-09-28 22:00:52 -05:00
|
|
|
else
|
|
|
|
|
// Throw error if reader not initialized
|
2019-08-27 15:47:39 -04:00
|
|
|
throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
|
2012-10-04 16:07:58 -05:00
|
|
|
}
|
|
|
|
|
|
2012-10-08 16:22:18 -05:00
|
|
|
// Open the internal reader
//
// Opens the underlying reader, copies its info into this clip, and (if the
// clip's End was never set) adopts the reader's duration as the end trim.
// @throws ReaderClosed if no reader has been assigned
void Clip::Open()
{
	if (reader)
	{
		// Open the reader
		reader->Open();
		is_open = true;

		// Copy Reader info to Clip
		info = reader->info;

		// Set some clip properties from the file reader
		// (end == 0.0 means the user never set an explicit end trim)
		if (end == 0.0)
			ClipBase::End(reader->info.duration);
	}
	else
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}
|
|
|
|
|
|
|
|
|
|
// Close the internal reader
//
// Closes the reader (if open), then unconditionally clears the clip's
// frame cache and marks the clip closed. Safe to call repeatedly and
// with no reader assigned.
void Clip::Close()
{
	if (is_open && reader) {
		ZmqLogger::Instance()->AppendDebugMethod("Clip::Close");

		// Close the reader
		reader->Close();
	}

	// Clear cache (runs even when the clip was never opened)
	final_cache.Clear();
	is_open = false;
}
|
|
|
|
|
|
2012-10-14 02:36:05 -05:00
|
|
|
// Get end position of clip (trim end of video), which can be affected by the time curve.
//
// @return end time in seconds: derived from the time-mapping curve length
//         when a curve is present, otherwise the stored `end` value
// @throws ReaderClosed if a time curve exists but no reader is assigned
//         (the reader supplies the FPS needed to convert frames to seconds)
float Clip::End() const
{
	// if a time curve is present, use its length
	if (time.GetCount() > 1)
	{
		// Determine the FPS of this clip
		// (the 24.0 initializer is never actually used: it is either
		// overwritten from the reader or the throw below fires)
		float fps = 24.0;
		if (reader)
			// file reader
			fps = reader->info.fps.ToFloat();
		else
			// Throw error if reader not initialized
			throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

		// Length of the time curve in frames, converted to seconds
		return float(time.GetLength()) / fps;
	}
	else
		// just use the duration (as detected by the reader)
		return end;
}
|
|
|
|
|
|
2022-10-22 22:48:18 -05:00
|
|
|
// Override End() position
|
|
|
|
|
void Clip::End(float value) {
|
|
|
|
|
ClipBase::End(value);
|
|
|
|
|
}
|
|
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
// Set associated Timeline pointer
|
|
|
|
|
void Clip::ParentTimeline(openshot::TimelineBase* new_timeline) {
|
|
|
|
|
timeline = new_timeline;
|
2020-08-26 13:12:42 -05:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
// Clear cache (it might have changed)
|
|
|
|
|
final_cache.Clear();
|
2020-08-26 17:05:50 -05:00
|
|
|
}
|
|
|
|
|
|
2021-05-18 14:25:36 -05:00
|
|
|
// Create an openshot::Frame object for a specific frame number of this reader.
|
2023-02-27 22:11:13 -06:00
|
|
|
std::shared_ptr<Frame> Clip::GetFrame(int64_t clip_frame_number)
|
2021-05-18 14:25:36 -05:00
|
|
|
{
|
2023-02-27 22:11:13 -06:00
|
|
|
// Call override of GetFrame
|
|
|
|
|
return GetFrame(NULL, clip_frame_number, NULL);
|
|
|
|
|
}
|
2021-05-18 14:25:36 -05:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
// Create an openshot::Frame object for a specific frame number of this reader.
|
|
|
|
|
// NOTE: background_frame is ignored in this method (this method is only used by Effect classes)
|
|
|
|
|
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t clip_frame_number)
|
|
|
|
|
{
|
|
|
|
|
// Call override of GetFrame
|
|
|
|
|
return GetFrame(background_frame, clip_frame_number, NULL);
|
2021-05-18 14:25:36 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Use an existing openshot::Frame object and draw this Clip's frame onto it
//
// Full rendering pipeline for one clip frame. The stage order below is
// load-bearing: time-mapping → waveform → pre-keyframe effects → keyframe
// transforms → post-keyframe effects → background flatten → cache.
// @throws ReaderClosed if the clip is not open, or has no reader
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t clip_frame_number, openshot::TimelineInfoStruct* options)
{
	// Check for open reader (or throw exception)
	if (!is_open)
		throw ReaderClosed("The Clip is closed. Call Open() before calling this method.");

	if (reader)
	{
		// Get frame object
		std::shared_ptr<Frame> frame = NULL;

		// Check cache — a hit short-circuits the whole pipeline
		frame = final_cache.GetFrame(clip_frame_number);
		if (frame) {
			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod(
				"Clip::GetFrame (Cached frame found)",
				"requested_frame", clip_frame_number);

			// Return cached frame
			return frame;
		}

		// Generate clip frame
		frame = GetOrCreateFrame(clip_frame_number);

		if (!background_frame) {
			// Create missing background_frame w/ transparent color (if needed),
			// matching this frame's dimensions and audio layout
			background_frame = std::make_shared<Frame>(clip_frame_number, frame->GetWidth(), frame->GetHeight(),
													   "#00000000", frame->GetAudioSamplesCount(),
													   frame->GetAudioChannelsCount());
		}

		// Get time mapped frame object (used to increase speed, change direction, etc...)
		apply_timemapping(frame);

		// Apply waveform image (if any)
		apply_waveform(frame, background_frame);

		// Apply effects BEFORE applying keyframes (if any local or global effects are used)
		apply_effects(frame, background_frame, options, true);

		// Apply keyframe / transforms to current clip image
		apply_keyframes(frame, background_frame);

		// Apply effects AFTER applying keyframes (if any local or global effects are used)
		apply_effects(frame, background_frame, options, false);

		// Apply background canvas (i.e. flatten this image onto previous layer image)
		apply_background(frame, background_frame);

		// Add final frame to cache
		final_cache.Add(frame);

		// Return processed 'frame'
		return frame;
	}
	else
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}
|
|
|
|
|
|
2020-09-01 22:53:46 -04:00
|
|
|
// Look up an effect by ID
|
|
|
|
|
openshot::EffectBase* Clip::GetEffect(const std::string& id)
|
|
|
|
|
{
|
|
|
|
|
// Find the matching effect (if any)
|
|
|
|
|
for (const auto& effect : effects) {
|
|
|
|
|
if (effect->Id() == id) {
|
|
|
|
|
return effect;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return nullptr;
|
|
|
|
|
}
|
|
|
|
|
|
2012-10-04 18:02:46 -05:00
|
|
|
// Get file extension
|
2019-08-04 23:08:19 -04:00
|
|
|
std::string Clip::get_file_extension(std::string path)
|
2012-10-04 18:02:46 -05:00
|
|
|
{
|
|
|
|
|
// return last part of path
|
|
|
|
|
return path.substr(path.find_last_of(".") + 1);
|
|
|
|
|
}
|
2012-10-10 02:36:53 -05:00
|
|
|
|
2012-10-21 05:29:29 -05:00
|
|
|
// Reverse an audio buffer
//
// Reverses the sample order of every channel in `buffer`, in place,
// using a temporary buffer of the same size.
// @param buffer the JUCE audio buffer to reverse (modified in place)
void Clip::reverse_buffer(juce::AudioBuffer<float>* buffer)
{
	int number_of_samples = buffer->getNumSamples();
	int channels = buffer->getNumChannels();

	// Reverse array (create new buffer to hold the reversed version)
	auto *reversed = new juce::AudioBuffer<float>(channels, number_of_samples);
	reversed->clear();

	for (int channel = 0; channel < channels; channel++)
	{
		int n=0;
		for (int s = number_of_samples - 1; s >= 0; s--, n++)
			// NOTE(review): the source buffer is only read here, yet
			// getWritePointer is used — getReadPointer would express intent
			// better; confirm before changing (JUCE marks buffers dirty on
			// write-pointer access)
			reversed->getWritePointer(channel)[n] = buffer->getWritePointer(channel)[s];
	}

	// Copy the samples back to the original array
	buffer->clear();
	// Loop through channels, and get audio samples
	for (int channel = 0; channel < channels; channel++)
		// Mix the reversed samples back in at gain 1.0 (buffer was cleared,
		// so this is effectively a copy)
		buffer->addFrom(channel, 0, reversed->getReadPointer(channel), number_of_samples, 1.0f);

	delete reversed;
}
|
|
|
|
|
|
|
|
|
|
// Adjust the audio and image of a time mapped frame
|
2023-02-27 22:11:13 -06:00
|
|
|
void Clip::apply_timemapping(std::shared_ptr<Frame> frame)
|
2012-10-21 05:29:29 -05:00
|
|
|
{
|
2013-09-28 22:00:52 -05:00
|
|
|
// Check for valid reader
|
2013-12-07 21:09:55 -06:00
|
|
|
if (!reader)
|
2013-09-28 22:00:52 -05:00
|
|
|
// Throw error if reader not initialized
|
2019-08-27 15:47:39 -04:00
|
|
|
throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
|
2013-09-28 22:00:52 -05:00
|
|
|
|
2012-10-10 02:36:53 -05:00
|
|
|
// Check for a valid time map curve
|
2019-11-19 23:43:28 +01:00
|
|
|
if (time.GetLength() > 1)
|
2012-10-10 02:36:53 -05:00
|
|
|
{
|
2021-10-27 14:34:05 -04:00
|
|
|
const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);
|
2015-12-28 02:41:32 -06:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
int64_t clip_frame_number = frame->number;
|
|
|
|
|
int64_t new_frame_number = adjust_frame_number_minimum(time.GetLong(clip_frame_number));
|
2012-10-10 02:36:53 -05:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
// create buffer
|
|
|
|
|
juce::AudioBuffer<float> *source_samples = nullptr;
|
2012-10-21 05:29:29 -05:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
// Get delta (difference from this frame to the next time mapped frame: Y value)
|
|
|
|
|
double delta = time.GetDelta(clip_frame_number + 1);
|
|
|
|
|
bool is_increasing = time.IsIncreasing(clip_frame_number + 1);
|
2012-10-21 05:29:29 -05:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
// Determine length of source audio (in samples)
|
|
|
|
|
// A delta of 1.0 == normal expected samples
|
|
|
|
|
// A delta of 0.5 == 50% of normal expected samples
|
|
|
|
|
// A delta of 2.0 == 200% of normal expected samples
|
|
|
|
|
int target_sample_count = Frame::GetSamplesPerFrame(adjust_timeline_framenumber(clip_frame_number), Reader()->info.fps,
|
|
|
|
|
Reader()->info.sample_rate,
|
|
|
|
|
Reader()->info.channels);
|
|
|
|
|
int source_sample_count = round(target_sample_count * fabs(delta));
|
2012-10-21 05:29:29 -05:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
// Determine starting audio location
|
|
|
|
|
AudioLocation location;
|
|
|
|
|
if (previous_location.frame == 0 || abs(new_frame_number - previous_location.frame) > 2) {
|
|
|
|
|
// No previous location OR gap detected
|
|
|
|
|
location.frame = new_frame_number;
|
|
|
|
|
location.sample_start = 0;
|
2012-10-21 05:29:29 -05:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
// Create / Reset resampler
|
|
|
|
|
// We don't want to interpolate between unrelated audio data
|
|
|
|
|
if (resampler) {
|
|
|
|
|
delete resampler;
|
2012-10-21 05:29:29 -05:00
|
|
|
}
|
2023-02-27 22:11:13 -06:00
|
|
|
// Init resampler with # channels from Reader (should match the timeline)
|
|
|
|
|
resampler = new AudioResampler(Reader()->info.channels);
|
2023-03-17 16:50:51 -05:00
|
|
|
|
|
|
|
|
// Allocate buffer of silence to initialize some data inside the resampler
|
|
|
|
|
// To prevent it from becoming input limited
|
|
|
|
|
juce::AudioBuffer<float> init_samples(Reader()->info.channels, 64);
|
|
|
|
|
init_samples.clear();
|
|
|
|
|
resampler->SetBuffer(&init_samples, 1.0);
|
|
|
|
|
resampler->GetResampledBuffer();
|
|
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
} else {
|
|
|
|
|
// Use previous location
|
|
|
|
|
location = previous_location;
|
2012-10-21 05:29:29 -05:00
|
|
|
}
|
2023-02-27 22:11:13 -06:00
|
|
|
|
|
|
|
|
if (source_sample_count <= 0) {
|
|
|
|
|
// Add silence and bail (we don't need any samples)
|
|
|
|
|
frame->AddAudioSilence(target_sample_count);
|
|
|
|
|
return;
|
|
|
|
|
}
|
2023-02-28 14:13:12 -06:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
// Allocate a new sample buffer for these delta frames
|
|
|
|
|
source_samples = new juce::AudioBuffer<float>(Reader()->info.channels, source_sample_count);
|
|
|
|
|
source_samples->clear();
|
|
|
|
|
|
|
|
|
|
// Determine ending audio location
|
|
|
|
|
int remaining_samples = source_sample_count;
|
|
|
|
|
int source_pos = 0;
|
|
|
|
|
while (remaining_samples > 0) {
|
2023-02-28 14:13:12 -06:00
|
|
|
std::shared_ptr<Frame> source_frame = GetOrCreateFrame(location.frame, false);
|
|
|
|
|
int frame_sample_count = source_frame->GetAudioSamplesCount() - location.sample_start;
|
2023-02-27 22:11:13 -06:00
|
|
|
|
|
|
|
|
if (frame_sample_count == 0) {
|
|
|
|
|
// No samples found in source frame (fill with silence)
|
|
|
|
|
if (is_increasing) {
|
|
|
|
|
location.frame++;
|
|
|
|
|
} else {
|
|
|
|
|
location.frame--;
|
|
|
|
|
}
|
|
|
|
|
location.sample_start = 0;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
if (remaining_samples - frame_sample_count >= 0) {
|
|
|
|
|
// Use all frame samples & increment location
|
2023-02-28 14:13:12 -06:00
|
|
|
for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
|
|
|
|
|
source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.sample_start, frame_sample_count, 1.0f);
|
2023-02-27 22:11:13 -06:00
|
|
|
}
|
|
|
|
|
if (is_increasing) {
|
|
|
|
|
location.frame++;
|
|
|
|
|
} else {
|
|
|
|
|
location.frame--;
|
|
|
|
|
}
|
|
|
|
|
location.sample_start = 0;
|
|
|
|
|
remaining_samples -= frame_sample_count;
|
|
|
|
|
source_pos += frame_sample_count;
|
|
|
|
|
|
2023-02-28 14:13:12 -06:00
|
|
|
} else {
|
2023-02-27 22:11:13 -06:00
|
|
|
// Use just what is needed (and reverse samples)
|
2023-02-28 14:13:12 -06:00
|
|
|
for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
|
|
|
|
|
source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.sample_start, remaining_samples, 1.0f);
|
2023-02-27 22:11:13 -06:00
|
|
|
}
|
|
|
|
|
location.sample_start += remaining_samples;
|
|
|
|
|
remaining_samples = 0;
|
|
|
|
|
source_pos += remaining_samples;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
2023-02-28 14:13:12 -06:00
|
|
|
// Resize audio for current frame object + fill with silence
|
|
|
|
|
// We are fixing to clobber this with actual audio data (possibly resampled)
|
|
|
|
|
frame->AddAudioSilence(target_sample_count);
|
|
|
|
|
|
2023-03-10 01:15:14 -06:00
|
|
|
if (source_sample_count != target_sample_count) {
|
2023-02-28 14:13:12 -06:00
|
|
|
// Resample audio (if needed)
|
2023-03-10 01:15:14 -06:00
|
|
|
double resample_ratio = double(source_sample_count) / double(target_sample_count);
|
|
|
|
|
resampler->SetBuffer(source_samples, resample_ratio);
|
2023-02-27 22:11:13 -06:00
|
|
|
|
|
|
|
|
// Resample the data
|
|
|
|
|
juce::AudioBuffer<float> *resampled_buffer = resampler->GetResampledBuffer();
|
|
|
|
|
|
|
|
|
|
// Fill the frame with resampled data
|
|
|
|
|
for (int channel = 0; channel < Reader()->info.channels; channel++) {
|
|
|
|
|
// Add new (slower) samples, to the frame object
|
2023-03-10 01:15:14 -06:00
|
|
|
frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, 0), std::min(resampled_buffer->getNumSamples(), target_sample_count), 1.0f);
|
2023-02-27 22:11:13 -06:00
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
// Fill the frame
|
|
|
|
|
for (int channel = 0; channel < Reader()->info.channels; channel++) {
|
|
|
|
|
// Add new (slower) samples, to the frame object
|
|
|
|
|
frame->AddAudio(true, channel, 0, source_samples->getReadPointer(channel, 0), target_sample_count, 1.0f);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2023-02-28 14:13:12 -06:00
|
|
|
// Clean up
|
|
|
|
|
delete source_samples;
|
|
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
// Set previous location
|
|
|
|
|
previous_location = location;
|
2019-03-06 15:35:03 -06:00
|
|
|
}
|
2012-10-10 02:36:53 -05:00
|
|
|
}
|
|
|
|
|
|
2012-10-10 15:21:33 -05:00
|
|
|
// Adjust frame number minimum value
|
2017-09-28 16:03:01 -05:00
|
|
|
int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
|
2012-10-10 15:21:33 -05:00
|
|
|
{
|
|
|
|
|
// Never return a frame number 0 or below
|
|
|
|
|
if (frame_number < 1)
|
|
|
|
|
return 1;
|
|
|
|
|
else
|
|
|
|
|
return frame_number;
|
|
|
|
|
|
|
|
|
|
}
|
2012-10-21 05:29:29 -05:00
|
|
|
|
2015-12-28 02:41:32 -06:00
|
|
|
// Get or generate a blank frame
|
2023-02-27 22:11:13 -06:00
|
|
|
std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number, bool enable_time)
|
2015-12-28 02:41:32 -06:00
|
|
|
{
|
|
|
|
|
try {
|
2023-02-27 22:11:13 -06:00
|
|
|
// Init to requested frame
|
|
|
|
|
int64_t clip_frame_number = adjust_frame_number_minimum(number);
|
|
|
|
|
|
|
|
|
|
// Adjust for time-mapping (if any)
|
|
|
|
|
if (enable_time && time.GetLength() > 1) {
|
|
|
|
|
clip_frame_number = adjust_frame_number_minimum(time.GetLong(clip_frame_number));
|
|
|
|
|
}
|
|
|
|
|
|
2016-04-24 15:37:47 -05:00
|
|
|
// Debug output
|
2022-01-12 10:52:05 -05:00
|
|
|
ZmqLogger::Instance()->AppendDebugMethod(
|
2023-02-27 22:11:13 -06:00
|
|
|
"Clip::GetOrCreateFrame (from reader)",
|
|
|
|
|
"number", number, "clip_frame_number", clip_frame_number);
|
2016-04-24 15:37:47 -05:00
|
|
|
|
2015-12-28 02:41:32 -06:00
|
|
|
// Attempt to get a frame (but this could fail if a reader has just been closed)
|
2023-02-27 22:11:13 -06:00
|
|
|
auto reader_frame = reader->GetFrame(clip_frame_number);
|
|
|
|
|
reader_frame->number = number; // Override frame # (due to time-mapping might change it)
|
2015-12-28 02:41:32 -06:00
|
|
|
|
|
|
|
|
// Return real frame
|
Large refactor of Timeline, TimelineBase, ClipBase, and Clip, to allow a Clip access to the parent timeline instance (if available), and thus, certain properties (preview size, timeline FPS, etc...). This allows for a simpler rendering of Clip keyframes (during the Clip::GetFrame method), and a simpler Timeline class, that can change the preview window size dynamically and no longer requires a Singleton Settings class.
- Also removed "crop" from Clip class, as it was never implmeneted correctly, and we have a fully functional "crop" effect when needed
- Added caching to Clip class, to optimize previewing of cached frames (much faster than previous)
2020-10-04 16:59:21 -05:00
|
|
|
if (reader_frame) {
|
|
|
|
|
// Create a new copy of reader frame
|
|
|
|
|
// This allows a clip to modify the pixels and audio of this frame without
|
|
|
|
|
// changing the underlying reader's frame data
|
2020-10-20 12:42:00 -05:00
|
|
|
auto reader_copy = std::make_shared<Frame>(*reader_frame.get());
|
2023-02-27 22:11:13 -06:00
|
|
|
if (has_video.GetInt(number) == 0) {
|
|
|
|
|
// No video, so add transparent pixels
|
|
|
|
|
reader_copy->AddColor(QColor(Qt::transparent));
|
|
|
|
|
}
|
|
|
|
|
if (has_audio.GetInt(number) == 0 || number > reader->info.video_length) {
|
|
|
|
|
// No audio, so include silence (also, mute audio if past end of reader)
|
|
|
|
|
reader_copy->AddAudioSilence(reader_copy->GetAudioSamplesCount());
|
|
|
|
|
}
|
Large refactor of Timeline, TimelineBase, ClipBase, and Clip, to allow a Clip access to the parent timeline instance (if available), and thus, certain properties (preview size, timeline FPS, etc...). This allows for a simpler rendering of Clip keyframes (during the Clip::GetFrame method), and a simpler Timeline class, that can change the preview window size dynamically and no longer requires a Singleton Settings class.
- Also removed "crop" from Clip class, as it was never implmeneted correctly, and we have a fully functional "crop" effect when needed
- Added caching to Clip class, to optimize previewing of cached frames (much faster than previous)
2020-10-04 16:59:21 -05:00
|
|
|
return reader_copy;
|
|
|
|
|
}
|
2015-12-28 02:41:32 -06:00
|
|
|
|
|
|
|
|
} catch (const ReaderClosed & e) {
|
|
|
|
|
// ...
|
|
|
|
|
} catch (const OutOfBoundsFrame & e) {
|
|
|
|
|
// ...
|
|
|
|
|
}
|
|
|
|
|
|
2020-08-26 17:05:50 -05:00
|
|
|
// Estimate # of samples needed for this frame
|
|
|
|
|
int estimated_samples_in_frame = Frame::GetSamplesPerFrame(number, reader->info.fps, reader->info.sample_rate, reader->info.channels);
|
|
|
|
|
|
2016-04-24 15:37:47 -05:00
|
|
|
// Debug output
|
2022-01-12 10:52:05 -05:00
|
|
|
ZmqLogger::Instance()->AppendDebugMethod(
|
|
|
|
|
"Clip::GetOrCreateFrame (create blank)",
|
|
|
|
|
"number", number,
|
|
|
|
|
"estimated_samples_in_frame", estimated_samples_in_frame);
|
2016-04-24 15:37:47 -05:00
|
|
|
|
2015-12-28 02:41:32 -06:00
|
|
|
// Create blank frame
|
2020-10-20 12:42:00 -05:00
|
|
|
auto new_frame = std::make_shared<Frame>(
|
|
|
|
|
number, reader->info.width, reader->info.height,
|
|
|
|
|
"#000000", estimated_samples_in_frame, reader->info.channels);
|
2015-12-28 02:41:32 -06:00
|
|
|
new_frame->SampleRate(reader->info.sample_rate);
|
|
|
|
|
new_frame->ChannelsLayout(reader->info.channel_layout);
|
2020-08-26 17:05:50 -05:00
|
|
|
new_frame->AddAudioSilence(estimated_samples_in_frame);
|
2015-12-28 02:41:32 -06:00
|
|
|
return new_frame;
|
|
|
|
|
}
|
|
|
|
|
|
2013-12-07 21:09:55 -06:00
|
|
|
// Generate JSON string of this object
|
2019-12-27 08:51:51 -05:00
|
|
|
std::string Clip::Json() const {
|
2013-12-07 21:09:55 -06:00
|
|
|
|
|
|
|
|
// Return formatted string
|
|
|
|
|
return JsonValue().toStyledString();
|
|
|
|
|
}
|
|
|
|
|
|
2015-02-09 22:41:42 -06:00
|
|
|
// Get all properties for a specific frame (as a JSON string). Each property is
// emitted via add_property_json (value, type, range, read-only flag), and
// enum-style properties also carry a "choices" array for UI dropdowns.
// Transform properties (location/scale/rotation/shear) come from one of three
// sources: an attached tracked object, an attached parent clip, or this clip's
// own keyframes.
std::string Clip::PropertiesJSON(int64_t requested_frame) const {

	// Generate JSON properties list (base clip properties + enum settings)
	Json::Value root;
	root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
	root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
	root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
	root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
	root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
	root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
	root["gravity"] = add_property_json("Gravity", gravity, "int", "", NULL, 0, 8, false, requested_frame);
	root["scale"] = add_property_json("Scale", scale, "int", "", NULL, 0, 3, false, requested_frame);
	root["display"] = add_property_json("Frame Number", display, "int", "", NULL, 0, 3, false, requested_frame);
	root["mixing"] = add_property_json("Volume Mixing", mixing, "int", "", NULL, 0, 2, false, requested_frame);
	root["waveform"] = add_property_json("Waveform", waveform, "int", "", NULL, 0, 1, false, requested_frame);
	root["parentObjectId"] = add_property_json("Parent", 0.0, "string", parentObjectId, NULL, -1, -1, false, requested_frame);

	// Add gravity choices (dropdown style)
	root["gravity"]["choices"].append(add_property_choice_json("Top Left", GRAVITY_TOP_LEFT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Top Center", GRAVITY_TOP, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Top Right", GRAVITY_TOP_RIGHT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Left", GRAVITY_LEFT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Center", GRAVITY_CENTER, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Right", GRAVITY_RIGHT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Bottom Left", GRAVITY_BOTTOM_LEFT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Bottom Center", GRAVITY_BOTTOM, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Bottom Right", GRAVITY_BOTTOM_RIGHT, gravity));

	// Add scale choices (dropdown style)
	root["scale"]["choices"].append(add_property_choice_json("Crop", SCALE_CROP, scale));
	root["scale"]["choices"].append(add_property_choice_json("Best Fit", SCALE_FIT, scale));
	root["scale"]["choices"].append(add_property_choice_json("Stretch", SCALE_STRETCH, scale));
	root["scale"]["choices"].append(add_property_choice_json("None", SCALE_NONE, scale));

	// Add frame number display choices (dropdown style)
	root["display"]["choices"].append(add_property_choice_json("None", FRAME_DISPLAY_NONE, display));
	root["display"]["choices"].append(add_property_choice_json("Clip", FRAME_DISPLAY_CLIP, display));
	root["display"]["choices"].append(add_property_choice_json("Timeline", FRAME_DISPLAY_TIMELINE, display));
	root["display"]["choices"].append(add_property_choice_json("Both", FRAME_DISPLAY_BOTH, display));

	// Add volume mixing choices (dropdown style)
	root["mixing"]["choices"].append(add_property_choice_json("None", VOLUME_MIX_NONE, mixing));
	root["mixing"]["choices"].append(add_property_choice_json("Average", VOLUME_MIX_AVERAGE, mixing));
	root["mixing"]["choices"].append(add_property_choice_json("Reduce", VOLUME_MIX_REDUCE, mixing));

	// Add waveform choices (dropdown style)
	root["waveform"]["choices"].append(add_property_choice_json("Yes", true, waveform));
	root["waveform"]["choices"].append(add_property_choice_json("No", false, waveform));

	// Add the parentTrackedObject's properties
	// NOTE(review): this branch requires BOTH a tracked object and a parent clip
	// object — a tracked object without parentClipObject falls to the final else.
	// Confirm that is intentional.
	if (parentTrackedObject && parentClipObject)
	{
		// Convert Clip's frame position to Timeline's frame position
		long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
		long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
		double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;

		// Get attached object's parent clip properties (includes the parent's
		// own frame number under the "frame_number" key)
		std::map< std::string, float > trackedObjectParentClipProperties = parentTrackedObject->GetParentClipProperties(timeline_frame_number);
		double parentObject_frame_number = trackedObjectParentClipProperties["frame_number"];
		// Get attached object properties (bounding-box values at that frame)
		std::map< std::string, float > trackedObjectProperties = parentTrackedObject->GetBoxValues(parentObject_frame_number);

		// Correct the parent Tracked Object properties by the clip's reference system
		// (box center is offset by 0.5 to convert into the clip's coordinate space)
		float parentObject_location_x = trackedObjectProperties["cx"] - 0.5 + trackedObjectParentClipProperties["cx"];
		float parentObject_location_y = trackedObjectProperties["cy"] - 0.5 + trackedObjectParentClipProperties["cy"];
		float parentObject_scale_x = trackedObjectProperties["w"]*trackedObjectProperties["sx"];
		float parentObject_scale_y = trackedObjectProperties["h"]*trackedObjectProperties["sy"];
		float parentObject_rotation = trackedObjectProperties["r"] + trackedObjectParentClipProperties["r"];

		// Add the parent Tracked Object properties to JSON
		// (shear still comes from this clip's own keyframes)
		root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
		root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
		root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
		root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
		root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
		root["shear_x"] = add_property_json("Shear X", shear_x.GetValue(requested_frame), "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
		root["shear_y"] = add_property_json("Shear Y", shear_y.GetValue(requested_frame), "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
	}
	// Add the parentClipObject's properties
	else if (parentClipObject)
	{
		// Convert Clip's frame position to Timeline's frame position
		long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
		long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
		double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;

		// Correct the parent Clip Object properties by the clip's reference system
		// (sample each of the parent's keyframes at the timeline frame)
		float parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
		float parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
		float parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
		float parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
		float parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
		float parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
		float parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);

		// Add the parent Clip Object properties to JSON
		root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
		root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
		root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
		root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
		root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
		root["shear_x"] = add_property_json("Shear X", parentObject_shear_x, "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
		root["shear_y"] = add_property_json("Shear Y", parentObject_shear_y, "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
	}
	else
	{
		// Add this own clip's properties to JSON (no parent attachment)
		root["location_x"] = add_property_json("Location X", location_x.GetValue(requested_frame), "float", "", &location_x, -1.0, 1.0, false, requested_frame);
		root["location_y"] = add_property_json("Location Y", location_y.GetValue(requested_frame), "float", "", &location_y, -1.0, 1.0, false, requested_frame);
		root["scale_x"] = add_property_json("Scale X", scale_x.GetValue(requested_frame), "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
		root["scale_y"] = add_property_json("Scale Y", scale_y.GetValue(requested_frame), "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
		root["rotation"] = add_property_json("Rotation", rotation.GetValue(requested_frame), "float", "", &rotation, -360, 360, false, requested_frame);
		root["shear_x"] = add_property_json("Shear X", shear_x.GetValue(requested_frame), "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
		root["shear_y"] = add_property_json("Shear Y", shear_y.GetValue(requested_frame), "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
	}

	// Keyframes (sampled at requested_frame)
	root["alpha"] = add_property_json("Alpha", alpha.GetValue(requested_frame), "float", "", &alpha, 0.0, 1.0, false, requested_frame);
	root["origin_x"] = add_property_json("Origin X", origin_x.GetValue(requested_frame), "float", "", &origin_x, 0.0, 1.0, false, requested_frame);
	root["origin_y"] = add_property_json("Origin Y", origin_y.GetValue(requested_frame), "float", "", &origin_y, 0.0, 1.0, false, requested_frame);
	root["volume"] = add_property_json("Volume", volume.GetValue(requested_frame), "float", "", &volume, 0.0, 1.0, false, requested_frame);
	root["time"] = add_property_json("Time", time.GetValue(requested_frame), "float", "", &time, 0.0, 30 * 60 * 60 * 48, false, requested_frame);
	root["channel_filter"] = add_property_json("Channel Filter", channel_filter.GetValue(requested_frame), "int", "", &channel_filter, -1, 10, false, requested_frame);
	root["channel_mapping"] = add_property_json("Channel Mapping", channel_mapping.GetValue(requested_frame), "int", "", &channel_mapping, -1, 10, false, requested_frame);
	root["has_audio"] = add_property_json("Enable Audio", has_audio.GetValue(requested_frame), "int", "", &has_audio, -1, 1.0, false, requested_frame);
	root["has_video"] = add_property_json("Enable Video", has_video.GetValue(requested_frame), "int", "", &has_video, -1, 1.0, false, requested_frame);

	// Add enable audio/video choices (dropdown style)
	root["has_audio"]["choices"].append(add_property_choice_json("Auto", -1, has_audio.GetValue(requested_frame)));
	root["has_audio"]["choices"].append(add_property_choice_json("Off", 0, has_audio.GetValue(requested_frame)));
	root["has_audio"]["choices"].append(add_property_choice_json("On", 1, has_audio.GetValue(requested_frame)));
	root["has_video"]["choices"].append(add_property_choice_json("Auto", -1, has_video.GetValue(requested_frame)));
	root["has_video"]["choices"].append(add_property_choice_json("Off", 0, has_video.GetValue(requested_frame)));
	root["has_video"]["choices"].append(add_property_choice_json("On", 1, has_video.GetValue(requested_frame)));

	// Waveform color (nested color property with per-channel keyframes)
	root["wave_color"] = add_property_json("Wave Color", 0.0, "color", "", &wave_color.red, 0, 255, false, requested_frame);
	root["wave_color"]["red"] = add_property_json("Red", wave_color.red.GetValue(requested_frame), "float", "", &wave_color.red, 0, 255, false, requested_frame);
	root["wave_color"]["blue"] = add_property_json("Blue", wave_color.blue.GetValue(requested_frame), "float", "", &wave_color.blue, 0, 255, false, requested_frame);
	root["wave_color"]["green"] = add_property_json("Green", wave_color.green.GetValue(requested_frame), "float", "", &wave_color.green, 0, 255, false, requested_frame);

	// Return formatted string
	return root.toStyledString();
}
|
|
|
|
|
|
2019-12-27 08:51:51 -05:00
|
|
|
// Generate Json::Value for this object. Serializes every clip setting,
// keyframe, effect, and the attached reader into a JSON tree; the inverse of
// Clip::SetJsonValue.
Json::Value Clip::JsonValue() const {

	// Create root json object
	Json::Value root = ClipBase::JsonValue(); // get parent properties
	root["parentObjectId"] = parentObjectId;

	// Enum / flag settings (stored as their integer values)
	root["gravity"] = gravity;
	root["scale"] = scale;
	root["anchor"] = anchor;
	root["display"] = display;
	root["mixing"] = mixing;
	root["waveform"] = waveform;

	// Keyframe properties (each serialized via Keyframe::JsonValue)
	root["scale_x"] = scale_x.JsonValue();
	root["scale_y"] = scale_y.JsonValue();
	root["location_x"] = location_x.JsonValue();
	root["location_y"] = location_y.JsonValue();
	root["alpha"] = alpha.JsonValue();
	root["rotation"] = rotation.JsonValue();
	root["time"] = time.JsonValue();
	root["volume"] = volume.JsonValue();
	root["wave_color"] = wave_color.JsonValue();
	root["shear_x"] = shear_x.JsonValue();
	root["shear_y"] = shear_y.JsonValue();
	root["origin_x"] = origin_x.JsonValue();
	root["origin_y"] = origin_y.JsonValue();
	root["channel_filter"] = channel_filter.JsonValue();
	root["channel_mapping"] = channel_mapping.JsonValue();
	root["has_audio"] = has_audio.JsonValue();
	root["has_video"] = has_video.JsonValue();
	root["perspective_c1_x"] = perspective_c1_x.JsonValue();
	root["perspective_c1_y"] = perspective_c1_y.JsonValue();
	root["perspective_c2_x"] = perspective_c2_x.JsonValue();
	root["perspective_c2_y"] = perspective_c2_y.JsonValue();
	root["perspective_c3_x"] = perspective_c3_x.JsonValue();
	root["perspective_c3_y"] = perspective_c3_y.JsonValue();
	root["perspective_c4_x"] = perspective_c4_x.JsonValue();
	root["perspective_c4_y"] = perspective_c4_y.JsonValue();

	// Add array of effects
	root["effects"] = Json::Value(Json::arrayValue);

	// loop through effects, serializing each one into the array
	for (auto existing_effect : effects)
	{
		root["effects"].append(existing_effect->JsonValue());
	}

	// Serialize the attached reader (or an empty object when no reader is set,
	// so "reader" is always present in the output)
	if (reader)
		root["reader"] = reader->JsonValue();
	else
		root["reader"] = Json::Value(Json::objectValue);

	// return JsonValue
	return root;
}
|
|
|
|
|
|
2013-12-07 21:09:55 -06:00
|
|
|
// Load JSON string into this object
|
2019-12-27 08:51:51 -05:00
|
|
|
void Clip::SetJson(const std::string value) {
|
2013-12-07 21:09:55 -06:00
|
|
|
|
|
|
|
|
// Parse JSON string into JSON objects
|
|
|
|
|
try
|
|
|
|
|
{
|
2019-12-27 08:51:51 -05:00
|
|
|
const Json::Value root = openshot::stringToJson(value);
|
2013-12-07 21:09:55 -06:00
|
|
|
// Set all values that match
|
|
|
|
|
SetJsonValue(root);
|
|
|
|
|
}
|
2019-07-03 12:58:02 -04:00
|
|
|
catch (const std::exception& e)
|
2013-12-07 21:09:55 -06:00
|
|
|
{
|
|
|
|
|
// Error parsing JSON (or missing keys)
|
2019-08-27 15:47:39 -04:00
|
|
|
throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
|
2013-12-07 21:09:55 -06:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2019-12-27 08:51:51 -05:00
|
|
|
// Load Json::Value into this object
|
|
|
|
|
void Clip::SetJsonValue(const Json::Value root) {
|
2013-12-06 00:40:26 -06:00
|
|
|
|
|
|
|
|
// Set parent data
|
2013-12-07 21:09:55 -06:00
|
|
|
ClipBase::SetJsonValue(root);
|
2013-12-06 00:40:26 -06:00
|
|
|
|
|
|
|
|
// Set data from Json (if key is found)
|
2021-01-27 17:54:49 -03:00
|
|
|
if (!root["parentObjectId"].isNull()){
|
|
|
|
|
parentObjectId = root["parentObjectId"].asString();
|
2021-04-12 21:32:27 -03:00
|
|
|
if (parentObjectId.size() > 0 && parentObjectId != ""){
|
2021-01-27 17:54:49 -03:00
|
|
|
AttachToObject(parentObjectId);
|
2021-01-14 15:52:49 -03:00
|
|
|
} else{
|
2021-01-27 17:54:49 -03:00
|
|
|
parentTrackedObject = nullptr;
|
|
|
|
|
parentClipObject = NULL;
|
2020-12-22 21:32:36 -03:00
|
|
|
}
|
2021-01-27 17:41:39 -03:00
|
|
|
}
|
2014-01-08 01:43:58 -06:00
|
|
|
if (!root["gravity"].isNull())
|
2013-12-06 00:40:26 -06:00
|
|
|
gravity = (GravityType) root["gravity"].asInt();
|
2014-01-08 01:43:58 -06:00
|
|
|
if (!root["scale"].isNull())
|
2013-12-06 00:40:26 -06:00
|
|
|
scale = (ScaleType) root["scale"].asInt();
|
2014-01-08 01:43:58 -06:00
|
|
|
if (!root["anchor"].isNull())
|
2013-12-06 00:40:26 -06:00
|
|
|
anchor = (AnchorType) root["anchor"].asInt();
|
2017-03-15 02:06:53 -05:00
|
|
|
if (!root["display"].isNull())
|
|
|
|
|
display = (FrameDisplayType) root["display"].asInt();
|
2018-06-27 01:35:38 -05:00
|
|
|
if (!root["mixing"].isNull())
|
|
|
|
|
mixing = (VolumeMixType) root["mixing"].asInt();
|
2014-01-08 01:43:58 -06:00
|
|
|
if (!root["waveform"].isNull())
|
2013-12-06 00:40:26 -06:00
|
|
|
waveform = root["waveform"].asBool();
|
2014-01-08 01:43:58 -06:00
|
|
|
if (!root["scale_x"].isNull())
|
2014-01-04 19:04:21 -06:00
|
|
|
scale_x.SetJsonValue(root["scale_x"]);
|
2014-01-08 01:43:58 -06:00
|
|
|
if (!root["scale_y"].isNull())
|
2014-01-04 19:04:21 -06:00
|
|
|
scale_y.SetJsonValue(root["scale_y"]);
|
2014-01-08 01:43:58 -06:00
|
|
|
if (!root["location_x"].isNull())
|
2014-01-04 19:04:21 -06:00
|
|
|
location_x.SetJsonValue(root["location_x"]);
|
2014-01-08 01:43:58 -06:00
|
|
|
if (!root["location_y"].isNull())
|
2014-01-04 19:04:21 -06:00
|
|
|
location_y.SetJsonValue(root["location_y"]);
|
2014-01-08 01:43:58 -06:00
|
|
|
if (!root["alpha"].isNull())
|
2014-01-04 19:04:21 -06:00
|
|
|
alpha.SetJsonValue(root["alpha"]);
|
2014-01-08 01:43:58 -06:00
|
|
|
if (!root["rotation"].isNull())
|
2014-01-04 19:04:21 -06:00
|
|
|
rotation.SetJsonValue(root["rotation"]);
|
2014-01-08 01:43:58 -06:00
|
|
|
if (!root["time"].isNull())
|
2014-01-04 19:04:21 -06:00
|
|
|
time.SetJsonValue(root["time"]);
|
2014-01-08 01:43:58 -06:00
|
|
|
if (!root["volume"].isNull())
|
2014-01-04 19:04:21 -06:00
|
|
|
volume.SetJsonValue(root["volume"]);
|
2014-01-08 01:43:58 -06:00
|
|
|
if (!root["wave_color"].isNull())
|
2014-01-04 19:04:21 -06:00
|
|
|
wave_color.SetJsonValue(root["wave_color"]);
|
2014-01-08 01:43:58 -06:00
|
|
|
if (!root["shear_x"].isNull())
|
2014-01-04 19:04:21 -06:00
|
|
|
shear_x.SetJsonValue(root["shear_x"]);
|
2014-01-08 01:43:58 -06:00
|
|
|
if (!root["shear_y"].isNull())
|
2014-01-04 19:04:21 -06:00
|
|
|
shear_y.SetJsonValue(root["shear_y"]);
|
2020-04-13 16:55:29 -05:00
|
|
|
if (!root["origin_x"].isNull())
|
|
|
|
|
origin_x.SetJsonValue(root["origin_x"]);
|
|
|
|
|
if (!root["origin_y"].isNull())
|
|
|
|
|
origin_y.SetJsonValue(root["origin_y"]);
|
2016-04-24 15:37:47 -05:00
|
|
|
if (!root["channel_filter"].isNull())
|
|
|
|
|
channel_filter.SetJsonValue(root["channel_filter"]);
|
|
|
|
|
if (!root["channel_mapping"].isNull())
|
|
|
|
|
channel_mapping.SetJsonValue(root["channel_mapping"]);
|
|
|
|
|
if (!root["has_audio"].isNull())
|
|
|
|
|
has_audio.SetJsonValue(root["has_audio"]);
|
|
|
|
|
if (!root["has_video"].isNull())
|
|
|
|
|
has_video.SetJsonValue(root["has_video"]);
|
2014-01-08 01:43:58 -06:00
|
|
|
if (!root["perspective_c1_x"].isNull())
|
2014-01-04 19:04:21 -06:00
|
|
|
perspective_c1_x.SetJsonValue(root["perspective_c1_x"]);
|
2014-01-08 01:43:58 -06:00
|
|
|
if (!root["perspective_c1_y"].isNull())
|
2014-01-04 19:04:21 -06:00
|
|
|
perspective_c1_y.SetJsonValue(root["perspective_c1_y"]);
|
2014-01-08 01:43:58 -06:00
|
|
|
if (!root["perspective_c2_x"].isNull())
|
2014-01-04 19:04:21 -06:00
|
|
|
perspective_c2_x.SetJsonValue(root["perspective_c2_x"]);
|
2014-01-08 01:43:58 -06:00
|
|
|
if (!root["perspective_c2_y"].isNull())
|
2014-01-04 19:04:21 -06:00
|
|
|
perspective_c2_y.SetJsonValue(root["perspective_c2_y"]);
|
2014-01-08 01:43:58 -06:00
|
|
|
if (!root["perspective_c3_x"].isNull())
|
2014-01-04 19:04:21 -06:00
|
|
|
perspective_c3_x.SetJsonValue(root["perspective_c3_x"]);
|
2014-01-08 01:43:58 -06:00
|
|
|
if (!root["perspective_c3_y"].isNull())
|
2014-01-04 19:04:21 -06:00
|
|
|
perspective_c3_y.SetJsonValue(root["perspective_c3_y"]);
|
2014-01-08 01:43:58 -06:00
|
|
|
if (!root["perspective_c4_x"].isNull())
|
2014-01-04 19:04:21 -06:00
|
|
|
perspective_c4_x.SetJsonValue(root["perspective_c4_x"]);
|
2014-01-08 01:43:58 -06:00
|
|
|
if (!root["perspective_c4_y"].isNull())
|
2014-01-04 19:04:21 -06:00
|
|
|
perspective_c4_y.SetJsonValue(root["perspective_c4_y"]);
|
2015-03-14 01:36:13 -05:00
|
|
|
if (!root["effects"].isNull()) {
|
2015-08-06 20:01:34 -05:00
|
|
|
|
2015-03-14 01:36:13 -05:00
|
|
|
// Clear existing effects
|
|
|
|
|
effects.clear();
|
|
|
|
|
|
|
|
|
|
// loop through effects
|
2019-12-27 01:01:48 -05:00
|
|
|
for (const auto existing_effect : root["effects"]) {
|
2015-03-14 01:36:13 -05:00
|
|
|
// Create Effect
|
|
|
|
|
EffectBase *e = NULL;
|
2016-08-16 22:40:51 -05:00
|
|
|
if (!existing_effect["type"].isNull()) {
|
2015-11-09 00:12:21 -06:00
|
|
|
|
2020-07-16 21:10:02 -03:00
|
|
|
// Create instance of effect
|
|
|
|
|
if ( (e = EffectInfo().CreateEffect(existing_effect["type"].asString()))) {
|
2021-10-27 00:26:56 -04:00
|
|
|
|
2020-07-16 21:10:02 -03:00
|
|
|
// Load Json into Effect
|
|
|
|
|
e->SetJsonValue(existing_effect);
|
2015-08-16 22:58:07 -05:00
|
|
|
|
2020-07-16 21:10:02 -03:00
|
|
|
// Add Effect to Timeline
|
|
|
|
|
AddEffect(e);
|
2018-09-11 00:40:31 -05:00
|
|
|
}
|
2016-08-16 22:40:51 -05:00
|
|
|
}
|
2015-03-14 01:36:13 -05:00
|
|
|
}
|
|
|
|
|
}
|
2014-01-08 01:43:58 -06:00
|
|
|
if (!root["reader"].isNull()) // does Json contain a reader?
|
2013-12-07 21:09:55 -06:00
|
|
|
{
|
2014-01-08 01:43:58 -06:00
|
|
|
if (!root["reader"]["type"].isNull()) // does the reader Json contain a 'type'?
|
2013-12-07 21:09:55 -06:00
|
|
|
{
|
|
|
|
|
// Close previous reader (if any)
|
2013-12-18 21:55:43 -06:00
|
|
|
bool already_open = false;
|
2013-12-07 21:09:55 -06:00
|
|
|
if (reader)
|
2013-12-18 21:55:43 -06:00
|
|
|
{
|
|
|
|
|
// Track if reader was open
|
|
|
|
|
already_open = reader->IsOpen();
|
|
|
|
|
|
2022-10-28 11:00:47 -05:00
|
|
|
// Close and delete existing allocated reader (if any)
|
2023-02-27 22:11:13 -06:00
|
|
|
Reader(NULL);
|
2013-12-18 21:55:43 -06:00
|
|
|
}
|
2013-12-07 21:09:55 -06:00
|
|
|
|
|
|
|
|
// Create new reader (and load properties)
|
2019-08-04 23:08:19 -04:00
|
|
|
std::string type = root["reader"]["type"].asString();
|
2013-12-07 21:09:55 -06:00
|
|
|
|
|
|
|
|
if (type == "FFmpegReader") {
|
|
|
|
|
|
|
|
|
|
// Create new reader
|
Large refactor of Timeline, TimelineBase, ClipBase, and Clip, to allow a Clip access to the parent timeline instance (if available), and thus, certain properties (preview size, timeline FPS, etc...). This allows for a simpler rendering of Clip keyframes (during the Clip::GetFrame method), and a simpler Timeline class, that can change the preview window size dynamically and no longer requires a Singleton Settings class.
- Also removed "crop" from Clip class, as it was never implmeneted correctly, and we have a fully functional "crop" effect when needed
- Added caching to Clip class, to optimize previewing of cached frames (much faster than previous)
2020-10-04 16:59:21 -05:00
|
|
|
reader = new openshot::FFmpegReader(root["reader"]["path"].asString(), false);
|
2013-12-07 21:09:55 -06:00
|
|
|
reader->SetJsonValue(root["reader"]);
|
|
|
|
|
|
2015-06-01 00:20:14 -07:00
|
|
|
} else if (type == "QtImageReader") {
|
|
|
|
|
|
|
|
|
|
// Create new reader
|
Large refactor of Timeline, TimelineBase, ClipBase, and Clip, to allow a Clip access to the parent timeline instance (if available), and thus, certain properties (preview size, timeline FPS, etc...). This allows for a simpler rendering of Clip keyframes (during the Clip::GetFrame method), and a simpler Timeline class, that can change the preview window size dynamically and no longer requires a Singleton Settings class.
- Also removed "crop" from Clip class, as it was never implmeneted correctly, and we have a fully functional "crop" effect when needed
- Added caching to Clip class, to optimize previewing of cached frames (much faster than previous)
2020-10-04 16:59:21 -05:00
|
|
|
reader = new openshot::QtImageReader(root["reader"]["path"].asString(), false);
|
2015-06-01 00:20:14 -07:00
|
|
|
reader->SetJsonValue(root["reader"]);
|
|
|
|
|
|
2016-02-23 00:27:03 -06:00
|
|
|
#ifdef USE_IMAGEMAGICK
|
2013-12-07 21:09:55 -06:00
|
|
|
} else if (type == "ImageReader") {
|
|
|
|
|
|
|
|
|
|
// Create new reader
|
2016-09-16 17:43:26 -05:00
|
|
|
reader = new ImageReader(root["reader"]["path"].asString(), false);
|
2013-12-07 21:09:55 -06:00
|
|
|
reader->SetJsonValue(root["reader"]);
|
|
|
|
|
|
|
|
|
|
} else if (type == "TextReader") {
|
|
|
|
|
|
|
|
|
|
// Create new reader
|
|
|
|
|
reader = new TextReader();
|
|
|
|
|
reader->SetJsonValue(root["reader"]);
|
2016-02-23 00:27:03 -06:00
|
|
|
#endif
|
2013-12-18 21:55:43 -06:00
|
|
|
|
|
|
|
|
} else if (type == "ChunkReader") {
|
|
|
|
|
|
|
|
|
|
// Create new reader
|
Large refactor of Timeline, TimelineBase, ClipBase, and Clip, to allow a Clip access to the parent timeline instance (if available), and thus, certain properties (preview size, timeline FPS, etc...). This allows for a simpler rendering of Clip keyframes (during the Clip::GetFrame method), and a simpler Timeline class, that can change the preview window size dynamically and no longer requires a Singleton Settings class.
- Also removed "crop" from Clip class, as it was never implmeneted correctly, and we have a fully functional "crop" effect when needed
- Added caching to Clip class, to optimize previewing of cached frames (much faster than previous)
2020-10-04 16:59:21 -05:00
|
|
|
reader = new openshot::ChunkReader(root["reader"]["path"].asString(), (ChunkVersion) root["reader"]["chunk_version"].asInt());
|
2013-12-18 21:55:43 -06:00
|
|
|
reader->SetJsonValue(root["reader"]);
|
2014-01-05 22:37:11 -06:00
|
|
|
|
|
|
|
|
} else if (type == "DummyReader") {
|
|
|
|
|
|
|
|
|
|
// Create new reader
|
Large refactor of Timeline, TimelineBase, ClipBase, and Clip, to allow a Clip access to the parent timeline instance (if available), and thus, certain properties (preview size, timeline FPS, etc...). This allows for a simpler rendering of Clip keyframes (during the Clip::GetFrame method), and a simpler Timeline class, that can change the preview window size dynamically and no longer requires a Singleton Settings class.
- Also removed "crop" from Clip class, as it was never implmeneted correctly, and we have a fully functional "crop" effect when needed
- Added caching to Clip class, to optimize previewing of cached frames (much faster than previous)
2020-10-04 16:59:21 -05:00
|
|
|
reader = new openshot::DummyReader();
|
2014-01-05 22:37:11 -06:00
|
|
|
reader->SetJsonValue(root["reader"]);
|
2020-03-09 16:49:06 -05:00
|
|
|
|
|
|
|
|
} else if (type == "Timeline") {
|
|
|
|
|
|
|
|
|
|
// Create new reader (always load from file again)
|
|
|
|
|
// This prevents FrameMappers from being loaded on accident
|
Large refactor of Timeline, TimelineBase, ClipBase, and Clip, to allow a Clip access to the parent timeline instance (if available), and thus, certain properties (preview size, timeline FPS, etc...). This allows for a simpler rendering of Clip keyframes (during the Clip::GetFrame method), and a simpler Timeline class, that can change the preview window size dynamically and no longer requires a Singleton Settings class.
- Also removed "crop" from Clip class, as it was never implmeneted correctly, and we have a fully functional "crop" effect when needed
- Added caching to Clip class, to optimize previewing of cached frames (much faster than previous)
2020-10-04 16:59:21 -05:00
|
|
|
reader = new openshot::Timeline(root["reader"]["path"].asString(), true);
|
2013-12-07 21:09:55 -06:00
|
|
|
}
|
|
|
|
|
|
2019-01-19 02:18:52 -06:00
|
|
|
// mark as managed reader and set parent
|
|
|
|
|
if (reader) {
|
Large refactor of Timeline, TimelineBase, ClipBase, and Clip, to allow a Clip access to the parent timeline instance (if available), and thus, certain properties (preview size, timeline FPS, etc...). This allows for a simpler rendering of Clip keyframes (during the Clip::GetFrame method), and a simpler Timeline class, that can change the preview window size dynamically and no longer requires a Singleton Settings class.
- Also removed "crop" from Clip class, as it was never implmeneted correctly, and we have a fully functional "crop" effect when needed
- Added caching to Clip class, to optimize previewing of cached frames (much faster than previous)
2020-10-04 16:59:21 -05:00
|
|
|
reader->ParentClip(this);
|
2019-05-09 10:51:40 -07:00
|
|
|
allocated_reader = reader;
|
2019-01-19 02:18:52 -06:00
|
|
|
}
|
2015-06-01 00:20:14 -07:00
|
|
|
|
2013-12-18 21:55:43 -06:00
|
|
|
// Re-Open reader (if needed)
|
2023-02-27 22:11:13 -06:00
|
|
|
if (already_open) {
|
2013-12-18 21:55:43 -06:00
|
|
|
reader->Open();
|
2023-02-27 22:11:13 -06:00
|
|
|
}
|
2013-12-07 21:09:55 -06:00
|
|
|
}
|
|
|
|
|
}
|
2023-02-27 22:11:13 -06:00
|
|
|
|
|
|
|
|
// Clear cache (it might have changed)
|
|
|
|
|
final_cache.Clear();
|
2013-12-06 00:40:26 -06:00
|
|
|
}
|
2015-03-14 01:36:13 -05:00
|
|
|
|
|
|
|
|
// Sort effects by order
|
|
|
|
|
void Clip::sort_effects()
|
|
|
|
|
{
|
|
|
|
|
// sort clips
|
|
|
|
|
effects.sort(CompareClipEffects());
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Add an effect to the clip
|
|
|
|
|
// Add an effect to the clip
void Clip::AddEffect(EffectBase* effect)
{
	// This clip becomes the effect's parent
	effect->ParentClip(this);

	// Append the effect, then keep the list sorted by effect order
	effects.push_back(effect);
	sort_effects();

	// Propagate the parent timeline (if this clip is attached to one)
	Timeline* timeline_ptr = static_cast<Timeline *>(ParentTimeline());
	if (timeline_ptr)
		effect->ParentTimeline(timeline_ptr);

#ifdef USE_OPENCV
	// Publish any tracked objects carried by this effect onto the timeline
	// (only possible when a parent timeline exists)
	if (effect->info.has_tracked_object && timeline_ptr) {
		effect->ParentTimeline(timeline_ptr);

		// Walk the effect's map of tracked objects
		for (const auto& tracked_pair : effect->trackedObjects) {
			// Stored as TrackedObjectBase; we need the bounding-box type
			auto tracked_bbox = std::static_pointer_cast<TrackedObjectBBox>(tracked_pair.second);

			// Attach the tracked object to this clip, then register it on the timeline
			tracked_bbox->ParentClip(this);
			timeline_ptr->AddTrackedObject(tracked_bbox);
		}
	}
#endif

	// Clear cache (it might have changed)
	final_cache.Clear();
}
|
|
|
|
|
|
|
|
|
|
// Remove an effect from the clip
|
|
|
|
|
// Remove an effect from the clip
void Clip::RemoveEffect(EffectBase* effect)
{
	// Drop every matching pointer from the effect list
	effects.remove(effect);

	// Cached frames may now be stale, so invalidate them
	final_cache.Clear();
}
|
|
|
|
|
|
2023-05-24 17:12:15 -05:00
|
|
|
// Apply background image to the current clip image (i.e. flatten this image onto previous layer)
|
|
|
|
|
// Apply background image to the current clip image (i.e. flatten this image onto previous layer)
void Clip::apply_background(std::shared_ptr<openshot::Frame> frame, std::shared_ptr<openshot::Frame> background_frame) {
	// Paint onto the lower layer's image (the canvas)
	std::shared_ptr<QImage> canvas = background_frame->GetImage();
	QPainter compositor(canvas.get());
	compositor.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);

	// Blend this clip's image over the canvas contents
	compositor.setCompositionMode(QPainter::CompositionMode_SourceOver);
	compositor.drawImage(0, 0, *frame->GetImage());
	compositor.end();

	// The flattened result becomes this frame's image
	frame->AddImage(canvas);
}
|
|
|
|
|
|
2015-03-14 01:36:13 -05:00
|
|
|
// Apply effects to the source frame (if any)
|
2023-05-24 17:12:15 -05:00
|
|
|
void Clip::apply_effects(std::shared_ptr<Frame> frame, std::shared_ptr<Frame> background_frame, TimelineInfoStruct* options, bool before_keyframes)
|
2015-03-14 01:36:13 -05:00
|
|
|
{
|
2019-12-27 01:01:48 -05:00
|
|
|
for (auto effect : effects)
|
2015-03-14 01:36:13 -05:00
|
|
|
{
|
|
|
|
|
// Apply the effect to this frame
|
2023-05-24 17:12:15 -05:00
|
|
|
if (effect->info.apply_before_clip && before_keyframes) {
|
|
|
|
|
effect->GetFrame(frame, frame->number);
|
|
|
|
|
} else if (!effect->info.apply_before_clip && !before_keyframes) {
|
|
|
|
|
effect->GetFrame(frame, frame->number);
|
|
|
|
|
}
|
|
|
|
|
}
|
2015-03-14 01:36:13 -05:00
|
|
|
|
2023-05-24 17:12:15 -05:00
|
|
|
if (timeline != NULL && options != NULL) {
|
|
|
|
|
// Apply global timeline effects (i.e. transitions & masks... if any)
|
|
|
|
|
Timeline* timeline_instance = static_cast<Timeline*>(timeline);
|
|
|
|
|
options->is_before_clip_keyframes = before_keyframes;
|
|
|
|
|
timeline_instance->apply_effects(frame, background_frame->number, Layer(), options);
|
|
|
|
|
}
|
2015-03-14 01:36:13 -05:00
|
|
|
}
|
2020-08-26 13:12:42 -05:00
|
|
|
|
|
|
|
|
// Compare 2 floating point numbers for equality
|
|
|
|
|
// Compare 2 floating point numbers for equality
bool Clip::isEqual(double a, double b)
{
	// Treat values within a tiny absolute tolerance as equal
	const double tolerance = 0.000001;
	return fabs(a - b) < tolerance;
}
|
|
|
|
|
|
2021-02-17 19:44:44 -06:00
|
|
|
// Apply keyframes to the source frame (if any)
|
2023-04-16 01:50:11 -05:00
|
|
|
void Clip::apply_keyframes(std::shared_ptr<Frame> frame, std::shared_ptr<Frame> background_frame) {
|
2023-02-27 22:11:13 -06:00
|
|
|
// Skip out if video was disabled or only an audio frame (no visualisation in use)
|
|
|
|
|
if (!frame->has_image_data) {
|
|
|
|
|
// Skip the rest of the image processing for performance reasons
|
|
|
|
|
return;
|
|
|
|
|
}
|
2021-02-17 19:44:44 -06:00
|
|
|
|
2023-05-24 17:12:15 -05:00
|
|
|
// Get image from clip, and create transparent background image
|
2023-02-27 22:11:13 -06:00
|
|
|
std::shared_ptr<QImage> source_image = frame->GetImage();
|
2023-05-24 17:12:15 -05:00
|
|
|
std::shared_ptr<QImage> background_canvas = std::make_shared<QImage>(background_frame->GetImage()->width(),
|
|
|
|
|
background_frame->GetImage()->height(),
|
|
|
|
|
QImage::Format_RGBA8888_Premultiplied);
|
|
|
|
|
background_canvas->fill(QColor(Qt::transparent));
|
2021-02-17 19:44:44 -06:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
// Get transform from clip's keyframes
|
|
|
|
|
QTransform transform = get_transform(frame, background_canvas->width(), background_canvas->height());
|
2021-02-17 19:44:44 -06:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
// Load timeline's new frame image into a QPainter
|
|
|
|
|
QPainter painter(background_canvas.get());
|
|
|
|
|
painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);
|
2021-02-17 19:44:44 -06:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
// Apply transform (translate, rotate, scale)
|
|
|
|
|
painter.setTransform(transform);
|
2021-02-17 19:44:44 -06:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
// Composite a new layer onto the image
|
|
|
|
|
painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
|
|
|
|
|
painter.drawImage(0, 0, *source_image);
|
2021-02-17 19:44:44 -06:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
if (timeline) {
|
2023-03-10 01:15:14 -06:00
|
|
|
Timeline *t = static_cast<Timeline *>(timeline);
|
2021-02-17 19:44:44 -06:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
// Draw frame #'s on top of image (if needed)
|
|
|
|
|
if (display != FRAME_DISPLAY_NONE) {
|
|
|
|
|
std::stringstream frame_number_str;
|
|
|
|
|
switch (display) {
|
|
|
|
|
case (FRAME_DISPLAY_NONE):
|
|
|
|
|
// This is only here to prevent unused-enum warnings
|
|
|
|
|
break;
|
2021-02-17 19:44:44 -06:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
case (FRAME_DISPLAY_CLIP):
|
|
|
|
|
frame_number_str << frame->number;
|
|
|
|
|
break;
|
2021-02-17 19:44:44 -06:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
case (FRAME_DISPLAY_TIMELINE):
|
|
|
|
|
frame_number_str << round((Position() - Start()) * t->info.fps.ToFloat()) + frame->number;
|
|
|
|
|
break;
|
2021-02-17 19:44:44 -06:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
case (FRAME_DISPLAY_BOTH):
|
|
|
|
|
frame_number_str << round((Position() - Start()) * t->info.fps.ToFloat()) + frame->number << " (" << frame->number << ")";
|
|
|
|
|
break;
|
|
|
|
|
}
|
2021-02-17 19:44:44 -06:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
// Draw frame number on top of image
|
|
|
|
|
painter.setPen(QColor("#ffffff"));
|
|
|
|
|
painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
painter.end();
|
2021-02-17 19:44:44 -06:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
// Add new QImage to frame
|
|
|
|
|
frame->AddImage(background_canvas);
|
2021-02-17 19:44:44 -06:00
|
|
|
}
|
2020-08-26 13:12:42 -05:00
|
|
|
|
2023-02-10 15:16:56 -06:00
|
|
|
// Apply apply_waveform image to the source frame (if any)
|
2023-04-16 01:50:11 -05:00
|
|
|
void Clip::apply_waveform(std::shared_ptr<Frame> frame, std::shared_ptr<Frame> background_frame) {
|
2023-02-10 15:16:56 -06:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
if (!Waveform()) {
|
|
|
|
|
// Exit if no waveform is needed
|
|
|
|
|
return;
|
|
|
|
|
}
|
2023-02-10 15:16:56 -06:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
// Get image from clip
|
|
|
|
|
std::shared_ptr<QImage> source_image = frame->GetImage();
|
2023-04-16 01:50:11 -05:00
|
|
|
std::shared_ptr<QImage> background_canvas = background_frame->GetImage();
|
2023-02-10 15:16:56 -06:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
// Debug output
|
|
|
|
|
ZmqLogger::Instance()->AppendDebugMethod(
|
|
|
|
|
"Clip::apply_waveform (Generate Waveform Image)",
|
|
|
|
|
"frame->number", frame->number,
|
|
|
|
|
"Waveform()", Waveform(),
|
|
|
|
|
"background_canvas->width()", background_canvas->width(),
|
|
|
|
|
"background_canvas->height()", background_canvas->height());
|
|
|
|
|
|
|
|
|
|
// Get the color of the waveform
|
|
|
|
|
int red = wave_color.red.GetInt(frame->number);
|
|
|
|
|
int green = wave_color.green.GetInt(frame->number);
|
|
|
|
|
int blue = wave_color.blue.GetInt(frame->number);
|
|
|
|
|
int alpha = wave_color.alpha.GetInt(frame->number);
|
|
|
|
|
|
|
|
|
|
// Generate Waveform Dynamically (the size of the timeline)
|
|
|
|
|
source_image = frame->GetWaveform(background_canvas->width(), background_canvas->height(), red, green, blue, alpha);
|
|
|
|
|
frame->AddImage(source_image);
|
2023-02-10 15:16:56 -06:00
|
|
|
}
|
|
|
|
|
|
2020-08-26 13:12:42 -05:00
|
|
|
// Apply keyframes to the source frame (if any)
|
2022-03-29 01:31:51 -05:00
|
|
|
QTransform Clip::get_transform(std::shared_ptr<Frame> frame, int width, int height)
|
2020-08-26 13:12:42 -05:00
|
|
|
{
|
2023-02-27 22:11:13 -06:00
|
|
|
// Get image from clip
|
|
|
|
|
std::shared_ptr<QImage> source_image = frame->GetImage();
|
2020-08-26 13:12:42 -05:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
/* ALPHA & OPACITY */
|
2022-03-29 01:31:51 -05:00
|
|
|
if (alpha.GetValue(frame->number) != 1.0)
|
2020-08-26 13:12:42 -05:00
|
|
|
{
|
2022-03-29 01:31:51 -05:00
|
|
|
float alpha_value = alpha.GetValue(frame->number);
|
2020-08-26 13:12:42 -05:00
|
|
|
|
|
|
|
|
// Get source image's pixels
|
2020-10-16 18:04:10 -05:00
|
|
|
unsigned char *pixels = source_image->bits();
|
2020-08-26 13:12:42 -05:00
|
|
|
|
|
|
|
|
// Loop through pixels
|
|
|
|
|
for (int pixel = 0, byte_index=0; pixel < source_image->width() * source_image->height(); pixel++, byte_index+=4)
|
|
|
|
|
{
|
2020-10-14 14:19:26 -05:00
|
|
|
// Apply alpha to pixel values (since we use a premultiplied value, we must
|
|
|
|
|
// multiply the alpha with all colors).
|
|
|
|
|
pixels[byte_index + 0] *= alpha_value;
|
|
|
|
|
pixels[byte_index + 1] *= alpha_value;
|
|
|
|
|
pixels[byte_index + 2] *= alpha_value;
|
2020-08-26 13:12:42 -05:00
|
|
|
pixels[byte_index + 3] *= alpha_value;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Debug output
|
2022-01-12 10:52:05 -05:00
|
|
|
ZmqLogger::Instance()->AppendDebugMethod(
|
|
|
|
|
"Clip::get_transform (Set Alpha & Opacity)",
|
|
|
|
|
"alpha_value", alpha_value,
|
|
|
|
|
"frame->number", frame->number);
|
2020-08-26 13:12:42 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* RESIZE SOURCE IMAGE - based on scale type */
|
|
|
|
|
QSize source_size = source_image->size();
|
2021-01-18 15:12:45 -03:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
// Apply stretch scale to correctly fit the bounding-box
|
2021-01-27 17:54:49 -03:00
|
|
|
if (parentTrackedObject){
|
2021-01-18 15:12:45 -03:00
|
|
|
scale = SCALE_STRETCH;
|
|
|
|
|
}
|
|
|
|
|
|
2020-08-26 13:12:42 -05:00
|
|
|
switch (scale)
|
|
|
|
|
{
|
|
|
|
|
case (SCALE_FIT): {
|
2020-08-26 17:05:50 -05:00
|
|
|
source_size.scale(width, height, Qt::KeepAspectRatio);
|
2020-08-26 13:12:42 -05:00
|
|
|
|
|
|
|
|
// Debug output
|
2022-01-12 10:52:05 -05:00
|
|
|
ZmqLogger::Instance()->AppendDebugMethod(
|
|
|
|
|
"Clip::get_transform (Scale: SCALE_FIT)",
|
|
|
|
|
"frame->number", frame->number,
|
|
|
|
|
"source_width", source_size.width(),
|
|
|
|
|
"source_height", source_size.height());
|
2020-08-26 13:12:42 -05:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
case (SCALE_STRETCH): {
|
2020-08-26 17:05:50 -05:00
|
|
|
source_size.scale(width, height, Qt::IgnoreAspectRatio);
|
2020-08-26 13:12:42 -05:00
|
|
|
|
|
|
|
|
// Debug output
|
2022-01-12 10:52:05 -05:00
|
|
|
ZmqLogger::Instance()->AppendDebugMethod(
|
|
|
|
|
"Clip::get_transform (Scale: SCALE_STRETCH)",
|
|
|
|
|
"frame->number", frame->number,
|
|
|
|
|
"source_width", source_size.width(),
|
|
|
|
|
"source_height", source_size.height());
|
2020-08-26 13:12:42 -05:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
case (SCALE_CROP): {
|
2020-10-20 12:42:00 -05:00
|
|
|
source_size.scale(width, height, Qt::KeepAspectRatioByExpanding);
|
2020-08-26 13:12:42 -05:00
|
|
|
|
|
|
|
|
// Debug output
|
2022-01-12 10:52:05 -05:00
|
|
|
ZmqLogger::Instance()->AppendDebugMethod(
|
|
|
|
|
"Clip::get_transform (Scale: SCALE_CROP)",
|
|
|
|
|
"frame->number", frame->number,
|
|
|
|
|
"source_width", source_size.width(),
|
|
|
|
|
"source_height", source_size.height());
|
2020-08-26 13:12:42 -05:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
case (SCALE_NONE): {
|
2022-01-12 10:52:05 -05:00
|
|
|
// Image is already the original size (i.e. no scaling mode) relative
|
|
|
|
|
// to the preview window size (i.e. timeline / preview ratio). No further
|
|
|
|
|
// scaling is needed here.
|
2020-08-26 13:12:42 -05:00
|
|
|
// Debug output
|
2022-01-12 10:52:05 -05:00
|
|
|
ZmqLogger::Instance()->AppendDebugMethod(
|
|
|
|
|
"Clip::get_transform (Scale: SCALE_NONE)",
|
|
|
|
|
"frame->number", frame->number,
|
|
|
|
|
"source_width", source_size.width(),
|
|
|
|
|
"source_height", source_size.height());
|
2020-08-26 13:12:42 -05:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
2021-01-27 17:41:39 -03:00
|
|
|
|
2021-01-27 17:54:49 -03:00
|
|
|
// Initialize parent object's properties (Clip or Tracked Object)
|
|
|
|
|
float parentObject_location_x = 0.0;
|
|
|
|
|
float parentObject_location_y = 0.0;
|
|
|
|
|
float parentObject_scale_x = 1.0;
|
|
|
|
|
float parentObject_scale_y = 1.0;
|
|
|
|
|
float parentObject_shear_x = 0.0;
|
|
|
|
|
float parentObject_shear_y = 0.0;
|
|
|
|
|
float parentObject_rotation = 0.0;
|
2021-01-18 15:12:45 -03:00
|
|
|
|
2021-01-27 17:54:49 -03:00
|
|
|
// Get the parentClipObject properties
|
|
|
|
|
if (parentClipObject){
|
2021-01-27 17:41:39 -03:00
|
|
|
|
|
|
|
|
// Convert Clip's frame position to Timeline's frame position
|
|
|
|
|
long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
|
|
|
|
|
long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
|
2022-03-29 01:31:51 -05:00
|
|
|
double timeline_frame_number = frame->number + clip_start_position - clip_start_frame;
|
2021-01-27 17:41:39 -03:00
|
|
|
|
2021-01-27 17:54:49 -03:00
|
|
|
// Get parent object's properties (Clip)
|
2021-10-27 00:26:56 -04:00
|
|
|
parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
|
2021-01-27 17:54:49 -03:00
|
|
|
parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
|
|
|
|
|
parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
|
|
|
|
|
parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
|
|
|
|
|
parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
|
|
|
|
|
parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
|
|
|
|
|
parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);
|
2021-01-27 17:41:39 -03:00
|
|
|
}
|
|
|
|
|
|
2021-01-27 17:54:49 -03:00
|
|
|
// Get the parentTrackedObject properties
|
|
|
|
|
if (parentTrackedObject){
|
2021-01-14 16:03:22 -03:00
|
|
|
// Convert Clip's frame position to Timeline's frame position
|
|
|
|
|
long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
|
|
|
|
|
long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
|
2022-03-29 01:31:51 -05:00
|
|
|
double timeline_frame_number = frame->number + clip_start_position - clip_start_frame;
|
2020-12-22 21:32:36 -03:00
|
|
|
|
2021-01-27 17:54:49 -03:00
|
|
|
// Get parentTrackedObject's parent clip's properties
|
2023-04-14 14:32:48 -05:00
|
|
|
std::map<std::string, float> trackedObjectParentClipProperties =
|
2023-04-14 14:35:46 -05:00
|
|
|
parentTrackedObject->GetParentClipProperties(timeline_frame_number);
|
2021-01-14 16:03:22 -03:00
|
|
|
|
2021-01-27 17:54:49 -03:00
|
|
|
// Get the attached object's parent clip's properties
|
|
|
|
|
if (!trackedObjectParentClipProperties.empty())
|
|
|
|
|
{
|
|
|
|
|
// Get parent object's properties (Tracked Object)
|
|
|
|
|
float parentObject_frame_number = trackedObjectParentClipProperties["frame_number"];
|
2021-01-19 16:03:51 -03:00
|
|
|
|
2021-01-27 17:54:49 -03:00
|
|
|
// Access the parentTrackedObject's properties
|
|
|
|
|
std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parentObject_frame_number);
|
2021-01-19 16:03:51 -03:00
|
|
|
|
2021-01-27 17:54:49 -03:00
|
|
|
// Get the Tracked Object's properties and correct them by the clip's reference system
|
|
|
|
|
parentObject_location_x = trackedObjectProperties["cx"] - 0.5 + trackedObjectParentClipProperties["location_x"];
|
|
|
|
|
parentObject_location_y = trackedObjectProperties["cy"] - 0.5 + trackedObjectParentClipProperties["location_y"];
|
|
|
|
|
parentObject_scale_x = trackedObjectProperties["w"]*trackedObjectProperties["sx"];
|
|
|
|
|
parentObject_scale_y = trackedObjectProperties["h"]*trackedObjectProperties["sy"];
|
|
|
|
|
parentObject_rotation = trackedObjectProperties["r"] + trackedObjectParentClipProperties["rotation"];
|
2021-10-27 00:26:56 -04:00
|
|
|
}
|
|
|
|
|
else
|
2021-01-27 17:54:49 -03:00
|
|
|
{
|
|
|
|
|
// Access the parentTrackedObject's properties
|
|
|
|
|
std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(timeline_frame_number);
|
2021-01-19 16:03:51 -03:00
|
|
|
|
2021-01-27 17:54:49 -03:00
|
|
|
// Get the Tracked Object's properties and correct them by the clip's reference system
|
|
|
|
|
parentObject_location_x = trackedObjectProperties["cx"] - 0.5;
|
|
|
|
|
parentObject_location_y = trackedObjectProperties["cy"] - 0.5;
|
|
|
|
|
parentObject_scale_x = trackedObjectProperties["w"]*trackedObjectProperties["sx"];
|
|
|
|
|
parentObject_scale_y = trackedObjectProperties["h"]*trackedObjectProperties["sy"];
|
|
|
|
|
parentObject_rotation = trackedObjectProperties["r"];
|
2021-01-18 15:12:45 -03:00
|
|
|
}
|
2020-12-22 21:32:36 -03:00
|
|
|
}
|
|
|
|
|
|
2020-08-26 13:12:42 -05:00
|
|
|
/* GRAVITY LOCATION - Initialize X & Y to the correct values (before applying location curves) */
|
|
|
|
|
float x = 0.0; // left
|
|
|
|
|
float y = 0.0; // top
|
|
|
|
|
|
|
|
|
|
// Adjust size for scale x and scale y
|
2022-03-29 01:31:51 -05:00
|
|
|
float sx = scale_x.GetValue(frame->number); // percentage X scale
|
|
|
|
|
float sy = scale_y.GetValue(frame->number); // percentage Y scale
|
2021-01-19 16:03:51 -03:00
|
|
|
|
2021-01-27 17:54:49 -03:00
|
|
|
// Change clip's scale to parentObject's scale
|
|
|
|
|
if(parentObject_scale_x != 0.0 && parentObject_scale_y != 0.0){
|
|
|
|
|
sx*= parentObject_scale_x;
|
|
|
|
|
sy*= parentObject_scale_y;
|
2021-01-14 16:03:22 -03:00
|
|
|
}
|
2021-01-19 16:03:51 -03:00
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
float scaled_source_width = source_size.width() * sx;
|
2020-08-26 13:12:42 -05:00
|
|
|
float scaled_source_height = source_size.height() * sy;
|
2021-10-27 00:26:56 -04:00
|
|
|
|
2020-08-26 13:12:42 -05:00
|
|
|
switch (gravity)
|
|
|
|
|
{
|
|
|
|
|
case (GRAVITY_TOP_LEFT):
|
|
|
|
|
// This is only here to prevent unused-enum warnings
|
|
|
|
|
break;
|
|
|
|
|
case (GRAVITY_TOP):
|
2020-08-26 17:05:50 -05:00
|
|
|
x = (width - scaled_source_width) / 2.0; // center
|
2020-08-26 13:12:42 -05:00
|
|
|
break;
|
|
|
|
|
case (GRAVITY_TOP_RIGHT):
|
2020-08-26 17:05:50 -05:00
|
|
|
x = width - scaled_source_width; // right
|
2020-08-26 13:12:42 -05:00
|
|
|
break;
|
|
|
|
|
case (GRAVITY_LEFT):
|
2020-08-26 17:05:50 -05:00
|
|
|
y = (height - scaled_source_height) / 2.0; // center
|
2020-08-26 13:12:42 -05:00
|
|
|
break;
|
|
|
|
|
case (GRAVITY_CENTER):
|
2020-08-26 17:05:50 -05:00
|
|
|
x = (width - scaled_source_width) / 2.0; // center
|
|
|
|
|
y = (height - scaled_source_height) / 2.0; // center
|
2020-08-26 13:12:42 -05:00
|
|
|
break;
|
|
|
|
|
case (GRAVITY_RIGHT):
|
2020-08-26 17:05:50 -05:00
|
|
|
x = width - scaled_source_width; // right
|
|
|
|
|
y = (height - scaled_source_height) / 2.0; // center
|
2020-08-26 13:12:42 -05:00
|
|
|
break;
|
|
|
|
|
case (GRAVITY_BOTTOM_LEFT):
|
2020-08-26 17:05:50 -05:00
|
|
|
y = (height - scaled_source_height); // bottom
|
2020-08-26 13:12:42 -05:00
|
|
|
break;
|
|
|
|
|
case (GRAVITY_BOTTOM):
|
2020-08-26 17:05:50 -05:00
|
|
|
x = (width - scaled_source_width) / 2.0; // center
|
|
|
|
|
y = (height - scaled_source_height); // bottom
|
2020-08-26 13:12:42 -05:00
|
|
|
break;
|
|
|
|
|
case (GRAVITY_BOTTOM_RIGHT):
|
2020-08-26 17:05:50 -05:00
|
|
|
x = width - scaled_source_width; // right
|
|
|
|
|
y = (height - scaled_source_height); // bottom
|
2020-08-26 13:12:42 -05:00
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Debug output
|
2022-01-12 10:52:05 -05:00
|
|
|
ZmqLogger::Instance()->AppendDebugMethod(
|
|
|
|
|
"Clip::get_transform (Gravity)",
|
|
|
|
|
"frame->number", frame->number,
|
|
|
|
|
"source_clip->gravity", gravity,
|
|
|
|
|
"scaled_source_width", scaled_source_width,
|
|
|
|
|
"scaled_source_height", scaled_source_height);
|
2020-08-26 13:12:42 -05:00
|
|
|
|
2020-12-22 21:32:36 -03:00
|
|
|
QTransform transform;
|
2021-10-27 00:26:56 -04:00
|
|
|
|
2020-08-26 13:12:42 -05:00
|
|
|
/* LOCATION, ROTATION, AND SCALE */
|
2022-03-29 01:31:51 -05:00
|
|
|
float r = rotation.GetValue(frame->number) + parentObject_rotation; // rotate in degrees
|
|
|
|
|
x += (width * (location_x.GetValue(frame->number) + parentObject_location_x )); // move in percentage of final width
|
|
|
|
|
y += (height * (location_y.GetValue(frame->number) + parentObject_location_y )); // move in percentage of final height
|
|
|
|
|
float shear_x_value = shear_x.GetValue(frame->number) + parentObject_shear_x;
|
|
|
|
|
float shear_y_value = shear_y.GetValue(frame->number) + parentObject_shear_y;
|
|
|
|
|
float origin_x_value = origin_x.GetValue(frame->number);
|
|
|
|
|
float origin_y_value = origin_y.GetValue(frame->number);
|
2020-08-26 13:12:42 -05:00
|
|
|
|
|
|
|
|
// Transform source image (if needed)
|
2022-01-12 10:52:05 -05:00
|
|
|
ZmqLogger::Instance()->AppendDebugMethod(
|
|
|
|
|
"Clip::get_transform (Build QTransform - if needed)",
|
|
|
|
|
"frame->number", frame->number,
|
|
|
|
|
"x", x, "y", y,
|
|
|
|
|
"r", r,
|
|
|
|
|
"sx", sx, "sy", sy);
|
2020-08-26 13:12:42 -05:00
|
|
|
|
|
|
|
|
if (!isEqual(x, 0) || !isEqual(y, 0)) {
|
|
|
|
|
// TRANSLATE/MOVE CLIP
|
|
|
|
|
transform.translate(x, y);
|
2021-10-27 00:26:56 -04:00
|
|
|
}
|
2020-08-26 13:12:42 -05:00
|
|
|
if (!isEqual(r, 0) || !isEqual(shear_x_value, 0) || !isEqual(shear_y_value, 0)) {
|
|
|
|
|
// ROTATE CLIP (around origin_x, origin_y)
|
|
|
|
|
float origin_x_offset = (scaled_source_width * origin_x_value);
|
|
|
|
|
float origin_y_offset = (scaled_source_height * origin_y_value);
|
|
|
|
|
transform.translate(origin_x_offset, origin_y_offset);
|
|
|
|
|
transform.rotate(r);
|
|
|
|
|
transform.shear(shear_x_value, shear_y_value);
|
|
|
|
|
transform.translate(-origin_x_offset,-origin_y_offset);
|
|
|
|
|
}
|
|
|
|
|
// SCALE CLIP (if needed)
|
|
|
|
|
float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
|
|
|
|
|
float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;
|
|
|
|
|
if (!isEqual(source_width_scale, 1.0) || !isEqual(source_height_scale, 1.0)) {
|
|
|
|
|
transform.scale(source_width_scale, source_height_scale);
|
|
|
|
|
}
|
|
|
|
|
|
2023-02-27 22:11:13 -06:00
|
|
|
return transform;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Adjust frame number for Clip position and start (which can result in a different number)
|
|
|
|
|
int64_t Clip::adjust_timeline_framenumber(int64_t clip_frame_number) {
|
|
|
|
|
|
|
|
|
|
// Get clip position from parent clip (if any)
|
|
|
|
|
float position = 0.0;
|
|
|
|
|
float start = 0.0;
|
2023-03-10 01:15:14 -06:00
|
|
|
Clip *parent = static_cast<Clip *>(ParentClip());
|
2023-02-27 22:11:13 -06:00
|
|
|
if (parent) {
|
|
|
|
|
position = parent->Position();
|
|
|
|
|
start = parent->Start();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Adjust start frame and position based on parent clip.
|
|
|
|
|
// This ensures the same frame # is used by mapped readers and clips,
|
|
|
|
|
// when calculating samples per frame.
|
|
|
|
|
// Thus, this prevents gaps and mismatches in # of samples.
|
|
|
|
|
int64_t clip_start_frame = (start * info.fps.ToDouble()) + 1;
|
|
|
|
|
int64_t clip_start_position = round(position * info.fps.ToDouble()) + 1;
|
|
|
|
|
int64_t frame_number = clip_frame_number + clip_start_position - clip_start_frame;
|
|
|
|
|
|
|
|
|
|
return frame_number;
|
2015-03-14 01:36:13 -05:00
|
|
|
}
|