/**
 * @file
 * @brief Source file for Clip class
 * @author Jonathan Thomas <jonathan@openshot.org>
 *
 * @section LICENSE
 *
 * Copyright (c) 2008-2014 OpenShot Studios, LLC
 * <http://www.openshotstudios.com/>. This file is part of
 * OpenShot Library (libopenshot), an open-source project dedicated to
 * delivering high quality video editing and animation solutions to the
 * world. For more information visit <http://www.openshot.org/>.
 *
 * OpenShot Library (libopenshot) is free software: you can redistribute it
 * and/or modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * OpenShot Library (libopenshot) is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
 */
|
|
|
|
|
|
2012-10-03 01:55:24 -05:00
|
|
|
#include "../include/Clip.h"
|
|
|
|
|
|
|
|
|
|
using namespace openshot;
|
|
|
|
|
|
2012-10-04 01:34:45 -05:00
|
|
|
// Init default settings for a clip
|
|
|
|
|
void Clip::init_settings()
{
	// Init clip settings: position on the timeline, layer, and trim points all start at zero
	Position(0.0);
	Layer(0);
	Start(0.0);
	End(0.0);
	gravity = GRAVITY_CENTER;
	scale = SCALE_FIT;
	anchor = ANCHOR_CANVAS;
	display = FRAME_DISPLAY_NONE;
	mixing = VOLUME_MIX_NONE;
	waveform = false;
	previous_properties = "";

	// Init scale curves (1.0 == original size)
	scale_x = Keyframe(1.0);
	scale_y = Keyframe(1.0);

	// Init location curves (0.0 == no offset)
	location_x = Keyframe(0.0);
	location_y = Keyframe(0.0);

	// Init alpha (1.0 == fully opaque)
	alpha = Keyframe(1.0);

	// Init rotation (pulls initial rotation from reader metadata, if a reader is attached)
	init_reader_rotation();

	// Init time & volume
	time = Keyframe(1.0);
	volume = Keyframe(1.0);

	// Init audio waveform color (RGBA)
	wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255);

	// Init crop settings
	// NOTE(review): -1.0 presumably means "cropping disabled" — confirm against the renderer
	crop_gravity = GRAVITY_CENTER;
	crop_width = Keyframe(-1.0);
	crop_height = Keyframe(-1.0);
	crop_x = Keyframe(0.0);
	crop_y = Keyframe(0.0);

	// Init shear and perspective curves
	// NOTE(review): -1.0 on the perspective corners presumably means "unset" — confirm
	shear_x = Keyframe(0.0);
	shear_y = Keyframe(0.0);
	perspective_c1_x = Keyframe(-1.0);
	perspective_c1_y = Keyframe(-1.0);
	perspective_c2_x = Keyframe(-1.0);
	perspective_c2_y = Keyframe(-1.0);
	perspective_c3_x = Keyframe(-1.0);
	perspective_c3_y = Keyframe(-1.0);
	perspective_c4_x = Keyframe(-1.0);
	perspective_c4_y = Keyframe(-1.0);

	// Init audio channel filter and mappings (-1.0 == no filtering / identity mapping assumed)
	channel_filter = Keyframe(-1.0);
	channel_mapping = Keyframe(-1.0);

	// Init audio and video overrides
	// -1 acts as a sentinel meaning "follow the reader's capabilities"
	// (GetFrame resolves -1 against reader->info.has_audio / has_video)
	has_audio = Keyframe(-1.0);
	has_video = Keyframe(-1.0);

	// Default pointers: this clip does not own its reader unless a filepath constructor created one
	manage_reader = false;
}
|
|
|
|
|
|
2018-02-03 01:57:18 -06:00
|
|
|
// Init reader's rotation (if any)
|
|
|
|
|
void Clip::init_reader_rotation() {
|
|
|
|
|
// Only init rotation from reader when needed
|
|
|
|
|
if (rotation.Points.size() > 1)
|
|
|
|
|
// Do nothing if more than 1 rotation Point
|
|
|
|
|
return;
|
|
|
|
|
else if (rotation.Points.size() == 1 && rotation.GetValue(1) != 0.0)
|
|
|
|
|
// Do nothing if 1 Point, and it's not the default value
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
// Init rotation
|
|
|
|
|
if (reader && reader->info.metadata.count("rotate") > 0) {
|
|
|
|
|
// Use reader metadata rotation (if any)
|
|
|
|
|
// This is typical with cell phone videos filmed in different orientations
|
|
|
|
|
try {
|
|
|
|
|
float rotate_metadata = strtof(reader->info.metadata["rotate"].c_str(), 0);
|
|
|
|
|
rotation = Keyframe(rotate_metadata);
|
|
|
|
|
} catch (exception e) {}
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
// Default no rotation
|
|
|
|
|
rotation = Keyframe(0.0);
|
|
|
|
|
}
|
|
|
|
|
|
2012-10-04 01:34:45 -05:00
|
|
|
// Default Constructor for a clip
|
2018-02-06 13:05:30 -06:00
|
|
|
Clip::Clip() : reader(NULL), resampler(NULL), audio_cache(NULL)
{
	// Default-construct with no reader attached; a reader must be set later
	// via Reader(ReaderBase*) before frames can be requested.

	// Init all default settings
	init_settings();
}
|
|
|
|
|
|
2012-10-09 01:45:34 -05:00
|
|
|
// Constructor with reader
|
2018-02-06 13:05:30 -06:00
|
|
|
Clip::Clip(ReaderBase* new_reader) : reader(new_reader), resampler(NULL), audio_cache(NULL)
{
	// Construct a clip around an existing reader. The clip does NOT take
	// ownership of the reader (manage_reader stays false via init_settings).

	// Init all default settings
	init_settings();

	// Open and Close the reader (to set the duration of the clip)
	Open();
	Close();

	// Update duration from the reader's detected media length
	End(reader->info.duration);
}
|
|
|
|
|
|
2012-10-04 01:34:45 -05:00
|
|
|
// Constructor with filepath
|
2018-02-06 13:05:30 -06:00
|
|
|
Clip::Clip(string path) : reader(NULL), resampler(NULL), audio_cache(NULL)
{
	// Construct a clip from a file path, auto-detecting an appropriate reader.
	// The clip OWNS the reader it creates (manage_reader is set true below),
	// and the destructor will delete it.

	// Init all default settings
	init_settings();

	// Get file extension (and convert to lower case)
	string ext = get_file_extension(path);
	transform(ext.begin(), ext.end(), ext.begin(), ::tolower);

	// Determine if common video formats (fast path: go straight to FFmpeg)
	if (ext=="avi" || ext=="mov" || ext=="mkv" || ext=="mpg" || ext=="mpeg" || ext=="mp3" || ext=="mp4" || ext=="mts" ||
		ext=="ogg" || ext=="wav" || ext=="wmv" || ext=="webm" || ext=="vob")
	{
		try
		{
			// Open common video format
			reader = new FFmpegReader(path);

		} catch(...) { }
	}

	// If no video found, try each reader in turn until one accepts the file
	if (!reader)
	{
		try
		{
			// Try an image reader
			reader = new QtImageReader(path);

		} catch(...) {
			try
			{
				// Try a video reader
				reader = new FFmpegReader(path);

			} catch(...) { }
		}
	}

	// Update duration (reader may still be NULL here if every probe failed;
	// in that case the clip is left reader-less and later calls will throw)
	if (reader) {
		End(reader->info.duration);
		// This clip created the reader, so it must delete it in ~Clip
		manage_reader = true;
		// Pick up any "rotate" metadata from the newly created reader
		init_reader_rotation();
	}
}
|
|
|
|
|
|
|
|
|
|
// Destructor
|
|
|
|
|
Clip::~Clip()
{
	// Release resources the clip owns.

	// Delete the reader if clip created it (i.e. the filepath constructor was used)
	if (manage_reader && reader) {
		delete reader;
		reader = NULL;
	}

	// Close the resampler (lazily created in get_time_mapped_frame)
	if (resampler) {
		delete resampler;
		resampler = NULL;
	}

	// NOTE(review): audio_cache is initialized to NULL in every constructor but
	// is never freed here — confirm who owns it / whether it can leak.
}
|
|
|
|
|
|
|
|
|
|
/// Set the current reader
|
2013-12-07 21:09:55 -06:00
|
|
|
void Clip::Reader(ReaderBase* new_reader)
{
	// Attach a reader to this clip. Ownership is NOT transferred
	// (manage_reader is not set here).

	// set reader pointer
	reader = new_reader;

	// Init rotation (if any) from the new reader's metadata
	init_reader_rotation();
}
|
|
|
|
|
|
|
|
|
|
/// Get the current reader
|
2017-10-26 18:44:35 -05:00
|
|
|
ReaderBase* Clip::Reader()
{
	// Guard clause: a reader must have been attached before it can be returned
	if (!reader)
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");

	return reader;
}
|
|
|
|
|
|
2012-10-08 16:22:18 -05:00
|
|
|
// Open the internal reader
|
2017-10-26 18:44:35 -05:00
|
|
|
void Clip::Open()
{
	// Open the attached reader and, if the clip has no explicit end point yet,
	// adopt the reader's detected duration. Throws ReaderClosed when no
	// reader has been attached.
	if (reader)
	{
		// Open the reader
		reader->Open();

		// Set some clip properties from the file reader
		// (end == 0.0 means the user never called End(), so use the media duration)
		if (end == 0.0)
			End(reader->info.duration);
	}
	else
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");
}
|
|
|
|
|
|
|
|
|
|
// Close the internal reader
|
2017-10-26 18:44:35 -05:00
|
|
|
void Clip::Close()
{
	// Close the attached reader (releasing its decoding resources).
	// Throws ReaderClosed when no reader has been attached.
	if (reader) {
		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Clip::Close", "", -1, "", -1, "", -1, "", -1, "", -1, "", -1);

		// Close the reader
		reader->Close();
	}
	else
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");
}
|
|
|
|
|
|
2012-10-14 02:36:05 -05:00
|
|
|
// Get end position of clip (trim end of video), which can be affected by the time curve.
|
2017-10-26 18:44:35 -05:00
|
|
|
float Clip::End()
{
	// Return the clip's end position in seconds. When a time-remapping curve
	// is active, the curve's length (in frames) divided by the clip FPS
	// determines the effective end; otherwise the stored `end` value is used.

	// if a time curve is present, use its length
	if (time.Points.size() > 1)
	{
		// Determine the FPS for this clip (24.0 is only a placeholder;
		// it is always overwritten or an exception is thrown below)
		float fps = 24.0;
		if (reader)
			// file reader
			fps = reader->info.fps.ToFloat();
		else
			// Throw error if reader not initialized
			throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");

		// Length of the time curve (frames) converted to seconds
		return float(time.GetLength()) / fps;
	}
	else
		// just use the duration (as detected by the reader)
		return end;
}
|
|
|
|
|
|
2012-10-08 16:22:18 -05:00
|
|
|
// Get an openshot::Frame object for a specific frame number of this reader.
|
2017-10-26 18:44:35 -05:00
|
|
|
std::shared_ptr<Frame> Clip::GetFrame(int64_t requested_frame)
|
2012-10-08 16:22:18 -05:00
|
|
|
{
|
2013-12-07 21:09:55 -06:00
|
|
|
if (reader)
|
2013-09-28 22:00:52 -05:00
|
|
|
{
|
|
|
|
|
// Adjust out of bounds frame number
|
|
|
|
|
requested_frame = adjust_frame_number_minimum(requested_frame);
|
2012-10-08 16:22:18 -05:00
|
|
|
|
2016-04-24 15:37:47 -05:00
|
|
|
// Adjust has_video and has_audio overrides
|
|
|
|
|
int enabled_audio = has_audio.GetInt(requested_frame);
|
|
|
|
|
if (enabled_audio == -1 && reader && reader->info.has_audio)
|
|
|
|
|
enabled_audio = 1;
|
|
|
|
|
else if (enabled_audio == -1 && reader && !reader->info.has_audio)
|
|
|
|
|
enabled_audio = 0;
|
|
|
|
|
int enabled_video = has_video.GetInt(requested_frame);
|
|
|
|
|
if (enabled_video == -1 && reader && reader->info.has_video)
|
|
|
|
|
enabled_video = 1;
|
|
|
|
|
else if (enabled_video == -1 && reader && !reader->info.has_audio)
|
|
|
|
|
enabled_video = 0;
|
|
|
|
|
|
2013-09-28 22:00:52 -05:00
|
|
|
// Is a time map detected
|
2017-09-28 16:03:01 -05:00
|
|
|
int64_t new_frame_number = requested_frame;
|
|
|
|
|
int64_t time_mapped_number = adjust_frame_number_minimum(time.GetLong(requested_frame));
|
2013-09-28 22:00:52 -05:00
|
|
|
if (time.Values.size() > 1)
|
2017-07-24 15:43:35 -05:00
|
|
|
new_frame_number = time_mapped_number;
|
2012-10-10 15:21:33 -05:00
|
|
|
|
2013-09-28 22:00:52 -05:00
|
|
|
// Now that we have re-mapped what frame number is needed, go and get the frame pointer
|
2017-11-11 17:16:56 -06:00
|
|
|
std::shared_ptr<Frame> original_frame;
|
|
|
|
|
#pragma omp critical (Clip_GetFrame)
|
|
|
|
|
original_frame = GetOrCreateFrame(new_frame_number);
|
2015-03-14 01:36:13 -05:00
|
|
|
|
|
|
|
|
// Create a new frame
|
2017-08-20 17:37:39 -05:00
|
|
|
std::shared_ptr<Frame> frame(new Frame(new_frame_number, 1, 1, "#000000", original_frame->GetAudioSamplesCount(), original_frame->GetAudioChannelsCount()));
|
2017-11-11 17:16:56 -06:00
|
|
|
#pragma omp critical (Clip_GetFrame)
|
|
|
|
|
{
|
|
|
|
|
frame->SampleRate(original_frame->SampleRate());
|
|
|
|
|
frame->ChannelsLayout(original_frame->ChannelsLayout());
|
|
|
|
|
}
|
2015-03-14 01:36:13 -05:00
|
|
|
|
|
|
|
|
// Copy the image from the odd field
|
2016-04-24 15:37:47 -05:00
|
|
|
if (enabled_video)
|
2017-08-20 17:37:39 -05:00
|
|
|
frame->AddImage(std::shared_ptr<QImage>(new QImage(*original_frame->GetImage())));
|
2015-03-14 01:36:13 -05:00
|
|
|
|
|
|
|
|
// Loop through each channel, add audio
|
2016-04-24 15:37:47 -05:00
|
|
|
if (enabled_audio && reader->info.has_audio)
|
2016-01-09 15:50:53 -06:00
|
|
|
for (int channel = 0; channel < original_frame->GetAudioChannelsCount(); channel++)
|
|
|
|
|
frame->AddAudio(true, channel, 0, original_frame->GetAudioSamples(channel), original_frame->GetAudioSamplesCount(), 1.0);
|
2015-03-14 01:36:13 -05:00
|
|
|
|
2013-09-28 22:00:52 -05:00
|
|
|
// Get time mapped frame number (used to increase speed, change direction, etc...)
|
2017-08-20 17:37:39 -05:00
|
|
|
std::shared_ptr<Frame> new_frame = get_time_mapped_frame(frame, requested_frame);
|
2012-10-10 02:36:53 -05:00
|
|
|
|
2015-03-14 01:36:13 -05:00
|
|
|
// Apply effects to the frame (if any)
|
|
|
|
|
apply_effects(new_frame);
|
|
|
|
|
|
2013-09-28 22:00:52 -05:00
|
|
|
// Return processed 'frame'
|
|
|
|
|
return new_frame;
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
// Throw error if reader not initialized
|
|
|
|
|
throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");
|
2012-10-08 16:22:18 -05:00
|
|
|
}
|
|
|
|
|
|
2012-10-04 18:02:46 -05:00
|
|
|
// Get file extension
|
|
|
|
|
string Clip::get_file_extension(string path)
{
	// Return the text after the last '.' in the path.
	// (When no '.' exists, find_last_of yields npos; npos + 1 wraps to 0,
	// so the entire path is returned in that case.)
	const size_t dot_position = path.find_last_of('.');
	return path.substr(dot_position + 1);
}
|
2012-10-10 02:36:53 -05:00
|
|
|
|
2012-10-21 05:29:29 -05:00
|
|
|
// Reverse an audio buffer
|
|
|
|
|
// Reverse an audio buffer in place (used when a time curve plays audio backwards)
void Clip::reverse_buffer(juce::AudioSampleBuffer* buffer)
{
	int number_of_samples = buffer->getNumSamples();
	int channels = buffer->getNumChannels();

	// Reverse array (automatic-storage temp buffer: RAII guarantees release
	// even if addFrom throws, replacing the previous new/delete pair)
	juce::AudioSampleBuffer reversed(channels, number_of_samples);
	reversed.clear();

	for (int channel = 0; channel < channels; channel++)
	{
		int n = 0;
		for (int s = number_of_samples - 1; s >= 0; s--, n++)
			// Read the source through getReadPointer (the source is not being
			// written here; previously getWritePointer was used for the read)
			reversed.getWritePointer(channel)[n] = buffer->getReadPointer(channel)[s];
	}

	// Copy the samples back to the original array
	buffer->clear();
	// Loop through channels, and get audio samples
	for (int channel = 0; channel < channels; channel++)
		// Get the audio samples for this channel
		buffer->addFrom(channel, 0, reversed.getReadPointer(channel), number_of_samples, 1.0f);
}
|
|
|
|
|
|
|
|
|
|
// Adjust the audio and image of a time mapped frame
|
2017-10-26 18:44:35 -05:00
|
|
|
std::shared_ptr<Frame> Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, int64_t frame_number)
{
	// Adjust the audio and image of a time-mapped frame. When the `time`
	// curve has more than one value, this remaps `frame_number` through the
	// curve, grabs the mapped source frame's image, and resamples/reverses
	// audio to match the new playback speed/direction. Otherwise the input
	// frame is returned unchanged.

	// Check for valid reader
	if (!reader)
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");

	// Check for a valid time map curve
	if (time.Values.size() > 1)
	{
		// Serialize with other frame-producing calls on this clip
		const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);
		std::shared_ptr<Frame> new_frame;

		// create buffer and resampler (resampler is created once and reused;
		// it is deleted in ~Clip)
		juce::AudioSampleBuffer *samples = NULL;
		if (!resampler)
			resampler = new AudioResampler();

		// Get new frame number (the time curve maps requested -> source frame)
		int new_frame_number = adjust_frame_number_minimum(round(time.GetValue(frame_number)));

		// Create a new frame (1x1 placeholder image; real image copied below)
		int samples_in_frame = Frame::GetSamplesPerFrame(new_frame_number, reader->info.fps, reader->info.sample_rate, frame->GetAudioChannelsCount());
		new_frame = std::make_shared<Frame>(new_frame_number, 1, 1, "#000000", samples_in_frame, frame->GetAudioChannelsCount());

		// Copy the image from the new (time-mapped) frame
		new_frame->AddImage(std::shared_ptr<QImage>(new QImage(*GetOrCreateFrame(new_frame_number)->GetImage())));

		// Get delta (difference in previous Y value) — how many source frames
		// the curve advances per output frame (speed factor)
		int delta = int(round(time.GetDelta(frame_number)));

		// Init audio vars
		// NOTE(review): sample_rate is assigned but never used below — confirm
		int sample_rate = reader->info.sample_rate;
		int channels = reader->info.channels;
		int number_of_samples = GetOrCreateFrame(new_frame_number)->GetAudioSamplesCount();

		// Only resample audio if needed
		if (reader->info.has_audio) {
			// Determine if we are speeding up or slowing down
			if (time.GetRepeatFraction(frame_number).den > 1) {
				// SLOWING DOWN AUDIO: the same source frame repeats den times,
				// so stretch its audio den-x and take the slice for this repeat

				// Resample data, and return new buffer pointer
				AudioSampleBuffer *resampled_buffer = NULL;
				int resampled_buffer_size = 0;

				// SLOW DOWN audio (split audio)
				samples = new juce::AudioSampleBuffer(channels, number_of_samples);
				samples->clear();

				// Loop through channels, and get audio samples
				for (int channel = 0; channel < channels; channel++)
					// Get the audio samples for this channel
					samples->addFrom(channel, 0, GetOrCreateFrame(new_frame_number)->GetAudioSamples(channel),
									 number_of_samples, 1.0f);

				// Reverse the samples (if needed)
				if (!time.IsIncreasing(frame_number))
					reverse_buffer(samples);

				// Resample audio to be X times slower (where X is the denominator of the repeat fraction)
				resampler->SetBuffer(samples, 1.0 / time.GetRepeatFraction(frame_number).den);

				// Resample the data (since it's the 1st slice)
				resampled_buffer = resampler->GetResampledBuffer();

				// Get the length of the resampled buffer (if one exists)
				resampled_buffer_size = resampled_buffer->getNumSamples();

				// Just take the samples we need for the requested frame
				// (offset into the stretched buffer by which repeat we're on)
				int start = (number_of_samples * (time.GetRepeatFraction(frame_number).num - 1));
				if (start > 0)
					start -= 1;
				for (int channel = 0; channel < channels; channel++)
					// Add new (slower) samples, to the frame object
					new_frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, start),
										number_of_samples, 1.0f);

				// Clean up
				// NOTE(review): only the pointer is nulled — presumably the
				// resampler owns the resampled buffer; confirm in AudioResampler
				resampled_buffer = NULL;

			}
			else if (abs(delta) > 1 && abs(delta) < 100) {
				// SPEEDING UP: gather audio from all |delta| source frames
				// spanned by this output frame, then compress it down
				int start = 0;
				if (delta > 0) {
					// SPEED UP (multiple frames of audio), as long as it's not more than X frames
					int total_delta_samples = 0;
					for (int delta_frame = new_frame_number - (delta - 1);
						 delta_frame <= new_frame_number; delta_frame++)
						total_delta_samples += Frame::GetSamplesPerFrame(delta_frame, reader->info.fps,
																		 reader->info.sample_rate,
																		 reader->info.channels);

					// Allocate a new sample buffer for these delta frames
					samples = new juce::AudioSampleBuffer(channels, total_delta_samples);
					samples->clear();

					// Loop through each frame in this delta
					for (int delta_frame = new_frame_number - (delta - 1);
						 delta_frame <= new_frame_number; delta_frame++) {
						// buffer to hold delta samples
						int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
						AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels,
																   number_of_delta_samples);
						delta_samples->clear();

						for (int channel = 0; channel < channels; channel++)
							delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel),
												   number_of_delta_samples, 1.0f);

						// Reverse the samples (if needed)
						if (!time.IsIncreasing(frame_number))
							reverse_buffer(delta_samples);

						// Copy the samples into the combined buffer
						for (int channel = 0; channel < channels; channel++)
							// Get the audio samples for this channel
							samples->addFrom(channel, start, delta_samples->getReadPointer(channel),
											 number_of_delta_samples, 1.0f);

						// Clean up
						delete delta_samples;
						delta_samples = NULL;

						// Increment start position
						start += number_of_delta_samples;
					}
				}
				else {
					// Negative delta (playing backwards): same gather, but
					// iterating source frames in decreasing order
					// SPEED UP (multiple frames of audio), as long as it's not more than X frames
					int total_delta_samples = 0;
					for (int delta_frame = new_frame_number - (delta + 1);
						 delta_frame >= new_frame_number; delta_frame--)
						total_delta_samples += Frame::GetSamplesPerFrame(delta_frame, reader->info.fps,
																		 reader->info.sample_rate,
																		 reader->info.channels);

					// Allocate a new sample buffer for these delta frames
					samples = new juce::AudioSampleBuffer(channels, total_delta_samples);
					samples->clear();

					// Loop through each frame in this delta
					for (int delta_frame = new_frame_number - (delta + 1);
						 delta_frame >= new_frame_number; delta_frame--) {
						// buffer to hold delta samples
						int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
						AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels,
																   number_of_delta_samples);
						delta_samples->clear();

						for (int channel = 0; channel < channels; channel++)
							delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel),
												   number_of_delta_samples, 1.0f);

						// Reverse the samples (if needed)
						if (!time.IsIncreasing(frame_number))
							reverse_buffer(delta_samples);

						// Copy the samples into the combined buffer
						for (int channel = 0; channel < channels; channel++)
							// Get the audio samples for this channel
							samples->addFrom(channel, start, delta_samples->getReadPointer(channel),
											 number_of_delta_samples, 1.0f);

						// Clean up
						delete delta_samples;
						delta_samples = NULL;

						// Increment start position
						start += number_of_delta_samples;
					}
				}

				// Resample audio to be X times faster (where X is the delta of the repeat fraction)
				resampler->SetBuffer(samples, float(start) / float(number_of_samples));

				// Resample data, and return new buffer pointer
				AudioSampleBuffer *buffer = resampler->GetResampledBuffer();
				int resampled_buffer_size = buffer->getNumSamples();

				// Add the newly resized audio samples to the current frame
				for (int channel = 0; channel < channels; channel++)
					// Add new (faster) samples, to the frame object
					new_frame->AddAudio(true, channel, 0, buffer->getReadPointer(channel), number_of_samples, 1.0f);

				// Clean up (buffer is presumably owned by the resampler — only null the pointer)
				buffer = NULL;
			}
			else {
				// Normal-speed (delta <= 1): use the samples on this frame
				// (but maybe reverse them if needed)
				samples = new juce::AudioSampleBuffer(channels, number_of_samples);
				samples->clear();

				// Loop through channels, and get audio samples
				for (int channel = 0; channel < channels; channel++)
					// Get the audio samples for this channel
					samples->addFrom(channel, 0, frame->GetAudioSamples(channel), number_of_samples, 1.0f);

				// reverse the samples
				if (!time.IsIncreasing(frame_number))
					reverse_buffer(samples);

				// Add reversed samples to the frame object
				for (int channel = 0; channel < channels; channel++)
					new_frame->AddAudio(true, channel, 0, samples->getReadPointer(channel), number_of_samples, 1.0f);

			}

			// Free the working buffer allocated by whichever branch ran
			delete samples;
			samples = NULL;
		}

		// Return new time mapped frame
		return new_frame;

	} else
		// Use original frame (no time mapping active)
		return frame;
}
|
|
|
|
|
|
2012-10-10 15:21:33 -05:00
|
|
|
// Adjust frame number minimum value
|
2017-09-28 16:03:01 -05:00
|
|
|
int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
{
	// Frame numbers are 1-based; clamp anything below 1 up to 1.
	return (frame_number < 1) ? 1 : frame_number;
}
|
2012-10-21 05:29:29 -05:00
|
|
|
|
2015-12-28 02:41:32 -06:00
|
|
|
// Get or generate a blank frame
|
2017-09-28 16:03:01 -05:00
|
|
|
std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number)
{
	// Get frame `number` from the reader, or generate a blank (black, silent)
	// frame when the reader fails (e.g. it was just closed or seeked too much).
	std::shared_ptr<Frame> new_frame;

	// Init some basic properties about this frame
	int samples_in_frame = Frame::GetSamplesPerFrame(number, reader->info.fps, reader->info.sample_rate, reader->info.channels);

	try {
		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (from reader)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);

		// Determine the max size of this clips source image (based on the timeline's size, the scaling mode,
		// and the scaling keyframes). This is a performance improvement, to keep the images as small as possible,
		// without losing quality. NOTE: We cannot go smaller than the timeline itself, or the add_layer timeline
		// method will scale it back to timeline size before scaling it smaller again. This needs to be fixed in
		// the future.
		if (scale == SCALE_FIT || scale == SCALE_STRETCH) {
			// Best fit or Stretch scaling (based on max timeline size * scaling keyframes)
			float max_scale_x = scale_x.GetMaxPoint().co.Y;
			float max_scale_y = scale_y.GetMaxPoint().co.Y;
			reader->SetMaxSize(max(float(max_width), max_width * max_scale_x), max(float(max_height), max_height * max_scale_y));

		} else if (scale == SCALE_CROP) {
			// Cropping scale mode (based on max timeline size * cropped size * scaling keyframes)
			float max_scale_x = scale_x.GetMaxPoint().co.Y;
			float max_scale_y = scale_y.GetMaxPoint().co.Y;
			QSize width_size(max_width * max_scale_x, round(max_width / (float(reader->info.width) / float(reader->info.height))));
			QSize height_size(round(max_height / (float(reader->info.height) / float(reader->info.width))), max_height * max_scale_y);

			// respect aspect ratio (pick whichever candidate covers the timeline)
			if (width_size.width() >= max_width && width_size.height() >= max_height)
				reader->SetMaxSize(max(max_width, width_size.width()), max(max_height, width_size.height()));
			else
				reader->SetMaxSize(max(max_width, height_size.width()), max(max_height, height_size.height()));

		} else {
			// No scaling, use original image size (slower)
			reader->SetMaxSize(0, 0);
		}

		// Attempt to get a frame (but this could fail if a reader has just been closed)
		new_frame = reader->GetFrame(number);

		// Return real frame
		if (new_frame)
			return new_frame;

	} catch (const ReaderClosed & e) {
		// Swallowed deliberately: fall through to the blank-frame fallback below
	} catch (const TooManySeeks & e) {
		// Swallowed deliberately: fall through to the blank-frame fallback below
	} catch (const OutOfBoundsFrame & e) {
		// Swallowed deliberately: fall through to the blank-frame fallback below
	}

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (create blank)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);

	// Create blank frame (black image, silent audio) matching the reader's format
	new_frame = std::make_shared<Frame>(number, reader->info.width, reader->info.height, "#000000", samples_in_frame, reader->info.channels);
	new_frame->SampleRate(reader->info.sample_rate);
	new_frame->ChannelsLayout(reader->info.channel_layout);
	new_frame->AddAudioSilence(samples_in_frame);
	return new_frame;
}
|
|
|
|
|
|
2013-12-07 21:09:55 -06:00
|
|
|
// Generate JSON string of this object
|
|
|
|
|
string Clip::Json() {
|
|
|
|
|
|
|
|
|
|
// Return formatted string
|
|
|
|
|
return JsonValue().toStyledString();
|
|
|
|
|
}
|
|
|
|
|
|
2015-02-09 22:41:42 -06:00
|
|
|
// Get all properties for a specific frame
//
// Builds a JSON description of every user-editable property of this clip,
// evaluated at @a requested_frame (keyframed properties report their value
// at that frame). Each property entry is produced by add_property_json()
// with (name, value, type, memo, keyframe*, min, max, readonly, frame);
// dropdown-style properties additionally carry a "choices" array built via
// add_property_choice_json(). Returns the styled (indented) JSON string.
string Clip::PropertiesJSON(int64_t requested_frame) {

	// Generate JSON properties list
	Json::Value root;
	// "id" is read-only (readonly flag = true); Id() is passed as the memo/string value
	root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
	// 30 * 60 * 60 * 48 is the max time bound used throughout (48 hours at 30 fps)
	root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
	root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
	root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
	root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
	// Duration is derived, so it is read-only
	root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
	// Enum-backed properties (serialized as ints; choices appended below)
	root["gravity"] = add_property_json("Gravity", gravity, "int", "", NULL, 0, 8, false, requested_frame);
	root["scale"] = add_property_json("Scale", scale, "int", "", NULL, 0, 3, false, requested_frame);
	root["display"] = add_property_json("Frame Number", display, "int", "", NULL, 0, 3, false, requested_frame);
	root["mixing"] = add_property_json("Volume Mixing", mixing, "int", "", NULL, 0, 2, false, requested_frame);
	root["waveform"] = add_property_json("Waveform", waveform, "int", "", NULL, 0, 1, false, requested_frame);

	// Add gravity choices (dropdown style)
	root["gravity"]["choices"].append(add_property_choice_json("Top Left", GRAVITY_TOP_LEFT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Top Center", GRAVITY_TOP, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Top Right", GRAVITY_TOP_RIGHT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Left", GRAVITY_LEFT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Center", GRAVITY_CENTER, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Right", GRAVITY_RIGHT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Bottom Left", GRAVITY_BOTTOM_LEFT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Bottom Center", GRAVITY_BOTTOM, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Bottom Right", GRAVITY_BOTTOM_RIGHT, gravity));

	// Add scale choices (dropdown style)
	root["scale"]["choices"].append(add_property_choice_json("Crop", SCALE_CROP, scale));
	root["scale"]["choices"].append(add_property_choice_json("Best Fit", SCALE_FIT, scale));
	root["scale"]["choices"].append(add_property_choice_json("Stretch", SCALE_STRETCH, scale));
	root["scale"]["choices"].append(add_property_choice_json("None", SCALE_NONE, scale));

	// Add frame number display choices (dropdown style)
	root["display"]["choices"].append(add_property_choice_json("None", FRAME_DISPLAY_NONE, display));
	root["display"]["choices"].append(add_property_choice_json("Clip", FRAME_DISPLAY_CLIP, display));
	root["display"]["choices"].append(add_property_choice_json("Timeline", FRAME_DISPLAY_TIMELINE, display));
	root["display"]["choices"].append(add_property_choice_json("Both", FRAME_DISPLAY_BOTH, display));

	// Add volume mixing choices (dropdown style)
	root["mixing"]["choices"].append(add_property_choice_json("None", VOLUME_MIX_NONE, mixing));
	root["mixing"]["choices"].append(add_property_choice_json("Average", VOLUME_MIX_AVERAGE, mixing));
	root["mixing"]["choices"].append(add_property_choice_json("Reduce", VOLUME_MIX_REDUCE, mixing));

	// Add waveform choices (dropdown style)
	root["waveform"]["choices"].append(add_property_choice_json("Yes", true, waveform));
	root["waveform"]["choices"].append(add_property_choice_json("No", false, waveform));

	// Keyframes (each entry passes a pointer to its Keyframe so the UI can
	// inspect the curve; values are sampled at requested_frame)
	root["location_x"] = add_property_json("Location X", location_x.GetValue(requested_frame), "float", "", &location_x, -1.0, 1.0, false, requested_frame);
	root["location_y"] = add_property_json("Location Y", location_y.GetValue(requested_frame), "float", "", &location_y, -1.0, 1.0, false, requested_frame);
	root["scale_x"] = add_property_json("Scale X", scale_x.GetValue(requested_frame), "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
	root["scale_y"] = add_property_json("Scale Y", scale_y.GetValue(requested_frame), "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
	root["alpha"] = add_property_json("Alpha", alpha.GetValue(requested_frame), "float", "", &alpha, 0.0, 1.0, false, requested_frame);
	root["shear_x"] = add_property_json("Shear X", shear_x.GetValue(requested_frame), "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
	root["shear_y"] = add_property_json("Shear Y", shear_y.GetValue(requested_frame), "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
	root["rotation"] = add_property_json("Rotation", rotation.GetValue(requested_frame), "float", "", &rotation, -360, 360, false, requested_frame);
	root["volume"] = add_property_json("Volume", volume.GetValue(requested_frame), "float", "", &volume, 0.0, 1.0, false, requested_frame);
	root["time"] = add_property_json("Time", time.GetValue(requested_frame), "float", "", &time, 0.0, 30 * 60 * 60 * 48, false, requested_frame);
	root["channel_filter"] = add_property_json("Channel Filter", channel_filter.GetValue(requested_frame), "int", "", &channel_filter, -1, 10, false, requested_frame);
	root["channel_mapping"] = add_property_json("Channel Mapping", channel_mapping.GetValue(requested_frame), "int", "", &channel_mapping, -1, 10, false, requested_frame);
	root["has_audio"] = add_property_json("Enable Audio", has_audio.GetValue(requested_frame), "int", "", &has_audio, -1, 1.0, false, requested_frame);
	root["has_video"] = add_property_json("Enable Video", has_video.GetValue(requested_frame), "int", "", &has_video, -1, 1.0, false, requested_frame);

	// Waveform color: a "color" container entry plus one child entry per channel
	root["wave_color"] = add_property_json("Wave Color", 0.0, "color", "", &wave_color.red, 0, 255, false, requested_frame);
	root["wave_color"]["red"] = add_property_json("Red", wave_color.red.GetValue(requested_frame), "float", "", &wave_color.red, 0, 255, false, requested_frame);
	root["wave_color"]["blue"] = add_property_json("Blue", wave_color.blue.GetValue(requested_frame), "float", "", &wave_color.blue, 0, 255, false, requested_frame);
	root["wave_color"]["green"] = add_property_json("Green", wave_color.green.GetValue(requested_frame), "float", "", &wave_color.green, 0, 255, false, requested_frame);

	// Return formatted string
	return root.toStyledString();
}
|
|
|
|
|
|
2013-12-06 00:40:26 -06:00
|
|
|
// Generate Json::JsonValue for this object
//
// Serializes every persisted setting of this clip into a Json::Value tree:
// parent (ClipBase) properties, enum settings (as ints), all Keyframe curves
// (via their own JsonValue()), the list of attached effects, and — when a
// reader is attached — the reader's own serialized state under "reader".
// This is the inverse of SetJsonValue().
Json::Value Clip::JsonValue() {

	// Create root json object
	Json::Value root = ClipBase::JsonValue(); // get parent properties
	// Enum/scalar settings (stored as their integer/bool values)
	root["gravity"] = gravity;
	root["scale"] = scale;
	root["anchor"] = anchor;
	root["display"] = display;
	root["mixing"] = mixing;
	root["waveform"] = waveform;
	// Keyframe curves — each Keyframe/Color serializes its own points
	root["scale_x"] = scale_x.JsonValue();
	root["scale_y"] = scale_y.JsonValue();
	root["location_x"] = location_x.JsonValue();
	root["location_y"] = location_y.JsonValue();
	root["alpha"] = alpha.JsonValue();
	root["rotation"] = rotation.JsonValue();
	root["time"] = time.JsonValue();
	root["volume"] = volume.JsonValue();
	root["wave_color"] = wave_color.JsonValue();
	root["crop_width"] = crop_width.JsonValue();
	root["crop_height"] = crop_height.JsonValue();
	root["crop_x"] = crop_x.JsonValue();
	root["crop_y"] = crop_y.JsonValue();
	root["shear_x"] = shear_x.JsonValue();
	root["shear_y"] = shear_y.JsonValue();
	root["channel_filter"] = channel_filter.JsonValue();
	root["channel_mapping"] = channel_mapping.JsonValue();
	root["has_audio"] = has_audio.JsonValue();
	root["has_video"] = has_video.JsonValue();
	root["perspective_c1_x"] = perspective_c1_x.JsonValue();
	root["perspective_c1_y"] = perspective_c1_y.JsonValue();
	root["perspective_c2_x"] = perspective_c2_x.JsonValue();
	root["perspective_c2_y"] = perspective_c2_y.JsonValue();
	root["perspective_c3_x"] = perspective_c3_x.JsonValue();
	root["perspective_c3_y"] = perspective_c3_y.JsonValue();
	root["perspective_c4_x"] = perspective_c4_x.JsonValue();
	root["perspective_c4_y"] = perspective_c4_y.JsonValue();

	// Add array of effects (explicitly an array, even when empty)
	root["effects"] = Json::Value(Json::arrayValue);

	// loop through effects
	list<EffectBase*>::iterator effect_itr;
	for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
	{
		// Get clip object from the iterator
		EffectBase *existing_effect = (*effect_itr);
		root["effects"].append(existing_effect->JsonValue());
	}

	// Only serialize the reader when one is attached
	if (reader)
		root["reader"] = reader->JsonValue();

	// return JsonValue
	return root;
}
|
|
|
|
|
|
2013-12-07 21:09:55 -06:00
|
|
|
// Load JSON string into this object
|
2017-10-26 18:44:35 -05:00
|
|
|
void Clip::SetJson(string value) {
|
2013-12-07 21:09:55 -06:00
|
|
|
|
|
|
|
|
// Parse JSON string into JSON objects
|
|
|
|
|
Json::Value root;
|
|
|
|
|
Json::Reader reader;
|
|
|
|
|
bool success = reader.parse( value, root );
|
|
|
|
|
if (!success)
|
|
|
|
|
// Raise exception
|
|
|
|
|
throw InvalidJSON("JSON could not be parsed (or is invalid)", "");
|
|
|
|
|
|
|
|
|
|
try
|
|
|
|
|
{
|
|
|
|
|
// Set all values that match
|
|
|
|
|
SetJsonValue(root);
|
|
|
|
|
}
|
|
|
|
|
catch (exception e)
|
|
|
|
|
{
|
|
|
|
|
// Error parsing JSON (or missing keys)
|
|
|
|
|
throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-12-06 00:40:26 -06:00
|
|
|
// Load Json::JsonValue into this object
//
// Inverse of JsonValue(): applies every key found in @a root to this clip.
// Missing keys are left untouched (each setting is guarded by isNull()).
// If an "effects" array is present, the current effect list is replaced.
// If a "reader" object with a "type" is present, any existing reader is
// closed and deleted, a new reader of that type is constructed, and it is
// re-opened if the previous one was open.
void Clip::SetJsonValue(Json::Value root) {

	// Set parent data
	ClipBase::SetJsonValue(root);

	// Set data from Json (if key is found)
	if (!root["gravity"].isNull())
		gravity = (GravityType) root["gravity"].asInt();
	if (!root["scale"].isNull())
		scale = (ScaleType) root["scale"].asInt();
	if (!root["anchor"].isNull())
		anchor = (AnchorType) root["anchor"].asInt();
	if (!root["display"].isNull())
		display = (FrameDisplayType) root["display"].asInt();
	if (!root["mixing"].isNull())
		mixing = (VolumeMixType) root["mixing"].asInt();
	if (!root["waveform"].isNull())
		waveform = root["waveform"].asBool();
	// Keyframe curves — each Keyframe/Color parses its own points
	if (!root["scale_x"].isNull())
		scale_x.SetJsonValue(root["scale_x"]);
	if (!root["scale_y"].isNull())
		scale_y.SetJsonValue(root["scale_y"]);
	if (!root["location_x"].isNull())
		location_x.SetJsonValue(root["location_x"]);
	if (!root["location_y"].isNull())
		location_y.SetJsonValue(root["location_y"]);
	if (!root["alpha"].isNull())
		alpha.SetJsonValue(root["alpha"]);
	if (!root["rotation"].isNull())
		rotation.SetJsonValue(root["rotation"]);
	if (!root["time"].isNull())
		time.SetJsonValue(root["time"]);
	if (!root["volume"].isNull())
		volume.SetJsonValue(root["volume"]);
	if (!root["wave_color"].isNull())
		wave_color.SetJsonValue(root["wave_color"]);
	if (!root["crop_width"].isNull())
		crop_width.SetJsonValue(root["crop_width"]);
	if (!root["crop_height"].isNull())
		crop_height.SetJsonValue(root["crop_height"]);
	if (!root["crop_x"].isNull())
		crop_x.SetJsonValue(root["crop_x"]);
	if (!root["crop_y"].isNull())
		crop_y.SetJsonValue(root["crop_y"]);
	if (!root["shear_x"].isNull())
		shear_x.SetJsonValue(root["shear_x"]);
	if (!root["shear_y"].isNull())
		shear_y.SetJsonValue(root["shear_y"]);
	if (!root["channel_filter"].isNull())
		channel_filter.SetJsonValue(root["channel_filter"]);
	if (!root["channel_mapping"].isNull())
		channel_mapping.SetJsonValue(root["channel_mapping"]);
	if (!root["has_audio"].isNull())
		has_audio.SetJsonValue(root["has_audio"]);
	if (!root["has_video"].isNull())
		has_video.SetJsonValue(root["has_video"]);
	if (!root["perspective_c1_x"].isNull())
		perspective_c1_x.SetJsonValue(root["perspective_c1_x"]);
	if (!root["perspective_c1_y"].isNull())
		perspective_c1_y.SetJsonValue(root["perspective_c1_y"]);
	if (!root["perspective_c2_x"].isNull())
		perspective_c2_x.SetJsonValue(root["perspective_c2_x"]);
	if (!root["perspective_c2_y"].isNull())
		perspective_c2_y.SetJsonValue(root["perspective_c2_y"]);
	if (!root["perspective_c3_x"].isNull())
		perspective_c3_x.SetJsonValue(root["perspective_c3_x"]);
	if (!root["perspective_c3_y"].isNull())
		perspective_c3_y.SetJsonValue(root["perspective_c3_y"]);
	if (!root["perspective_c4_x"].isNull())
		perspective_c4_x.SetJsonValue(root["perspective_c4_x"]);
	if (!root["perspective_c4_y"].isNull())
		perspective_c4_y.SetJsonValue(root["perspective_c4_y"]);
	if (!root["effects"].isNull()) {

		// Clear existing effects
		// NOTE(review): only the pointer list is cleared; the EffectBase
		// objects themselves are not deleted here — confirm ownership model.
		effects.clear();

		// loop through effects
		for (int x = 0; x < root["effects"].size(); x++) {
			// Get each effect
			Json::Value existing_effect = root["effects"][x];

			// Create Effect
			EffectBase *e = NULL;

			if (!existing_effect["type"].isNull()) {
				// Create instance of effect
				// (assignment inside the condition is intentional: the
				// factory returns NULL for unknown effect types)
				if (e = EffectInfo().CreateEffect(existing_effect["type"].asString())) {

					// Load Json into Effect
					e->SetJsonValue(existing_effect);

					// Add Effect to Timeline
					AddEffect(e);
				}
			}
		}
	}
	if (!root["reader"].isNull()) // does Json contain a reader?
	{
		if (!root["reader"]["type"].isNull()) // does the reader Json contain a 'type'?
		{
			// Close previous reader (if any)
			bool already_open = false;
			if (reader)
			{
				// Track if reader was open
				already_open = reader->IsOpen();

				// Close and delete existing reader (if any)
				reader->Close();
				delete reader;
				reader = NULL;
			}

			// Create new reader (and load properties)
			string type = root["reader"]["type"].asString();

			if (type == "FFmpegReader") {

				// Create new reader
				reader = new FFmpegReader(root["reader"]["path"].asString(), false);
				reader->SetJsonValue(root["reader"]);

			} else if (type == "QtImageReader") {

				// Create new reader
				reader = new QtImageReader(root["reader"]["path"].asString(), false);
				reader->SetJsonValue(root["reader"]);

// ImageReader and TextReader are only compiled when ImageMagick is enabled
#ifdef USE_IMAGEMAGICK
			} else if (type == "ImageReader") {

				// Create new reader
				reader = new ImageReader(root["reader"]["path"].asString(), false);
				reader->SetJsonValue(root["reader"]);

			} else if (type == "TextReader") {

				// Create new reader
				reader = new TextReader();
				reader->SetJsonValue(root["reader"]);
#endif

			} else if (type == "ChunkReader") {

				// Create new reader
				reader = new ChunkReader(root["reader"]["path"].asString(), (ChunkVersion) root["reader"]["chunk_version"].asInt());
				reader->SetJsonValue(root["reader"]);

			} else if (type == "DummyReader") {

				// Create new reader
				reader = new DummyReader();
				reader->SetJsonValue(root["reader"]);
			}

			// mark as managed reader (this clip now owns and must delete it)
			if (reader)
				manage_reader = true;

			// Re-Open reader (if needed)
			if (already_open)
				reader->Open();

		}
	}
}
|
2015-03-14 01:36:13 -05:00
|
|
|
|
|
|
|
|
// Sort effects by order
|
|
|
|
|
void Clip::sort_effects()
|
|
|
|
|
{
|
|
|
|
|
// sort clips
|
|
|
|
|
effects.sort(CompareClipEffects());
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Add an effect to the clip
|
|
|
|
|
void Clip::AddEffect(EffectBase* effect)
|
|
|
|
|
{
|
|
|
|
|
// Add effect to list
|
|
|
|
|
effects.push_back(effect);
|
|
|
|
|
|
|
|
|
|
// Sort effects
|
|
|
|
|
sort_effects();
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Remove an effect from the clip
|
|
|
|
|
void Clip::RemoveEffect(EffectBase* effect)
|
|
|
|
|
{
|
|
|
|
|
effects.remove(effect);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Apply effects to the source frame (if any)
|
2017-08-20 17:37:39 -05:00
|
|
|
std::shared_ptr<Frame> Clip::apply_effects(std::shared_ptr<Frame> frame)
|
2015-03-14 01:36:13 -05:00
|
|
|
{
|
|
|
|
|
// Find Effects at this position and layer
|
|
|
|
|
list<EffectBase*>::iterator effect_itr;
|
|
|
|
|
for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
|
|
|
|
|
{
|
|
|
|
|
// Get clip object from the iterator
|
|
|
|
|
EffectBase *effect = (*effect_itr);
|
|
|
|
|
|
|
|
|
|
// Apply the effect to this frame
|
|
|
|
|
frame = effect->GetFrame(frame, frame->number);
|
|
|
|
|
|
|
|
|
|
} // end effect loop
|
|
|
|
|
|
|
|
|
|
// Return modified frame
|
|
|
|
|
return frame;
|
|
|
|
|
}
|