diff --git a/src/Clip.cpp b/src/Clip.cpp
index 21c3d4b1..e6ae5563 100644
--- a/src/Clip.cpp
+++ b/src/Clip.cpp
@@ -440,21 +440,18 @@ std::shared_ptr Clip::GetFrame(std::shared_ptr backgroun
// Apply waveform image (if any)
apply_waveform(frame, background_frame);
- // Apply local effects to the frame (if any)
- apply_effects(frame);
+ // Apply effects BEFORE applying keyframes (if any local or global effects are used)
+ apply_effects(frame, background_frame, options, true);
- // Apply global timeline effects (i.e. transitions & masks... if any)
- if (timeline != NULL && options != NULL) {
- if (options->is_top_clip) {
- // Apply global timeline effects (only to top clip... if overlapping, pass in timeline frame number)
- Timeline* timeline_instance = static_cast<Timeline*>(timeline);
- frame = timeline_instance->apply_effects(frame, background_frame->number, Layer());
- }
- }
-
- // Apply keyframe / transforms
+ // Apply keyframe / transforms to current clip image
apply_keyframes(frame, background_frame);
+ // Apply effects AFTER applying keyframes (if any local or global effects are used)
+ apply_effects(frame, background_frame, options, false);
+
+ // Apply background canvas (i.e. flatten this image onto previous layer image)
+ apply_background(frame, background_frame);
+
// Add final frame to cache
final_cache.Add(frame);
@@ -1202,16 +1199,41 @@ void Clip::RemoveEffect(EffectBase* effect)
final_cache.Clear();
}
+// Apply background image to the current clip image (i.e. flatten this image onto previous layer)
+void Clip::apply_background(std::shared_ptr<Frame> frame, std::shared_ptr<Frame> background_frame) {
+ // Add background canvas
+ std::shared_ptr<QImage> background_canvas = background_frame->GetImage();
+ QPainter painter(background_canvas.get());
+ painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);
+
+ // Composite a new layer onto the image
+ painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
+ painter.drawImage(0, 0, *frame->GetImage());
+ painter.end();
+
+ // Add new QImage to frame
+ frame->AddImage(background_canvas);
+}
+
// Apply effects to the source frame (if any)
-void Clip::apply_effects(std::shared_ptr<Frame> frame)
+void Clip::apply_effects(std::shared_ptr<Frame> frame, std::shared_ptr<Frame> background_frame, TimelineInfoStruct* options, bool before_keyframes)
{
- // Find Effects at this position and layer
for (auto effect : effects)
{
// Apply the effect to this frame
- frame = effect->GetFrame(frame, frame->number);
+ if (effect->info.apply_before_clip && before_keyframes) {
+ effect->GetFrame(frame, frame->number);
+ } else if (!effect->info.apply_before_clip && !before_keyframes) {
+ effect->GetFrame(frame, frame->number);
+ }
+ }
- } // end effect loop
+ if (timeline != NULL && options != NULL) {
+ // Apply global timeline effects (i.e. transitions & masks... if any)
+ Timeline* timeline_instance = static_cast<Timeline*>(timeline);
+ options->is_before_clip_keyframes = before_keyframes;
+ timeline_instance->apply_effects(frame, background_frame->number, Layer(), options);
+ }
}
// Compare 2 floating point numbers for equality
@@ -1228,20 +1250,16 @@ void Clip::apply_keyframes(std::shared_ptr frame, std::shared_ptr
return;
}
- // Get image from clip
+ // Get image from clip, and create transparent background image
std::shared_ptr<QImage> source_image = frame->GetImage();
- std::shared_ptr<QImage> background_canvas = background_frame->GetImage();
+ std::shared_ptr<QImage> background_canvas = std::make_shared<QImage>(background_frame->GetImage()->width(),
+ background_frame->GetImage()->height(),
+ QImage::Format_RGBA8888_Premultiplied);
+ background_canvas->fill(QColor(Qt::transparent));
// Get transform from clip's keyframes
QTransform transform = get_transform(frame, background_canvas->width(), background_canvas->height());
- // Debug output
- ZmqLogger::Instance()->AppendDebugMethod(
- "Clip::ApplyKeyframes (Transform: Composite Image Layer: Prepare)",
- "frame->number", frame->number,
- "background_canvas->width()", background_canvas->width(),
- "background_canvas->height()", background_canvas->height());
-
// Load timeline's new frame image into a QPainter
QPainter painter(background_canvas.get());
painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);
diff --git a/src/Clip.h b/src/Clip.h
index aef80b11..72128e07 100644
--- a/src/Clip.h
+++ b/src/Clip.h
@@ -127,8 +127,11 @@ namespace openshot {
/// Adjust frame number minimum value
int64_t adjust_frame_number_minimum(int64_t frame_number);
+ /// Apply background image to the current clip image (i.e. flatten this image onto previous layer)
+ void apply_background(std::shared_ptr<openshot::Frame> frame, std::shared_ptr<openshot::Frame> background_frame);
+
/// Apply effects to the source frame (if any)
- void apply_effects(std::shared_ptr<openshot::Frame> frame);
+ void apply_effects(std::shared_ptr<openshot::Frame> frame, std::shared_ptr<openshot::Frame> background_frame, TimelineInfoStruct* options, bool before_keyframes);
/// Apply keyframes to an openshot::Frame and use an existing background frame (if any)
void apply_keyframes(std::shared_ptr frame, std::shared_ptr background_frame);
diff --git a/src/EffectBase.cpp b/src/EffectBase.cpp
index b3f8b03e..d833b8cb 100644
--- a/src/EffectBase.cpp
+++ b/src/EffectBase.cpp
@@ -30,7 +30,6 @@ void EffectBase::InitEffectInfo()
End(0.0);
Order(0);
ParentClip(NULL);
-
parentEffect = NULL;
info.has_video = false;
@@ -39,6 +38,7 @@ void EffectBase::InitEffectInfo()
info.name = "";
info.description = "";
info.parent_effect_id = "";
+ info.apply_before_clip = true;
}
// Display file information
@@ -51,6 +51,8 @@ void EffectBase::DisplayInfo(std::ostream* out) {
*out << "--> Description: " << info.description << std::endl;
*out << "--> Has Video: " << info.has_video << std::endl;
*out << "--> Has Audio: " << info.has_audio << std::endl;
+ *out << "--> Apply Before Clip Keyframes: " << info.apply_before_clip << std::endl;
+ *out << "--> Order: " << order << std::endl;
*out << "----------------------------" << std::endl;
}
@@ -85,6 +87,7 @@ Json::Value EffectBase::JsonValue() const {
root["has_video"] = info.has_video;
root["has_audio"] = info.has_audio;
root["has_tracked_object"] = info.has_tracked_object;
+ root["apply_before_clip"] = info.apply_before_clip;
root["order"] = Order();
// return JsonValue
@@ -145,6 +148,9 @@ void EffectBase::SetJsonValue(const Json::Value root) {
if (!my_root["order"].isNull())
Order(my_root["order"].asInt());
+ if (!my_root["apply_before_clip"].isNull())
+ info.apply_before_clip = my_root["apply_before_clip"].asBool();
+
if (!my_root["parent_effect_id"].isNull()){
info.parent_effect_id = my_root["parent_effect_id"].asString();
if (info.parent_effect_id.size() > 0 && info.parent_effect_id != "" && parentEffect == NULL)
@@ -169,6 +175,28 @@ Json::Value EffectBase::JsonInfo() const {
return root;
}
+// Get all properties for a specific frame
+Json::Value EffectBase::BasePropertiesJSON(int64_t requested_frame) const {
+ // Generate JSON properties list
+ Json::Value root;
+ root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
+ root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
+ root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
+ root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
+ root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
+ root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
+
+ // Add apply_before_clip choices (dropdown style)
+ root["apply_before_clip"] = add_property_json("Apply Before Clip Keyframes", info.apply_before_clip, "int", "", NULL, 0, 1, false, requested_frame);
+ root["apply_before_clip"]["choices"].append(add_property_choice_json("Yes", true, info.apply_before_clip));
+ root["apply_before_clip"]["choices"].append(add_property_choice_json("No", false, info.apply_before_clip));
+
+ // Set the parent effect which properties this effect will inherit
+ root["parent_effect_id"] = add_property_json("Parent", 0.0, "string", info.parent_effect_id, NULL, -1, -1, false, requested_frame);
+
+ return root;
+}
+
/// Parent clip object of this reader (which can be unparented and NULL)
openshot::ClipBase* EffectBase::ParentClip() {
return clip;
diff --git a/src/EffectBase.h b/src/EffectBase.h
index bd217faf..fb39d62b 100644
--- a/src/EffectBase.h
+++ b/src/EffectBase.h
@@ -40,6 +40,7 @@ namespace openshot
bool has_video; ///< Determines if this effect manipulates the image of a frame
bool has_audio; ///< Determines if this effect manipulates the audio of a frame
bool has_tracked_object; ///< Determines if this effect track objects through the clip
+ bool apply_before_clip; ///< Apply effect before we evaluate the clip's keyframes
};
/**
@@ -58,7 +59,6 @@ namespace openshot
openshot::ClipBase* clip; ///< Pointer to the parent clip instance (if any)
public:
-
/// Parent effect (which properties will set this effect properties)
EffectBase* parentEffect;
@@ -106,7 +106,11 @@ namespace openshot
return;
};
- Json::Value JsonInfo() const; ///< Generate JSON object of meta data / info
+ /// Generate JSON object of meta data / info
+ Json::Value JsonInfo() const;
+
+ /// Generate JSON object of base properties (recommended to be used by all effects)
+ Json::Value BasePropertiesJSON(int64_t requested_frame) const;
/// Get the order that this effect should be executed.
int Order() const { return order; }
diff --git a/src/Timeline.cpp b/src/Timeline.cpp
index ee487b9a..de51a563 100644
--- a/src/Timeline.cpp
+++ b/src/Timeline.cpp
@@ -523,7 +523,7 @@ double Timeline::calculate_time(int64_t number, Fraction rate)
}
// Apply effects to the source frame (if any)
-std::shared_ptr<Frame> Timeline::apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number, int layer)
+std::shared_ptr<Frame> Timeline::apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number, int layer, TimelineInfoStruct* options)
{
// Debug output
ZmqLogger::Instance()->AppendDebugMethod(
@@ -541,14 +541,6 @@ std::shared_ptr Timeline::apply_effects(std::shared_ptr frame, int
bool does_effect_intersect = (effect_start_position <= timeline_frame_number && effect_end_position >= timeline_frame_number && effect->Layer() == layer);
- // Debug output
- ZmqLogger::Instance()->AppendDebugMethod(
- "Timeline::apply_effects (Does effect intersect)",
- "effect->Position()", effect->Position(),
- "does_effect_intersect", does_effect_intersect,
- "timeline_frame_number", timeline_frame_number,
- "layer", layer);
-
// Clip is visible
if (does_effect_intersect)
{
@@ -556,6 +548,12 @@ std::shared_ptr Timeline::apply_effects(std::shared_ptr frame, int
long effect_start_frame = (effect->Start() * info.fps.ToDouble()) + 1;
long effect_frame_number = timeline_frame_number - effect_start_position + effect_start_frame;
+ if (!options->is_top_clip)
+ continue; // skip effect, if overlapped/covered by another clip on same layer
+
+ if (options->is_before_clip_keyframes != effect->info.apply_before_clip)
+ continue; // skip effect, if this filter does not match
+
// Debug output
ZmqLogger::Instance()->AppendDebugMethod(
"Timeline::apply_effects (Process Effect)",
@@ -615,6 +613,7 @@ void Timeline::add_layer(std::shared_ptr new_frame, Clip* source_clip, in
// Create timeline options (with details about this current frame request)
TimelineInfoStruct* options = new TimelineInfoStruct();
options->is_top_clip = is_top_clip;
+ options->is_before_clip_keyframes = true;
// Get the clip's frame, composited on top of the current timeline frame
std::shared_ptr source_frame;
diff --git a/src/Timeline.h b/src/Timeline.h
index d71643c7..3d16cfc6 100644
--- a/src/Timeline.h
+++ b/src/Timeline.h
@@ -68,15 +68,13 @@ namespace openshot {
/// the Clip with the highest end-frame number using std::max_element
struct CompareClipEndFrames {
bool operator()(const openshot::Clip* lhs, const openshot::Clip* rhs) {
- return (lhs->Position() + lhs->Duration())
- <= (rhs->Position() + rhs->Duration());
+ return (lhs->Position() + lhs->Duration()) <= (rhs->Position() + rhs->Duration());
}};
/// Like CompareClipEndFrames, but for effects
struct CompareEffectEndFrames {
bool operator()(const openshot::EffectBase* lhs, const openshot::EffectBase* rhs) {
- return (lhs->Position() + lhs->Duration())
- <= (rhs->Position() + rhs->Duration());
+ return (lhs->Position() + lhs->Duration()) <= (rhs->Position() + rhs->Duration());
}};
/**
@@ -231,7 +229,7 @@ namespace openshot {
/// @param convert_absolute_paths Should all paths be converted to absolute paths (relative to the location of projectPath)
Timeline(const std::string& projectPath, bool convert_absolute_paths);
- virtual ~Timeline();
+ virtual ~Timeline();
/// Add to the tracked_objects map a pointer to a tracked object (TrackedObjectBBox)
void AddTrackedObject(std::shared_ptr trackedObject);
@@ -240,9 +238,9 @@ namespace openshot {
/// Return the ID's of the tracked objects as a list of strings
std::list GetTrackedObjectsIds() const;
/// Return the trackedObject's properties as a JSON string
- #ifdef USE_OPENCV
+ #ifdef USE_OPENCV
std::string GetTrackedObjectValues(std::string id, int64_t frame_number) const;
- #endif
+ #endif
/// @brief Add an openshot::Clip to the timeline
/// @param clip Add an openshot::Clip to the timeline. A clip can contain any type of Reader.
@@ -252,8 +250,8 @@ namespace openshot {
/// @param effect Add an effect to the timeline. An effect can modify the audio or video of an openshot::Frame.
void AddEffect(openshot::EffectBase* effect);
- /// Apply global/timeline effects to the source frame (if any)
- std::shared_ptr<openshot::Frame> apply_effects(std::shared_ptr<openshot::Frame> frame, int64_t timeline_frame_number, int layer);
+ /// Apply global/timeline effects to the source frame (if any)
+ std::shared_ptr<openshot::Frame> apply_effects(std::shared_ptr<openshot::Frame> frame, int64_t timeline_frame_number, int layer, TimelineInfoStruct* options);
/// Apply the timeline's framerate and samplerate to all clips
void ApplyMapperToClips();
@@ -266,7 +264,7 @@ namespace openshot {
/// Clear all clips, effects, and frame mappers from timeline (and free memory)
void Clear();
-
+
/// Clear all cache for this timeline instance, including all clips' cache
/// @param deep If True, clear all FrameMappers and nested Readers (QtImageReader, FFmpegReader, etc...)
void ClearAllCache(bool deep=false);
diff --git a/src/TimelineBase.h b/src/TimelineBase.h
index d3d3d067..46194c50 100644
--- a/src/TimelineBase.h
+++ b/src/TimelineBase.h
@@ -18,21 +18,22 @@
namespace openshot {
- // Forward decl
- class Clip;
+ // Forward decl
+ class Clip;
- /**
- * @brief This struct contains info about the current Timeline clip instance
- *
- * When the Timeline requests an openshot::Frame instance from a Clip, it passes
- * this struct along, with some additional details from the Timeline, such as if this clip is
- * above or below overlapping clips, etc... This info can help determine if a Clip should apply
- * global effects from the Timeline, such as a global Transition/Mask effect.
- */
- struct TimelineInfoStruct
- {
- bool is_top_clip; ///< Is clip on top (if overlapping another clip)
- };
+ /**
+ * @brief This struct contains info about the current Timeline clip instance
+ *
+ * When the Timeline requests an openshot::Frame instance from a Clip, it passes
+ * this struct along, with some additional details from the Timeline, such as if this clip is
+ * above or below overlapping clips, etc... This info can help determine if a Clip should apply
+ * global effects from the Timeline, such as a global Transition/Mask effect.
+ */
+ struct TimelineInfoStruct
+ {
+ bool is_top_clip; ///< Is clip on top (if overlapping another clip)
+ bool is_before_clip_keyframes; ///< Is this before clip keyframes are applied
+ };
/**
* @brief This class represents a timeline (used for building generic timeline implementations)
diff --git a/src/audio_effects/Compressor.cpp b/src/audio_effects/Compressor.cpp
index 07043482..83cf03aa 100644
--- a/src/audio_effects/Compressor.cpp
+++ b/src/audio_effects/Compressor.cpp
@@ -19,11 +19,11 @@ using namespace openshot;
Compressor::Compressor() : Compressor::Compressor(-10, 1, 1, 1, 1, false) {}
Compressor::Compressor(Keyframe threshold, Keyframe ratio, Keyframe attack,
- Keyframe release, Keyframe makeup_gain,
- Keyframe bypass):
- threshold(threshold), ratio(ratio), attack(attack),
- release(release), makeup_gain(makeup_gain), bypass(bypass),
- input_level(0.0), yl_prev(0.0)
+ Keyframe release, Keyframe makeup_gain,
+ Keyframe bypass):
+ threshold(threshold), ratio(ratio), attack(attack),
+ release(release), makeup_gain(makeup_gain), bypass(bypass),
+ input_level(0.0), yl_prev(0.0)
{
// Init effect properties
init_effect_details();
@@ -48,33 +48,33 @@ void Compressor::init_effect_details()
std::shared_ptr<Frame> Compressor::GetFrame(std::shared_ptr<Frame> frame, int64_t frame_number)
{
// Adding Compressor
- const int num_input_channels = frame->audio->getNumChannels();
- const int num_output_channels = frame->audio->getNumChannels();
- const int num_samples = frame->audio->getNumSamples();
+ const int num_input_channels = frame->audio->getNumChannels();
+ const int num_output_channels = frame->audio->getNumChannels();
+ const int num_samples = frame->audio->getNumSamples();
- mixed_down_input.setSize(1, num_samples);
+ mixed_down_input.setSize(1, num_samples);
inverse_sample_rate = 1.0f / frame->SampleRate();
- inverseE = 1.0f / M_E;
+ inverseE = 1.0f / M_E;
if ((bool)bypass.GetValue(frame_number))
- return frame;
+ return frame;
mixed_down_input.clear();
for (int channel = 0; channel < num_input_channels; ++channel)
- mixed_down_input.addFrom(0, 0, *frame->audio, channel, 0, num_samples, 1.0f / num_input_channels);
+ mixed_down_input.addFrom(0, 0, *frame->audio, channel, 0, num_samples, 1.0f / num_input_channels);
- for (int sample = 0; sample < num_samples; ++sample) {
- float T = threshold.GetValue(frame_number);
- float R = ratio.GetValue(frame_number);
- float alphaA = calculateAttackOrRelease(attack.GetValue(frame_number));
- float alphaR = calculateAttackOrRelease(release.GetValue(frame_number));
- float gain = makeup_gain.GetValue(frame_number);
+ for (int sample = 0; sample < num_samples; ++sample) {
+ float T = threshold.GetValue(frame_number);
+ float R = ratio.GetValue(frame_number);
+ float alphaA = calculateAttackOrRelease(attack.GetValue(frame_number));
+ float alphaR = calculateAttackOrRelease(release.GetValue(frame_number));
+ float gain = makeup_gain.GetValue(frame_number);
float input_squared = powf(mixed_down_input.getSample(0, sample), 2.0f);
input_level = input_squared;
- xg = (input_level <= 1e-6f) ? -60.0f : 10.0f * log10f(input_level);
+ xg = (input_level <= 1e-6f) ? -60.0f : 10.0f * log10f(input_level);
if (xg < T)
yg = xg;
@@ -88,17 +88,17 @@ std::shared_ptr Compressor::GetFrame(std::shared_ptraudio->getSample(channel, sample)*control;
- frame->audio->setSample(channel, sample, new_value);
- }
+ for (int channel = 0; channel < num_input_channels; ++channel) {
+ float new_value = frame->audio->getSample(channel, sample)*control;
+ frame->audio->setSample(channel, sample, new_value);
+ }
}
- for (int channel = num_input_channels; channel < num_output_channels; ++channel)
- frame->audio->clear(channel, 0, num_samples);
+ for (int channel = num_input_channels; channel < num_output_channels; ++channel)
+ frame->audio->clear(channel, 0, num_samples);
// return the modified frame
return frame;
@@ -106,10 +106,10 @@ std::shared_ptr Compressor::GetFrame(std::shared_ptr Delay::GetFrame(std::shared_ptraudio->getNumChannels(); channel++)
{
float *channel_data = frame->audio->getWritePointer(channel);
- float *delay_data = delay_buffer.getWritePointer(channel);
- local_write_position = delay_write_position;
+ float *delay_data = delay_buffer.getWritePointer(channel);
+ local_write_position = delay_write_position;
for (auto sample = 0; sample < frame->audio->getNumSamples(); ++sample)
{
const float in = (float)(channel_data[sample]);
- float out = 0.0f;
+ float out = 0.0f;
- float read_position = fmodf((float)local_write_position - delay_time_value + (float)delay_buffer_samples, delay_buffer_samples);
- int local_read_position = floorf(read_position);
+ float read_position = fmodf((float)local_write_position - delay_time_value + (float)delay_buffer_samples, delay_buffer_samples);
+ int local_read_position = floorf(read_position);
- if (local_read_position != local_write_position)
+ if (local_read_position != local_write_position)
{
- float fraction = read_position - (float)local_read_position;
- float delayed1 = delay_data[(local_read_position + 0)];
- float delayed2 = delay_data[(local_read_position + 1) % delay_buffer_samples];
- out = (float)(delayed1 + fraction * (delayed2 - delayed1));
+ float fraction = read_position - (float)local_read_position;
+ float delayed1 = delay_data[(local_read_position + 0)];
+ float delayed2 = delay_data[(local_read_position + 1) % delay_buffer_samples];
+ out = (float)(delayed1 + fraction * (delayed2 - delayed1));
- channel_data[sample] = in + (out - in);
+ channel_data[sample] = in + (out - in);
delay_data[local_write_position] = in;
- }
+ }
- if (++local_write_position >= delay_buffer_samples)
- local_write_position -= delay_buffer_samples;
+ if (++local_write_position >= delay_buffer_samples)
+ local_write_position -= delay_buffer_samples;
}
}
- delay_write_position = local_write_position;
+ delay_write_position = local_write_position;
// return the modified frame
return frame;
@@ -152,12 +152,7 @@ void Delay::SetJsonValue(const Json::Value root) {
std::string Delay::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
- Json::Value root;
- root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
- root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
- root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
+ Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["delay_time"] = add_property_json("Delay Time", delay_time.GetValue(requested_frame), "float", "", &delay_time, 0, 5, false, requested_frame);
diff --git a/src/audio_effects/Distortion.cpp b/src/audio_effects/Distortion.cpp
index 18e38144..d5382f12 100644
--- a/src/audio_effects/Distortion.cpp
+++ b/src/audio_effects/Distortion.cpp
@@ -18,10 +18,10 @@ using namespace openshot;
Distortion::Distortion(): Distortion::Distortion(HARD_CLIPPING, 10, -10, 5) { }
Distortion::Distortion(openshot::DistortionType distortion_type,
- Keyframe input_gain, Keyframe output_gain,
- Keyframe tone):
- distortion_type(distortion_type), input_gain(input_gain),
- output_gain(output_gain), tone(tone)
+ Keyframe input_gain, Keyframe output_gain,
+ Keyframe tone):
+ distortion_type(distortion_type), input_gain(input_gain),
+ output_gain(output_gain), tone(tone)
{
// Init effect properties
init_effect_details();
@@ -48,12 +48,12 @@ std::shared_ptr Distortion::GetFrame(std::shared_ptraudio->getNumChannels(); ++i) {
- Filter* filter;
- filters.add (filter = new Filter());
- }
+ for (int i = 0; i < frame->audio->getNumChannels(); ++i) {
+ Filter* filter;
+ filters.add (filter = new Filter());
+ }
- updateFilters(frame_number);
+ updateFilters(frame_number);
// Add distortion
for (int channel = 0; channel < frame->audio->getNumChannels(); channel++)
@@ -73,53 +73,53 @@ std::shared_ptr Distortion::GetFrame(std::shared_ptr threshold)
- out = threshold;
- else if (in < -threshold)
- out = -threshold;
- else
- out = in;
- break;
- }
+ if (in > threshold)
+ out = threshold;
+ else if (in < -threshold)
+ out = -threshold;
+ else
+ out = in;
+ break;
+ }
- case SOFT_CLIPPING: {
- float threshold1 = 1.0f / 3.0f;
- float threshold2 = 2.0f / 3.0f;
- if (in > threshold2)
- out = 1.0f;
- else if (in > threshold1)
- out = 1.0f - powf (2.0f - 3.0f * in, 2.0f) / 3.0f;
- else if (in < -threshold2)
- out = -1.0f;
- else if (in < -threshold1)
- out = -1.0f + powf (2.0f + 3.0f * in, 2.0f) / 3.0f;
- else
- out = 2.0f * in;
- out *= 0.5f;
- break;
- }
+ case SOFT_CLIPPING: {
+ float threshold1 = 1.0f / 3.0f;
+ float threshold2 = 2.0f / 3.0f;
+ if (in > threshold2)
+ out = 1.0f;
+ else if (in > threshold1)
+ out = 1.0f - powf (2.0f - 3.0f * in, 2.0f) / 3.0f;
+ else if (in < -threshold2)
+ out = -1.0f;
+ else if (in < -threshold1)
+ out = -1.0f + powf (2.0f + 3.0f * in, 2.0f) / 3.0f;
+ else
+ out = 2.0f * in;
+ out *= 0.5f;
+ break;
+ }
- case EXPONENTIAL: {
- if (in > 0.0f)
- out = 1.0f - expf (-in);
- else
- out = -1.0f + expf (in);
- break;
- }
+ case EXPONENTIAL: {
+ if (in > 0.0f)
+ out = 1.0f - expf (-in);
+ else
+ out = -1.0f + expf (in);
+ break;
+ }
- case FULL_WAVE_RECTIFIER: {
- out = fabsf (in);
- break;
- }
+ case FULL_WAVE_RECTIFIER: {
+ out = fabsf (in);
+ break;
+ }
- case HALF_WAVE_RECTIFIER: {
- if (in > 0.0f)
- out = in;
- else
- out = 0.0f;
- break;
- }
- }
+ case HALF_WAVE_RECTIFIER: {
+ if (in > 0.0f)
+ out = in;
+ else
+ out = 0.0f;
+ break;
+ }
+ }
float filtered = filters[channel]->processSingleSampleRaw(out);
channel_data[sample] = filtered*powf(10.0f, output_gain_value * 0.05f);
@@ -132,11 +132,11 @@ std::shared_ptr Distortion::GetFrame(std::shared_ptrupdateCoefficients(discrete_frequency, gain);
+ for (int i = 0; i < filters.size(); ++i)
+ filters[i]->updateCoefficients(discrete_frequency, gain);
}
// Generate JSON string of this object
@@ -216,12 +216,7 @@ void Distortion::SetJsonValue(const Json::Value root) {
std::string Distortion::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
- Json::Value root;
- root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
- root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
- root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
+ Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["distortion_type"] = add_property_json("Distortion Type", distortion_type, "int", "", NULL, 0, 3, false, requested_frame);
diff --git a/src/audio_effects/Echo.cpp b/src/audio_effects/Echo.cpp
index afffdcf3..cc04e19f 100644
--- a/src/audio_effects/Echo.cpp
+++ b/src/audio_effects/Echo.cpp
@@ -19,7 +19,7 @@ using namespace openshot;
Echo::Echo() : Echo::Echo(0.1, 0.5, 0.5) { }
Echo::Echo(Keyframe echo_time, Keyframe feedback, Keyframe mix) :
- echo_time(echo_time), feedback(feedback), mix(mix)
+ echo_time(echo_time), feedback(feedback), mix(mix)
{
// Init effect properties
init_effect_details();
@@ -72,33 +72,33 @@ std::shared_ptr Echo::GetFrame(std::shared_ptr
for (int channel = 0; channel < frame->audio->getNumChannels(); channel++)
{
float *channel_data = frame->audio->getWritePointer(channel);
- float *echo_data = echo_buffer.getWritePointer(channel);
- local_write_position = echo_write_position;
+ float *echo_data = echo_buffer.getWritePointer(channel);
+ local_write_position = echo_write_position;
for (auto sample = 0; sample < frame->audio->getNumSamples(); ++sample)
{
const float in = (float)(channel_data[sample]);
- float out = 0.0f;
+ float out = 0.0f;
- float read_position = fmodf((float)local_write_position - echo_time_value + (float)echo_buffer_samples, echo_buffer_samples);
- int local_read_position = floorf(read_position);
+ float read_position = fmodf((float)local_write_position - echo_time_value + (float)echo_buffer_samples, echo_buffer_samples);
+ int local_read_position = floorf(read_position);
- if (local_read_position != local_write_position)
+ if (local_read_position != local_write_position)
{
- float fraction = read_position - (float)local_read_position;
- float echoed1 = echo_data[(local_read_position + 0)];
- float echoed2 = echo_data[(local_read_position + 1) % echo_buffer_samples];
- out = (float)(echoed1 + fraction * (echoed2 - echoed1));
- channel_data[sample] = in + mix_value*(out - in);
+ float fraction = read_position - (float)local_read_position;
+ float echoed1 = echo_data[(local_read_position + 0)];
+ float echoed2 = echo_data[(local_read_position + 1) % echo_buffer_samples];
+ out = (float)(echoed1 + fraction * (echoed2 - echoed1));
+ channel_data[sample] = in + mix_value*(out - in);
echo_data[local_write_position] = in + out*feedback_value;
- }
+ }
- if (++local_write_position >= echo_buffer_samples)
- local_write_position -= echo_buffer_samples;
+ if (++local_write_position >= echo_buffer_samples)
+ local_write_position -= echo_buffer_samples;
}
}
- echo_write_position = local_write_position;
+ echo_write_position = local_write_position;
// return the modified frame
return frame;
@@ -161,12 +161,7 @@ void Echo::SetJsonValue(const Json::Value root) {
std::string Echo::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
- Json::Value root;
- root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
- root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
- root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
+ Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["echo_time"] = add_property_json("Time", echo_time.GetValue(requested_frame), "float", "", &echo_time, 0, 5, false, requested_frame);
diff --git a/src/audio_effects/Expander.cpp b/src/audio_effects/Expander.cpp
index cbdd2312..f6e110c1 100644
--- a/src/audio_effects/Expander.cpp
+++ b/src/audio_effects/Expander.cpp
@@ -20,9 +20,9 @@ Expander::Expander(): Expander::Expander(-10, 1, 1, 1, 1, false) { }
// Default constructor
Expander::Expander(Keyframe threshold, Keyframe ratio, Keyframe attack,
- Keyframe release, Keyframe makeup_gain, Keyframe bypass) :
- threshold(threshold), ratio(ratio), attack(attack),
- release(release), makeup_gain(makeup_gain), bypass(bypass)
+ Keyframe release, Keyframe makeup_gain, Keyframe bypass) :
+ threshold(threshold), ratio(ratio), attack(attack),
+ release(release), makeup_gain(makeup_gain), bypass(bypass)
{
// Init effect properties
init_effect_details();
@@ -41,8 +41,8 @@ void Expander::init_effect_details()
info.has_audio = true;
info.has_video = false;
- input_level = 0.0f;
- yl_prev = 0.0f;
+ input_level = 0.0f;
+ yl_prev = 0.0f;
}
@@ -52,34 +52,34 @@ void Expander::init_effect_details()
std::shared_ptr<Frame> Expander::GetFrame(std::shared_ptr<Frame> frame, int64_t frame_number)
{
// Adding Expander
- const int num_input_channels = frame->audio->getNumChannels();
- const int num_output_channels = frame->audio->getNumChannels();
- const int num_samples = frame->audio->getNumSamples();
+ const int num_input_channels = frame->audio->getNumChannels();
+ const int num_output_channels = frame->audio->getNumChannels();
+ const int num_samples = frame->audio->getNumSamples();
- mixed_down_input.setSize(1, num_samples);
+ mixed_down_input.setSize(1, num_samples);
inverse_sample_rate = 1.0f / frame->SampleRate();
- inverseE = 1.0f / M_E;
+ inverseE = 1.0f / M_E;
if ((bool)bypass.GetValue(frame_number))
- return frame;
+ return frame;
mixed_down_input.clear();
for (int channel = 0; channel < num_input_channels; ++channel)
- mixed_down_input.addFrom(0, 0, *frame->audio, channel, 0, num_samples, 1.0f / num_input_channels);
+ mixed_down_input.addFrom(0, 0, *frame->audio, channel, 0, num_samples, 1.0f / num_input_channels);
- for (int sample = 0; sample < num_samples; ++sample) {
- float T = threshold.GetValue(frame_number);
- float R = ratio.GetValue(frame_number);
- float alphaA = calculateAttackOrRelease(attack.GetValue(frame_number));
- float alphaR = calculateAttackOrRelease(release.GetValue(frame_number));
- float gain = makeup_gain.GetValue(frame_number);
+ for (int sample = 0; sample < num_samples; ++sample) {
+ float T = threshold.GetValue(frame_number);
+ float R = ratio.GetValue(frame_number);
+ float alphaA = calculateAttackOrRelease(attack.GetValue(frame_number));
+ float alphaR = calculateAttackOrRelease(release.GetValue(frame_number));
+ float gain = makeup_gain.GetValue(frame_number);
float input_squared = powf(mixed_down_input.getSample(0, sample), 2.0f);
const float average_factor = 0.9999f;
input_level = average_factor * input_level + (1.0f - average_factor) * input_squared;
- xg = (input_level <= 1e-6f) ? -60.0f : 10.0f * log10f(input_level);
+ xg = (input_level <= 1e-6f) ? -60.0f : 10.0f * log10f(input_level);
if (xg > T)
yg = xg;
@@ -94,17 +94,17 @@ std::shared_ptr Expander::GetFrame(std::shared_ptraudio->getSample(channel, sample)*control;
- frame->audio->setSample(channel, sample, new_value);
- }
+ for (int channel = 0; channel < num_input_channels; ++channel) {
+ float new_value = frame->audio->getSample(channel, sample)*control;
+ frame->audio->setSample(channel, sample, new_value);
+ }
}
- for (int channel = num_input_channels; channel < num_output_channels; ++channel)
- frame->audio->clear(channel, 0, num_samples);
+ for (int channel = num_input_channels; channel < num_output_channels; ++channel)
+ frame->audio->clear(channel, 0, num_samples);
// return the modified frame
return frame;
@@ -112,10 +112,10 @@ std::shared_ptr Expander::GetFrame(std::shared_ptr ParametricEQ::GetFrame(std::shared_ptraudio->getNumChannels();
- const int num_output_channels = frame->audio->getNumChannels();
- const int num_samples = frame->audio->getNumSamples();
- updateFilters(frame_number, num_samples);
+ const int num_output_channels = frame->audio->getNumChannels();
+ const int num_samples = frame->audio->getNumSamples();
+ updateFilters(frame_number, num_samples);
for (int channel = 0; channel < frame->audio->getNumChannels(); channel++)
{
@@ -69,9 +69,9 @@ std::shared_ptr ParametricEQ::GetFrame(std::shared_ptrprocessSamples(channel_data, num_samples);
}
- for (int channel = num_input_channels; channel < num_output_channels; ++channel)
+ for (int channel = num_input_channels; channel < num_output_channels; ++channel)
{
- frame->audio->clear(channel, 0, num_samples);
+ frame->audio->clear(channel, 0, num_samples);
}
// return the modified frame
@@ -161,12 +161,12 @@ void ParametricEQ::Filter::updateCoefficients (
void ParametricEQ::updateFilters(int64_t frame_number, double sample_rate)
{
- double discrete_frequency = 2.0 * M_PI * (double)frequency.GetValue(frame_number) / sample_rate;
+ double discrete_frequency = 2.0 * M_PI * (double)frequency.GetValue(frame_number) / sample_rate;
double q_value = (double)q_factor.GetValue(frame_number);
double gain_value = pow(10.0, (double)gain.GetValue(frame_number) * 0.05);
int filter_type_value = (int)filter_type;
- for (int i = 0; i < filters.size(); ++i)
+ for (int i = 0; i < filters.size(); ++i)
filters[i]->updateCoefficients(discrete_frequency, q_value, gain_value, filter_type_value);
}
@@ -233,12 +233,7 @@ void ParametricEQ::SetJsonValue(const Json::Value root) {
std::string ParametricEQ::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
- Json::Value root;
- root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
- root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
- root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
+ Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["filter_type"] = add_property_json("Filter Type", filter_type, "int", "", NULL, 0, 3, false, requested_frame);
diff --git a/src/audio_effects/Robotization.cpp b/src/audio_effects/Robotization.cpp
index a26d9493..f546b18e 100644
--- a/src/audio_effects/Robotization.cpp
+++ b/src/audio_effects/Robotization.cpp
@@ -18,13 +18,13 @@ using namespace openshot;
using namespace juce;
Robotization::Robotization()
- : Robotization::Robotization(FFT_SIZE_512, HOP_SIZE_2, RECTANGULAR) {}
+ : Robotization::Robotization(FFT_SIZE_512, HOP_SIZE_2, RECTANGULAR) {}
Robotization::Robotization(openshot::FFTSize fft_size,
- openshot::HopSize hop_size,
- openshot::WindowType window_type) :
- fft_size(fft_size), hop_size(hop_size),
- window_type(window_type), stft(*this)
+ openshot::HopSize hop_size,
+ openshot::WindowType window_type) :
+ fft_size(fft_size), hop_size(hop_size),
+ window_type(window_type), stft(*this)
{
// Init effect properties
init_effect_details();
@@ -49,20 +49,20 @@ void Robotization::init_effect_details()
std::shared_ptr Robotization::GetFrame(std::shared_ptr frame, int64_t frame_number)
{
const std::lock_guard lock(mutex);
- ScopedNoDenormals noDenormals;
+ ScopedNoDenormals noDenormals;
- const int num_input_channels = frame->audio->getNumChannels();
- const int num_output_channels = frame->audio->getNumChannels();
- const int num_samples = frame->audio->getNumSamples();
- const int hop_size_value = 1 << ((int)hop_size + 1);
+ const int num_input_channels = frame->audio->getNumChannels();
+ const int num_output_channels = frame->audio->getNumChannels();
+ const int num_samples = frame->audio->getNumSamples();
+ const int hop_size_value = 1 << ((int)hop_size + 1);
const int fft_size_value = 1 << ((int)fft_size + 5);
- stft.setup(num_output_channels);
- stft.updateParameters((int)fft_size_value,
- (int)hop_size_value,
- (int)window_type);
+ stft.setup(num_output_channels);
+ stft.updateParameters((int)fft_size_value,
+ (int)hop_size_value,
+ (int)window_type);
- stft.process(*frame->audio);
+ stft.process(*frame->audio);
// return the modified frame
return frame;
@@ -139,12 +139,7 @@ void Robotization::SetJsonValue(const Json::Value root) {
std::string Robotization::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
- Json::Value root;
- root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
- root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
- root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
+ Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["fft_size"] = add_property_json("FFT Size", fft_size, "int", "", NULL, 0, 8, false, requested_frame);
diff --git a/src/audio_effects/Whisperization.cpp b/src/audio_effects/Whisperization.cpp
index df65cbb0..925409bf 100644
--- a/src/audio_effects/Whisperization.cpp
+++ b/src/audio_effects/Whisperization.cpp
@@ -18,13 +18,13 @@ using namespace openshot;
using namespace juce;
Whisperization::Whisperization():
- Whisperization::Whisperization(FFT_SIZE_512, HOP_SIZE_8, RECTANGULAR) {}
+ Whisperization::Whisperization(FFT_SIZE_512, HOP_SIZE_8, RECTANGULAR) {}
Whisperization::Whisperization(openshot::FFTSize fft_size,
- openshot::HopSize hop_size,
- openshot::WindowType window_type) :
- fft_size(fft_size), hop_size(hop_size),
- window_type(window_type), stft(*this)
+ openshot::HopSize hop_size,
+ openshot::WindowType window_type) :
+ fft_size(fft_size), hop_size(hop_size),
+ window_type(window_type), stft(*this)
{
// Init effect properties
init_effect_details();
@@ -48,21 +48,21 @@ void Whisperization::init_effect_details()
// modified openshot::Frame object
std::shared_ptr Whisperization::GetFrame(std::shared_ptr frame, int64_t frame_number)
{
- const std::lock_guard lock(mutex);
- ScopedNoDenormals noDenormals;
+ const std::lock_guard lock(mutex);
+ ScopedNoDenormals noDenormals;
- const int num_input_channels = frame->audio->getNumChannels();
- const int num_output_channels = frame->audio->getNumChannels();
- const int num_samples = frame->audio->getNumSamples();
- const int hop_size_value = 1 << ((int)hop_size + 1);
+ const int num_input_channels = frame->audio->getNumChannels();
+ const int num_output_channels = frame->audio->getNumChannels();
+ const int num_samples = frame->audio->getNumSamples();
+ const int hop_size_value = 1 << ((int)hop_size + 1);
const int fft_size_value = 1 << ((int)fft_size + 5);
- stft.setup(num_output_channels);
- stft.updateParameters((int)fft_size_value,
- (int)hop_size_value,
- (int)window_type);
+ stft.setup(num_output_channels);
+ stft.updateParameters((int)fft_size_value,
+ (int)hop_size_value,
+ (int)window_type);
- stft.process(*frame->audio);
+ stft.process(*frame->audio);
// return the modified frame
return frame;
@@ -147,12 +147,7 @@ void Whisperization::SetJsonValue(const Json::Value root) {
std::string Whisperization::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
- Json::Value root;
- root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
- root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
- root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
+ Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["fft_size"] = add_property_json("FFT Size", fft_size, "int", "", NULL, 0, 8, false, requested_frame);
diff --git a/src/effects/Bars.cpp b/src/effects/Bars.cpp
index daec750d..da5b7036 100644
--- a/src/effects/Bars.cpp
+++ b/src/effects/Bars.cpp
@@ -160,13 +160,7 @@ void Bars::SetJsonValue(const Json::Value root) {
std::string Bars::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
- Json::Value root;
- root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
- root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
- root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
+ Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["color"] = add_property_json("Bar Color", 0.0, "color", "", &color.red, 0, 255, false, requested_frame);
@@ -178,9 +172,6 @@ std::string Bars::PropertiesJSON(int64_t requested_frame) const {
root["right"] = add_property_json("Right Size", right.GetValue(requested_frame), "float", "", &right, 0.0, 0.5, false, requested_frame);
root["bottom"] = add_property_json("Bottom Size", bottom.GetValue(requested_frame), "float", "", &bottom, 0.0, 0.5, false, requested_frame);
- // Set the parent effect which properties this effect will inherit
- root["parent_effect_id"] = add_property_json("Parent", 0.0, "string", info.parent_effect_id, NULL, -1, -1, false, requested_frame);
-
// Return formatted string
return root.toStyledString();
}
diff --git a/src/effects/Blur.cpp b/src/effects/Blur.cpp
index 6d4dc483..d1ffe053 100644
--- a/src/effects/Blur.cpp
+++ b/src/effects/Blur.cpp
@@ -211,13 +211,7 @@ void Blur::SetJsonValue(const Json::Value root) {
std::string Blur::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
- Json::Value root;
- root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
- root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
- root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
+ Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["horizontal_radius"] = add_property_json("Horizontal Radius", horizontal_radius.GetValue(requested_frame), "float", "", &horizontal_radius, 0, 100, false, requested_frame);
@@ -225,9 +219,6 @@ std::string Blur::PropertiesJSON(int64_t requested_frame) const {
root["sigma"] = add_property_json("Sigma", sigma.GetValue(requested_frame), "float", "", &sigma, 0, 100, false, requested_frame);
root["iterations"] = add_property_json("Iterations", iterations.GetValue(requested_frame), "float", "", &iterations, 0, 100, false, requested_frame);
- // Set the parent effect which properties this effect will inherit
- root["parent_effect_id"] = add_property_json("Parent", 0.0, "string", info.parent_effect_id, NULL, -1, -1, false, requested_frame);
-
// Return formatted string
return root.toStyledString();
}
diff --git a/src/effects/Brightness.cpp b/src/effects/Brightness.cpp
index 58340fff..5fdf7f84 100644
--- a/src/effects/Brightness.cpp
+++ b/src/effects/Brightness.cpp
@@ -146,21 +146,12 @@ void Brightness::SetJsonValue(const Json::Value root) {
std::string Brightness::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
- Json::Value root;
- root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
- root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
- root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
- root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
- root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
- root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
+ Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["brightness"] = add_property_json("Brightness", brightness.GetValue(requested_frame), "float", "", &brightness, -1.0, 1.0, false, requested_frame);
root["contrast"] = add_property_json("Contrast", contrast.GetValue(requested_frame), "float", "", &contrast, -128, 128.0, false, requested_frame);
- // Set the parent effect which properties this effect will inherit
- root["parent_effect_id"] = add_property_json("Parent", 0.0, "string", info.parent_effect_id, NULL, -1, -1, false, requested_frame);
-
// Return formatted string
return root.toStyledString();
}
diff --git a/src/effects/Caption.cpp b/src/effects/Caption.cpp
index eb723307..8e36dbc5 100644
--- a/src/effects/Caption.cpp
+++ b/src/effects/Caption.cpp
@@ -454,13 +454,7 @@ void Caption::SetJsonValue(const Json::Value root) {
std::string Caption::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
- Json::Value root;
- root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
- root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
- root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
+ Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["color"] = add_property_json("Color", 0.0, "color", "", &color.red, 0, 255, false, requested_frame);
@@ -490,9 +484,6 @@ std::string Caption::PropertiesJSON(int64_t requested_frame) const {
root["caption_text"] = add_property_json("Captions", 0.0, "caption", caption_text, NULL, -1, -1, false, requested_frame);
root["caption_font"] = add_property_json("Font", 0.0, "font", font_name, NULL, -1, -1, false, requested_frame);
- // Set the parent effect which properties this effect will inherit
- root["parent_effect_id"] = add_property_json("Parent", 0.0, "string", info.parent_effect_id, NULL, -1, -1, false, requested_frame);
-
// Return formatted string
return root.toStyledString();
}
diff --git a/src/effects/ChromaKey.cpp b/src/effects/ChromaKey.cpp
index fe6928a3..41421eac 100644
--- a/src/effects/ChromaKey.cpp
+++ b/src/effects/ChromaKey.cpp
@@ -497,7 +497,7 @@ std::shared_ptr ChromaKey::GetFrame(std::shared_ptr Crop::GetFrame(std::shared_ptr
double right_value = right.GetValue(frame_number);
double bottom_value = bottom.GetValue(frame_number);
- // Get the current shift amount
- double x_shift = x.GetValue(frame_number);
- double y_shift = y.GetValue(frame_number);
+ // Get the current shift amount
+ double x_shift = x.GetValue(frame_number);
+ double y_shift = y.GetValue(frame_number);
QSize sz = frame_image->size();
- // Compute destination rectangle to paint into
- QRectF paint_r(
- left_value * sz.width(), top_value * sz.height(),
- std::max(0.0, 1.0 - left_value - right_value) * sz.width(),
- std::max(0.0, 1.0 - top_value - bottom_value) * sz.height());
+ // Compute destination rectangle to paint into
+ QRectF paint_r(
+ left_value * sz.width(), top_value * sz.height(),
+ std::max(0.0, 1.0 - left_value - right_value) * sz.width(),
+ std::max(0.0, 1.0 - top_value - bottom_value) * sz.height());
- // Copy rectangle is destination translated by offsets
- QRectF copy_r = paint_r;
- copy_r.translate(x_shift * sz.width(), y_shift * sz.height());
+ // Copy rectangle is destination translated by offsets
+ QRectF copy_r = paint_r;
+ copy_r.translate(x_shift * sz.width(), y_shift * sz.height());
- // Constrain offset copy rect to stay within image borders
- if (copy_r.left() < 0) {
- paint_r.setLeft(paint_r.left() - copy_r.left());
- copy_r.setLeft(0);
- }
- if (copy_r.right() > sz.width()) {
- paint_r.setRight(paint_r.right() - (copy_r.right() - sz.width()));
- copy_r.setRight(sz.width());
- }
- if (copy_r.top() < 0) {
- paint_r.setTop(paint_r.top() - copy_r.top());
- copy_r.setTop(0);
- }
- if (copy_r.bottom() > sz.height()) {
- paint_r.setBottom(paint_r.bottom() - (copy_r.bottom() - sz.height()));
- copy_r.setBottom(sz.height());
- }
+ // Constrain offset copy rect to stay within image borders
+ if (copy_r.left() < 0) {
+ paint_r.setLeft(paint_r.left() - copy_r.left());
+ copy_r.setLeft(0);
+ }
+ if (copy_r.right() > sz.width()) {
+ paint_r.setRight(paint_r.right() - (copy_r.right() - sz.width()));
+ copy_r.setRight(sz.width());
+ }
+ if (copy_r.top() < 0) {
+ paint_r.setTop(paint_r.top() - copy_r.top());
+ copy_r.setTop(0);
+ }
+ if (copy_r.bottom() > sz.height()) {
+ paint_r.setBottom(paint_r.bottom() - (copy_r.bottom() - sz.height()));
+ copy_r.setBottom(sz.height());
+ }
- QImage cropped(sz, QImage::Format_RGBA8888_Premultiplied);
- cropped.fill(Qt::transparent);
+ QImage cropped(sz, QImage::Format_RGBA8888_Premultiplied);
+ cropped.fill(Qt::transparent);
- const QImage src(*frame_image);
+ QPainter p(&cropped);
+ p.drawImage(paint_r, *frame_image, copy_r);
+ p.end();
- QPainter p(&cropped);
- p.drawImage(paint_r, src, copy_r);
- p.end();
-
- // Set frame image
- frame->AddImage(std::make_shared(cropped.copy()));
+ if (resize) {
+ // Resize image to match cropped QRect (reduce frame size)
+ frame->AddImage(std::make_shared(cropped.copy(paint_r.toRect())));
+ } else {
+ // Copy cropped image into transparent frame image (maintain frame size)
+ frame->AddImage(std::make_shared(cropped.copy()));
+ }
// return the modified frame
return frame;
@@ -129,8 +132,9 @@ Json::Value Crop::JsonValue() const {
root["top"] = top.JsonValue();
root["right"] = right.JsonValue();
root["bottom"] = bottom.JsonValue();
- root["x"] = x.JsonValue();
- root["y"] = y.JsonValue();
+ root["x"] = x.JsonValue();
+ root["y"] = y.JsonValue();
+ root["resize"] = resize;
// return JsonValue
return root;
@@ -168,34 +172,32 @@ void Crop::SetJsonValue(const Json::Value root) {
right.SetJsonValue(root["right"]);
if (!root["bottom"].isNull())
bottom.SetJsonValue(root["bottom"]);
- if (!root["x"].isNull())
- x.SetJsonValue(root["x"]);
- if (!root["y"].isNull())
- y.SetJsonValue(root["y"]);
+ if (!root["x"].isNull())
+ x.SetJsonValue(root["x"]);
+ if (!root["y"].isNull())
+ y.SetJsonValue(root["y"]);
+ if (!root["resize"].isNull())
+ resize = root["resize"].asBool();
}
// Get all properties for a specific frame
std::string Crop::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
- Json::Value root;
- root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
- root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
- root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
+ Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["left"] = add_property_json("Left Size", left.GetValue(requested_frame), "float", "", &left, 0.0, 1.0, false, requested_frame);
root["top"] = add_property_json("Top Size", top.GetValue(requested_frame), "float", "", &top, 0.0, 1.0, false, requested_frame);
root["right"] = add_property_json("Right Size", right.GetValue(requested_frame), "float", "", &right, 0.0, 1.0, false, requested_frame);
root["bottom"] = add_property_json("Bottom Size", bottom.GetValue(requested_frame), "float", "", &bottom, 0.0, 1.0, false, requested_frame);
- root["x"] = add_property_json("X Offset", x.GetValue(requested_frame), "float", "", &x, -1.0, 1.0, false, requested_frame);
- root["y"] = add_property_json("Y Offset", y.GetValue(requested_frame), "float", "", &y, -1.0, 1.0, false, requested_frame);
+ root["x"] = add_property_json("X Offset", x.GetValue(requested_frame), "float", "", &x, -1.0, 1.0, false, requested_frame);
+ root["y"] = add_property_json("Y Offset", y.GetValue(requested_frame), "float", "", &y, -1.0, 1.0, false, requested_frame);
- // Set the parent effect which properties this effect will inherit
- root["parent_effect_id"] = add_property_json("Parent", 0.0, "string", info.parent_effect_id, NULL, -1, -1, false, requested_frame);
+ // Add resize choices (dropdown style)
+ root["resize"] = add_property_json("Resize Image", resize, "int", "", NULL, 0, 1, false, requested_frame);
+ root["resize"]["choices"].append(add_property_choice_json("Yes", true, resize));
+ root["resize"]["choices"].append(add_property_choice_json("No", false, resize));
// Return formatted string
return root.toStyledString();
diff --git a/src/effects/Crop.h b/src/effects/Crop.h
index a21391ab..9d03a8dc 100644
--- a/src/effects/Crop.h
+++ b/src/effects/Crop.h
@@ -46,8 +46,9 @@ namespace openshot
Keyframe top; ///< Size of top bar
Keyframe right; ///< Size of right bar
Keyframe bottom; ///< Size of bottom bar
- Keyframe x; ///< X-offset
- Keyframe y; ///< Y-offset
+ Keyframe x; ///< X-offset
+ Keyframe y; ///< Y-offset
+ bool resize; ///< Auto-resize image after crop operation
/// Blank constructor, useful when using Json to load the effect properties
Crop();
@@ -58,10 +59,10 @@ namespace openshot
/// @param top The curve to adjust the top bar size (between 0 and 1)
/// @param right The curve to adjust the right bar size (between 0 and 1)
/// @param bottom The curve to adjust the bottom bar size (between 0 and 1)
- /// @param x x-offset of original image in output frame (-1.0 - 1.0)
- /// @param y y-offset of original image in output frame (-1.0 - 1.0)
+ /// @param x x-offset of original image in output frame (-1.0 - 1.0)
+ /// @param y y-offset of original image in output frame (-1.0 - 1.0)
Crop(Keyframe left, Keyframe top, Keyframe right, Keyframe bottom,
- Keyframe x=0.0, Keyframe y=0.0);
+ Keyframe x=0.0, Keyframe y=0.0);
/// @brief This method is required for all derived classes of ClipBase, and returns a
/// new openshot::Frame object. All Clip keyframes and effects are resolved into
@@ -70,9 +71,9 @@ namespace openshot
/// @returns A new openshot::Frame object
/// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline.
std::shared_ptr
- GetFrame(int64_t frame_number) override {
- return GetFrame(std::make_shared(), frame_number);
- }
+ GetFrame(int64_t frame_number) override {
+ return GetFrame(std::make_shared(), frame_number);
+ }
/// @brief This method is required for all derived classes of ClipBase, and returns a
/// modified openshot::Frame object
@@ -84,7 +85,7 @@ namespace openshot
/// @param frame The frame object that needs the clip or effect applied to it
/// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline.
std::shared_ptr
- GetFrame(std::shared_ptr frame, int64_t frame_number) override;
+ GetFrame(std::shared_ptr frame, int64_t frame_number) override;
// Get and Set JSON methods
std::string Json() const override; ///< Generate JSON string of this object
diff --git a/src/effects/Deinterlace.cpp b/src/effects/Deinterlace.cpp
index a665679b..9a1a67e3 100644
--- a/src/effects/Deinterlace.cpp
+++ b/src/effects/Deinterlace.cpp
@@ -131,22 +131,13 @@ void Deinterlace::SetJsonValue(const Json::Value root) {
std::string Deinterlace::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
- Json::Value root;
- root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
- root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
- root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
- root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
- root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
- root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
- root["isOdd"] = add_property_json("Is Odd Frame", isOdd, "bool", "", NULL, 0, 1, true, requested_frame);
+ Json::Value root = BasePropertiesJSON(requested_frame);
// Add Is Odd Frame choices (dropdown style)
+ root["isOdd"] = add_property_json("Is Odd Frame", isOdd, "bool", "", NULL, 0, 1, true, requested_frame);
root["isOdd"]["choices"].append(add_property_choice_json("Yes", true, isOdd));
root["isOdd"]["choices"].append(add_property_choice_json("No", false, isOdd));
- // Set the parent effect which properties this effect will inherit
- root["parent_effect_id"] = add_property_json("Parent", 0.0, "string", info.parent_effect_id, NULL, -1, -1, false, requested_frame);
-
// Return formatted string
return root.toStyledString();
}
diff --git a/src/effects/Hue.cpp b/src/effects/Hue.cpp
index 750992b4..12a64a39 100644
--- a/src/effects/Hue.cpp
+++ b/src/effects/Hue.cpp
@@ -144,20 +144,11 @@ void Hue::SetJsonValue(const Json::Value root) {
std::string Hue::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
- Json::Value root;
- root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
- root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
- root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
+ Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["hue"] = add_property_json("Hue", hue.GetValue(requested_frame), "float", "", &hue, 0.0, 1.0, false, requested_frame);
- // Set the parent effect which properties this effect will inherit
- root["parent_effect_id"] = add_property_json("Parent", 0.0, "string", info.parent_effect_id, NULL, -1, -1, false, requested_frame);
-
// Return formatted string
return root.toStyledString();
}
diff --git a/src/effects/Mask.cpp b/src/effects/Mask.cpp
index 428e53b8..6992eecc 100644
--- a/src/effects/Mask.cpp
+++ b/src/effects/Mask.cpp
@@ -128,7 +128,7 @@ std::shared_ptr Mask::GetFrame(std::shared_ptr
pixels[byte_index + 2] = constrain(255 * alpha_percent);
pixels[byte_index + 3] = constrain(255 * alpha_percent);
} else {
- // Mulitply new alpha value with all the colors (since we are using a premultiplied
+ // Multiply new alpha value with all the colors (since we are using a premultiplied
// alpha format)
pixels[byte_index + 0] *= alpha_percent;
pixels[byte_index + 1] *= alpha_percent;
@@ -255,16 +255,10 @@ void Mask::SetJsonValue(const Json::Value root) {
std::string Mask::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
- Json::Value root;
- root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
- root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
- root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
- root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
- root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
- root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
- root["replace_image"] = add_property_json("Replace Image", replace_image, "int", "", NULL, 0, 1, false, requested_frame);
+ Json::Value root = BasePropertiesJSON(requested_frame);
// Add replace_image choices (dropdown style)
+ root["replace_image"] = add_property_json("Replace Image", replace_image, "int", "", NULL, 0, 1, false, requested_frame);
root["replace_image"]["choices"].append(add_property_choice_json("Yes", true, replace_image));
root["replace_image"]["choices"].append(add_property_choice_json("No", false, replace_image));
@@ -277,9 +271,6 @@ std::string Mask::PropertiesJSON(int64_t requested_frame) const {
else
root["reader"] = add_property_json("Source", 0.0, "reader", "{}", NULL, 0, 1, false, requested_frame);
- // Set the parent effect which properties this effect will inherit
- root["parent_effect_id"] = add_property_json("Parent", 0.0, "string", info.parent_effect_id, NULL, -1, -1, false, requested_frame);
-
// Return formatted string
return root.toStyledString();
}
diff --git a/src/effects/Negate.cpp b/src/effects/Negate.cpp
index c22d91b1..c51ee1e5 100644
--- a/src/effects/Negate.cpp
+++ b/src/effects/Negate.cpp
@@ -87,16 +87,7 @@ void Negate::SetJsonValue(const Json::Value root) {
std::string Negate::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
- Json::Value root;
- root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
- root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
- root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
- root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
- root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
- root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
-
- // Set the parent effect which properties this effect will inherit
- root["parent_effect_id"] = add_property_json("Parent", 0.0, "string", info.parent_effect_id, NULL, -1, -1, false, requested_frame);
+ Json::Value root = BasePropertiesJSON(requested_frame);
// Return formatted string
return root.toStyledString();
diff --git a/src/effects/ObjectDetection.cpp b/src/effects/ObjectDetection.cpp
index 146fff7c..e1278b01 100644
--- a/src/effects/ObjectDetection.cpp
+++ b/src/effects/ObjectDetection.cpp
@@ -30,537 +30,531 @@ using namespace openshot;
/// Blank constructor, useful when using Json to load the effect properties
ObjectDetection::ObjectDetection(std::string clipObDetectDataPath)
{
- // Init effect properties
- init_effect_details();
+ // Init effect properties
+ init_effect_details();
- // Tries to load the tracker data from protobuf
- LoadObjDetectdData(clipObDetectDataPath);
+ // Tries to load the tracker data from protobuf
+ LoadObjDetectdData(clipObDetectDataPath);
- // Initialize the selected object index as the first object index
- selectedObjectIndex = trackedObjects.begin()->first;
+ // Initialize the selected object index as the first object index
+ selectedObjectIndex = trackedObjects.begin()->first;
}
// Default constructor
ObjectDetection::ObjectDetection()
{
- // Init effect properties
- init_effect_details();
+ // Init effect properties
+ init_effect_details();
- // Initialize the selected object index as the first object index
- selectedObjectIndex = trackedObjects.begin()->first;
+ // Initialize the selected object index as the first object index
+ selectedObjectIndex = trackedObjects.begin()->first;
}
// Init effect settings
void ObjectDetection::init_effect_details()
{
- /// Initialize the values of the EffectInfo struct.
- InitEffectInfo();
+ /// Initialize the values of the EffectInfo struct.
+ InitEffectInfo();
- /// Set the effect info
- info.class_name = "ObjectDetection";
- info.name = "Object Detector";
- info.description = "Detect objects through the video.";
- info.has_audio = false;
- info.has_video = true;
- info.has_tracked_object = true;
+ /// Set the effect info
+ info.class_name = "ObjectDetection";
+ info.name = "Object Detector";
+ info.description = "Detect objects through the video.";
+ info.has_audio = false;
+ info.has_video = true;
+ info.has_tracked_object = true;
}
// This method is required for all derived classes of EffectBase, and returns a
// modified openshot::Frame object
std::shared_ptr ObjectDetection::GetFrame(std::shared_ptr frame, int64_t frame_number)
{
- // Get the frame's image
- cv::Mat cv_image = frame->GetImageCV();
+ // Get the frame's image
+ cv::Mat cv_image = frame->GetImageCV();
- // Check if frame isn't NULL
- if(cv_image.empty()){
- return frame;
- }
+ // Check if frame isn't NULL
+ if(cv_image.empty()){
+ return frame;
+ }
- // Initialize the Qt rectangle that will hold the positions of the bounding-box
- std::vector boxRects;
- // Initialize the image of the TrackedObject child clip
- std::vector> childClipImages;
+ // Initialize the Qt rectangle that will hold the positions of the bounding-box
+ std::vector boxRects;
+ // Initialize the image of the TrackedObject child clip
+ std::vector> childClipImages;
- // Check if track data exists for the requested frame
- if (detectionsData.find(frame_number) != detectionsData.end()) {
- float fw = cv_image.size().width;
- float fh = cv_image.size().height;
+ // Check if track data exists for the requested frame
+ if (detectionsData.find(frame_number) != detectionsData.end()) {
+ float fw = cv_image.size().width;
+ float fh = cv_image.size().height;
- DetectionData detections = detectionsData[frame_number];
- for(int i = 0; i 0 &&
- std::find(display_classes.begin(), display_classes.end(), classNames[detections.classIds.at(i)]) == display_classes.end()){
- continue;
- }
+ // Does not show boxes with confidence below the threshold
+ if(detections.confidences.at(i) < confidence_threshold){
+ continue;
+ }
+ // Just display selected classes
+ if( display_classes.size() > 0 &&
+ std::find(display_classes.begin(), display_classes.end(), classNames[detections.classIds.at(i)]) == display_classes.end()){
+ continue;
+ }
- // Get the object id
- int objectId = detections.objectIds.at(i);
+ // Get the object id
+ int objectId = detections.objectIds.at(i);
- // Search for the object in the trackedObjects map
- auto trackedObject_it = trackedObjects.find(objectId);
+ // Search for the object in the trackedObjects map
+ auto trackedObject_it = trackedObjects.find(objectId);
- // Cast the object as TrackedObjectBBox
- std::shared_ptr trackedObject = std::static_pointer_cast(trackedObject_it->second);
+ // Cast the object as TrackedObjectBBox
+ std::shared_ptr trackedObject = std::static_pointer_cast(trackedObject_it->second);
- // Check if the tracked object has data for this frame
- if (trackedObject->Contains(frame_number) &&
- trackedObject->visible.GetValue(frame_number) == 1)
- {
- // Get the bounding-box of given frame
- BBox trackedBox = trackedObject->GetBox(frame_number);
- bool draw_text = !display_box_text.GetValue(frame_number);
- std::vector stroke_rgba = trackedObject->stroke.GetColorRGBA(frame_number);
- int stroke_width = trackedObject->stroke_width.GetValue(frame_number);
- float stroke_alpha = trackedObject->stroke_alpha.GetValue(frame_number);
- std::vector bg_rgba = trackedObject->background.GetColorRGBA(frame_number);
- float bg_alpha = trackedObject->background_alpha.GetValue(frame_number);
+ // Check if the tracked object has data for this frame
+ if (trackedObject->Contains(frame_number) &&
+ trackedObject->visible.GetValue(frame_number) == 1)
+ {
+ // Get the bounding-box of given frame
+ BBox trackedBox = trackedObject->GetBox(frame_number);
+ bool draw_text = !display_box_text.GetValue(frame_number);
+ std::vector stroke_rgba = trackedObject->stroke.GetColorRGBA(frame_number);
+ int stroke_width = trackedObject->stroke_width.GetValue(frame_number);
+ float stroke_alpha = trackedObject->stroke_alpha.GetValue(frame_number);
+ std::vector bg_rgba = trackedObject->background.GetColorRGBA(frame_number);
+ float bg_alpha = trackedObject->background_alpha.GetValue(frame_number);
- cv::Rect2d box(
- (int)( (trackedBox.cx-trackedBox.width/2)*fw),
- (int)( (trackedBox.cy-trackedBox.height/2)*fh),
- (int)( trackedBox.width*fw),
- (int)( trackedBox.height*fh)
- );
+ cv::Rect2d box(
+ (int)( (trackedBox.cx-trackedBox.width/2)*fw),
+ (int)( (trackedBox.cy-trackedBox.height/2)*fh),
+ (int)( trackedBox.width*fw),
+ (int)( trackedBox.height*fh)
+ );
- // If the Draw Box property is off, then make the box invisible
- if (trackedObject->draw_box.GetValue(frame_number) == 0)
- {
- bg_alpha = 1.0;
- stroke_alpha = 1.0;
- }
+ // If the Draw Box property is off, then make the box invisible
+ if (trackedObject->draw_box.GetValue(frame_number) == 0)
+ {
+ bg_alpha = 1.0;
+ stroke_alpha = 1.0;
+ }
- drawPred(detections.classIds.at(i), detections.confidences.at(i),
- box, cv_image, detections.objectIds.at(i), bg_rgba, bg_alpha, 1, true, draw_text);
- drawPred(detections.classIds.at(i), detections.confidences.at(i),
- box, cv_image, detections.objectIds.at(i), stroke_rgba, stroke_alpha, stroke_width, false, draw_text);
+ drawPred(detections.classIds.at(i), detections.confidences.at(i),
+ box, cv_image, detections.objectIds.at(i), bg_rgba, bg_alpha, 1, true, draw_text);
+ drawPred(detections.classIds.at(i), detections.confidences.at(i),
+ box, cv_image, detections.objectIds.at(i), stroke_rgba, stroke_alpha, stroke_width, false, draw_text);
- // Get the Detected Object's child clip
- if (trackedObject->ChildClipId() != ""){
- // Cast the parent timeline of this effect
- Timeline* parentTimeline = static_cast(ParentTimeline());
- if (parentTimeline){
- // Get the Tracked Object's child clip
- Clip* childClip = parentTimeline->GetClip(trackedObject->ChildClipId());
+ // Get the Detected Object's child clip
+ if (trackedObject->ChildClipId() != ""){
+ // Cast the parent timeline of this effect
+ Timeline* parentTimeline = static_cast(ParentTimeline());
+ if (parentTimeline){
+ // Get the Tracked Object's child clip
+ Clip* childClip = parentTimeline->GetClip(trackedObject->ChildClipId());
- if (childClip){
- // Get the image of the child clip for this frame
- std::shared_ptr childClipFrame = childClip->GetFrame(frame_number);
- childClipImages.push_back(childClipFrame->GetImage());
+ if (childClip){
+ // Get the image of the child clip for this frame
+ std::shared_ptr childClipFrame = childClip->GetFrame(frame_number);
+ childClipImages.push_back(childClipFrame->GetImage());
- // Set the Qt rectangle with the bounding-box properties
- QRectF boxRect;
- boxRect.setRect((int)((trackedBox.cx-trackedBox.width/2)*fw),
- (int)((trackedBox.cy - trackedBox.height/2)*fh),
- (int)(trackedBox.width*fw),
- (int)(trackedBox.height*fh));
- boxRects.push_back(boxRect);
- }
- }
- }
- }
- }
- }
+ // Set the Qt rectangle with the bounding-box properties
+ QRectF boxRect;
+ boxRect.setRect((int)((trackedBox.cx-trackedBox.width/2)*fw),
+ (int)((trackedBox.cy - trackedBox.height/2)*fh),
+ (int)(trackedBox.width*fw),
+ (int)(trackedBox.height*fh));
+ boxRects.push_back(boxRect);
+ }
+ }
+ }
+ }
+ }
+ }
- // Update Qt image with new Opencv frame
- frame->SetImageCV(cv_image);
+ // Update Qt image with new Opencv frame
+ frame->SetImageCV(cv_image);
- // Set the bounding-box image with the Tracked Object's child clip image
- if(boxRects.size() > 0){
- // Get the frame image
- QImage frameImage = *(frame->GetImage());
- for(int i; i < boxRects.size();i++){
- // Set a Qt painter to the frame image
- QPainter painter(&frameImage);
- // Draw the child clip image inside the bounding-box
- painter.drawImage(boxRects[i], *childClipImages[i]);
- }
- // Set the frame image as the composed image
- frame->AddImage(std::make_shared(frameImage));
- }
+ // Set the bounding-box image with the Tracked Object's child clip image
+ if(boxRects.size() > 0){
+ // Get the frame image
+ QImage frameImage = *(frame->GetImage());
+ for(int i = 0; i < boxRects.size(); i++){
+ // Set a Qt painter to the frame image
+ QPainter painter(&frameImage);
+ // Draw the child clip image inside the bounding-box
+ painter.drawImage(boxRects[i], *childClipImages[i]);
+ }
+ // Set the frame image as the composed image
+ frame->AddImage(std::make_shared(frameImage));
+ }
- return frame;
+ return frame;
}
void ObjectDetection::DrawRectangleRGBA(cv::Mat &frame_image, cv::RotatedRect box, std::vector color, float alpha,
- int thickness, bool is_background){
- // Get the bouding box vertices
- cv::Point2f vertices2f[4];
- box.points(vertices2f);
+ int thickness, bool is_background){
+ // Get the bounding box vertices
+ cv::Point2f vertices2f[4];
+ box.points(vertices2f);
- // TODO: take a rectangle of frame_image by refencence and draw on top of that to improve speed
- // select min enclosing rectangle to draw on a small portion of the image
- // cv::Rect rect = box.boundingRect();
- // cv::Mat image = frame_image(rect)
+ // TODO: take a rectangle of frame_image by reference and draw on top of that to improve speed
+ // select min enclosing rectangle to draw on a small portion of the image
+ // cv::Rect rect = box.boundingRect();
+ // cv::Mat image = frame_image(rect)
- if(is_background){
- cv::Mat overlayFrame;
- frame_image.copyTo(overlayFrame);
+ if(is_background){
+ cv::Mat overlayFrame;
+ frame_image.copyTo(overlayFrame);
- // draw bounding box background
- cv::Point vertices[4];
- for(int i = 0; i < 4; ++i){
- vertices[i] = vertices2f[i];}
+ // draw bounding box background
+ cv::Point vertices[4];
+ for(int i = 0; i < 4; ++i){
+ vertices[i] = vertices2f[i];}
- cv::Rect rect = box.boundingRect();
- cv::fillConvexPoly(overlayFrame, vertices, 4, cv::Scalar(color[2],color[1],color[0]), cv::LINE_AA);
- // add opacity
- cv::addWeighted(overlayFrame, 1-alpha, frame_image, alpha, 0, frame_image);
- }
- else{
- cv::Mat overlayFrame;
- frame_image.copyTo(overlayFrame);
+ cv::Rect rect = box.boundingRect();
+ cv::fillConvexPoly(overlayFrame, vertices, 4, cv::Scalar(color[2],color[1],color[0]), cv::LINE_AA);
+ // add opacity
+ cv::addWeighted(overlayFrame, 1-alpha, frame_image, alpha, 0, frame_image);
+ }
+ else{
+ cv::Mat overlayFrame;
+ frame_image.copyTo(overlayFrame);
- // Draw bounding box
- for (int i = 0; i < 4; i++)
- {
- cv::line(overlayFrame, vertices2f[i], vertices2f[(i+1)%4], cv::Scalar(color[2],color[1],color[0]),
- thickness, cv::LINE_AA);
- }
+ // Draw bounding box
+ for (int i = 0; i < 4; i++)
+ {
+ cv::line(overlayFrame, vertices2f[i], vertices2f[(i+1)%4], cv::Scalar(color[2],color[1],color[0]),
+ thickness, cv::LINE_AA);
+ }
- // add opacity
- cv::addWeighted(overlayFrame, 1-alpha, frame_image, alpha, 0, frame_image);
- }
+ // add opacity
+ cv::addWeighted(overlayFrame, 1-alpha, frame_image, alpha, 0, frame_image);
+ }
}
void ObjectDetection::drawPred(int classId, float conf, cv::Rect2d box, cv::Mat& frame, int objectNumber, std::vector color,
- float alpha, int thickness, bool is_background, bool display_text)
+ float alpha, int thickness, bool is_background, bool display_text)
{
- if(is_background){
- cv::Mat overlayFrame;
- frame.copyTo(overlayFrame);
+ if(is_background){
+ cv::Mat overlayFrame;
+ frame.copyTo(overlayFrame);
- //Draw a rectangle displaying the bounding box
- cv::rectangle(overlayFrame, box, cv::Scalar(color[2],color[1],color[0]), cv::FILLED);
+ //Draw a rectangle displaying the bounding box
+ cv::rectangle(overlayFrame, box, cv::Scalar(color[2],color[1],color[0]), cv::FILLED);
- // add opacity
- cv::addWeighted(overlayFrame, 1-alpha, frame, alpha, 0, frame);
- }
- else{
- cv::Mat overlayFrame;
- frame.copyTo(overlayFrame);
+ // add opacity
+ cv::addWeighted(overlayFrame, 1-alpha, frame, alpha, 0, frame);
+ }
+ else{
+ cv::Mat overlayFrame;
+ frame.copyTo(overlayFrame);
- //Draw a rectangle displaying the bounding box
- cv::rectangle(overlayFrame, box, cv::Scalar(color[2],color[1],color[0]), thickness);
+ //Draw a rectangle displaying the bounding box
+ cv::rectangle(overlayFrame, box, cv::Scalar(color[2],color[1],color[0]), thickness);
- if(display_text){
- //Get the label for the class name and its confidence
- std::string label = cv::format("%.2f", conf);
- if (!classNames.empty())
- {
- CV_Assert(classId < (int)classNames.size());
- label = classNames[classId] + ":" + label;
- }
+ if(display_text){
+ //Get the label for the class name and its confidence
+ std::string label = cv::format("%.2f", conf);
+ if (!classNames.empty())
+ {
+ CV_Assert(classId < (int)classNames.size());
+ label = classNames[classId] + ":" + label;
+ }
- //Display the label at the top of the bounding box
- int baseLine;
- cv::Size labelSize = cv::getTextSize(label, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
+ //Display the label at the top of the bounding box
+ int baseLine;
+ cv::Size labelSize = cv::getTextSize(label, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
- double left = box.x;
- double top = std::max((int)box.y, labelSize.height);
+ double left = box.x;
+ double top = std::max((int)box.y, labelSize.height);
- cv::rectangle(overlayFrame, cv::Point(left, top - round(1.025*labelSize.height)), cv::Point(left + round(1.025*labelSize.width), top + baseLine),
- cv::Scalar(color[2],color[1],color[0]), cv::FILLED);
- putText(overlayFrame, label, cv::Point(left+1, top), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0,0,0),1);
- }
- // add opacity
- cv::addWeighted(overlayFrame, 1-alpha, frame, alpha, 0, frame);
- }
+ cv::rectangle(overlayFrame, cv::Point(left, top - round(1.025*labelSize.height)), cv::Point(left + round(1.025*labelSize.width), top + baseLine),
+ cv::Scalar(color[2],color[1],color[0]), cv::FILLED);
+ putText(overlayFrame, label, cv::Point(left+1, top), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0,0,0),1);
+ }
+ // add opacity
+ cv::addWeighted(overlayFrame, 1-alpha, frame, alpha, 0, frame);
+ }
}
// Load protobuf data file
bool ObjectDetection::LoadObjDetectdData(std::string inputFilePath){
- // Create tracker message
- pb_objdetect::ObjDetect objMessage;
+ // Create tracker message
+ pb_objdetect::ObjDetect objMessage;
- // Read the existing tracker message.
- std::fstream input(inputFilePath, std::ios::in | std::ios::binary);
- if (!objMessage.ParseFromIstream(&input)) {
- std::cerr << "Failed to parse protobuf message." << std::endl;
- return false;
- }
+ // Read the existing tracker message.
+ std::fstream input(inputFilePath, std::ios::in | std::ios::binary);
+ if (!objMessage.ParseFromIstream(&input)) {
+ std::cerr << "Failed to parse protobuf message." << std::endl;
+ return false;
+ }
- // Make sure classNames, detectionsData and trackedObjects are empty
- classNames.clear();
- detectionsData.clear();
- trackedObjects.clear();
+ // Make sure classNames, detectionsData and trackedObjects are empty
+ classNames.clear();
+ detectionsData.clear();
+ trackedObjects.clear();
- // Seed to generate same random numbers
- std::srand(1);
- // Get all classes names and assign a color to them
- for(int i = 0; i < objMessage.classnames_size(); i++)
- {
- classNames.push_back(objMessage.classnames(i));
- classesColor.push_back(cv::Scalar(std::rand()%205 + 50, std::rand()%205 + 50, std::rand()%205 + 50));
- }
+ // Seed to generate same random numbers
+ std::srand(1);
+ // Get all classes names and assign a color to them
+ for(int i = 0; i < objMessage.classnames_size(); i++)
+ {
+ classNames.push_back(objMessage.classnames(i));
+ classesColor.push_back(cv::Scalar(std::rand()%205 + 50, std::rand()%205 + 50, std::rand()%205 + 50));
+ }
- // Iterate over all frames of the saved message
- for (size_t i = 0; i < objMessage.frame_size(); i++)
- {
- // Create protobuf message reader
- const pb_objdetect::Frame& pbFrameData = objMessage.frame(i);
+ // Iterate over all frames of the saved message
+ for (size_t i = 0; i < objMessage.frame_size(); i++)
+ {
+ // Create protobuf message reader
+ const pb_objdetect::Frame& pbFrameData = objMessage.frame(i);
- // Get frame Id
- size_t id = pbFrameData.id();
+ // Get frame Id
+ size_t id = pbFrameData.id();
- // Load bounding box data
- const google::protobuf::RepeatedPtrField &pBox = pbFrameData.bounding_box();
+ // Load bounding box data
+ const google::protobuf::RepeatedPtrField &pBox = pbFrameData.bounding_box();
- // Construct data vectors related to detections in the current frame
- std::vector classIds;
- std::vector confidences;
- std::vector> boxes;
- std::vector objectIds;
+ // Construct data vectors related to detections in the current frame
+ std::vector classIds;
+ std::vector confidences;
+ std::vector> boxes;
+ std::vector objectIds;
- // Iterate through the detected objects
- for(int i = 0; i < pbFrameData.bounding_box_size(); i++)
- {
- // Get bounding box coordinates
- float x = pBox.Get(i).x();
- float y = pBox.Get(i).y();
- float w = pBox.Get(i).w();
- float h = pBox.Get(i).h();
- // Get class Id (which will be assign to a class name)
- int classId = pBox.Get(i).classid();
- // Get prediction confidence
- float confidence = pBox.Get(i).confidence();
+ // Iterate through the detected objects
+ for(int i = 0; i < pbFrameData.bounding_box_size(); i++)
+ {
+ // Get bounding box coordinates
+ float x = pBox.Get(i).x();
+ float y = pBox.Get(i).y();
+ float w = pBox.Get(i).w();
+ float h = pBox.Get(i).h();
+ // Get class Id (which will be assigned to a class name)
+ int classId = pBox.Get(i).classid();
+ // Get prediction confidence
+ float confidence = pBox.Get(i).confidence();
- // Get the object Id
- int objectId = pBox.Get(i).objectid();
+ // Get the object Id
+ int objectId = pBox.Get(i).objectid();
- // Search for the object id on trackedObjects map
- auto trackedObject = trackedObjects.find(objectId);
- // Check if object already exists on the map
- if (trackedObject != trackedObjects.end())
- {
- // Add a new BBox to it
- trackedObject->second->AddBox(id, x+(w/2), y+(h/2), w, h, 0.0);
- }
- else
- {
- // There is no tracked object with that id, so insert a new one
- TrackedObjectBBox trackedObj((int)classesColor[classId](0), (int)classesColor[classId](1), (int)classesColor[classId](2), (int)0);
- trackedObj.AddBox(id, x+(w/2), y+(h/2), w, h, 0.0);
+ // Search for the object id on trackedObjects map
+ auto trackedObject = trackedObjects.find(objectId);
+ // Check if object already exists on the map
+ if (trackedObject != trackedObjects.end())
+ {
+ // Add a new BBox to it
+ trackedObject->second->AddBox(id, x+(w/2), y+(h/2), w, h, 0.0);
+ }
+ else
+ {
+ // There is no tracked object with that id, so insert a new one
+ TrackedObjectBBox trackedObj((int)classesColor[classId](0), (int)classesColor[classId](1), (int)classesColor[classId](2), (int)0);
+ trackedObj.AddBox(id, x+(w/2), y+(h/2), w, h, 0.0);
- std::shared_ptr trackedObjPtr = std::make_shared(trackedObj);
- ClipBase* parentClip = this->ParentClip();
- trackedObjPtr->ParentClip(parentClip);
+ std::shared_ptr trackedObjPtr = std::make_shared(trackedObj);
+ ClipBase* parentClip = this->ParentClip();
+ trackedObjPtr->ParentClip(parentClip);
- // Create a temp ID. This ID is necessary to initialize the object_id Json list
- // this Id will be replaced by the one created in the UI
- trackedObjPtr->Id(std::to_string(objectId));
- trackedObjects.insert({objectId, trackedObjPtr});
- }
+ // Create a temp ID. This ID is necessary to initialize the object_id Json list
+ // this Id will be replaced by the one created in the UI
+ trackedObjPtr->Id(std::to_string(objectId));
+ trackedObjects.insert({objectId, trackedObjPtr});
+ }
- // Create OpenCV rectangle with the bouding box info
- cv::Rect_ box(x, y, w, h);
+ // Create OpenCV rectangle with the bounding box info
+ cv::Rect_ box(x, y, w, h);
- // Push back data into vectors
- boxes.push_back(box);
- classIds.push_back(classId);
- confidences.push_back(confidence);
- objectIds.push_back(objectId);
- }
+ // Push back data into vectors
+ boxes.push_back(box);
+ classIds.push_back(classId);
+ confidences.push_back(confidence);
+ objectIds.push_back(objectId);
+ }
- // Assign data to object detector map
- detectionsData[id] = DetectionData(classIds, confidences, boxes, id, objectIds);
- }
+ // Assign data to object detector map
+ detectionsData[id] = DetectionData(classIds, confidences, boxes, id, objectIds);
+ }
- // Delete all global objects allocated by libprotobuf.
- google::protobuf::ShutdownProtobufLibrary();
+ // Delete all global objects allocated by libprotobuf.
+ google::protobuf::ShutdownProtobufLibrary();
- return true;
+ return true;
}
// Get the indexes and IDs of all visible objects in the given frame
std::string ObjectDetection::GetVisibleObjects(int64_t frame_number) const{
- // Initialize the JSON objects
- Json::Value root;
- root["visible_objects_index"] = Json::Value(Json::arrayValue);
- root["visible_objects_id"] = Json::Value(Json::arrayValue);
+ // Initialize the JSON objects
+ Json::Value root;
+ root["visible_objects_index"] = Json::Value(Json::arrayValue);
+ root["visible_objects_id"] = Json::Value(Json::arrayValue);
- // Check if track data exists for the requested frame
- if (detectionsData.find(frame_number) == detectionsData.end()){
- return root.toStyledString();
- }
- DetectionData detections = detectionsData.at(frame_number);
+ // Check if track data exists for the requested frame
+ if (detectionsData.find(frame_number) == detectionsData.end()){
+ return root.toStyledString();
+ }
+ DetectionData detections = detectionsData.at(frame_number);
- // Iterate through the tracked objects
- for(int i = 0; i 0 &&
- std::find(display_classes.begin(), display_classes.end(), classNames[detections.classIds.at(i)]) == display_classes.end()){
- continue;
- }
+ // Just display selected classes
+ if( display_classes.size() > 0 &&
+ std::find(display_classes.begin(), display_classes.end(), classNames[detections.classIds.at(i)]) == display_classes.end()){
+ continue;
+ }
- int objectId = detections.objectIds.at(i);
- // Search for the object in the trackedObjects map
- auto trackedObject = trackedObjects.find(objectId);
+ int objectId = detections.objectIds.at(i);
+ // Search for the object in the trackedObjects map
+ auto trackedObject = trackedObjects.find(objectId);
- // Get the tracked object JSON properties for this frame
- Json::Value trackedObjectJSON = trackedObject->second->PropertiesJSON(frame_number);
+ // Get the tracked object JSON properties for this frame
+ Json::Value trackedObjectJSON = trackedObject->second->PropertiesJSON(frame_number);
- if (trackedObjectJSON["visible"]["value"].asBool() &&
- trackedObject->second->ExactlyContains(frame_number)){
- // Save the object's index and ID if it's visible in this frame
- root["visible_objects_index"].append(trackedObject->first);
- root["visible_objects_id"].append(trackedObject->second->Id());
- }
- }
+ if (trackedObjectJSON["visible"]["value"].asBool() &&
+ trackedObject->second->ExactlyContains(frame_number)){
+ // Save the object's index and ID if it's visible in this frame
+ root["visible_objects_index"].append(trackedObject->first);
+ root["visible_objects_id"].append(trackedObject->second->Id());
+ }
+ }
- return root.toStyledString();
+ return root.toStyledString();
}
// Generate JSON string of this object
std::string ObjectDetection::Json() const {
- // Return formatted string
- return JsonValue().toStyledString();
+ // Return formatted string
+ return JsonValue().toStyledString();
}
// Generate Json::Value for this object
Json::Value ObjectDetection::JsonValue() const {
- // Create root json object
- Json::Value root = EffectBase::JsonValue(); // get parent properties
- root["type"] = info.class_name;
- root["protobuf_data_path"] = protobuf_data_path;
- root["selected_object_index"] = selectedObjectIndex;
- root["confidence_threshold"] = confidence_threshold;
- root["display_box_text"] = display_box_text.JsonValue();
+ // Create root json object
+ Json::Value root = EffectBase::JsonValue(); // get parent properties
+ root["type"] = info.class_name;
+ root["protobuf_data_path"] = protobuf_data_path;
+ root["selected_object_index"] = selectedObjectIndex;
+ root["confidence_threshold"] = confidence_threshold;
+ root["display_box_text"] = display_box_text.JsonValue();
- // Add tracked object's IDs to root
- Json::Value objects;
- for (auto const& trackedObject : trackedObjects){
- Json::Value trackedObjectJSON = trackedObject.second->JsonValue();
- // add object json
- objects[trackedObject.second->Id()] = trackedObjectJSON;
- }
- root["objects"] = objects;
+ // Add tracked object's IDs to root
+ Json::Value objects;
+ for (auto const& trackedObject : trackedObjects){
+ Json::Value trackedObjectJSON = trackedObject.second->JsonValue();
+ // add object json
+ objects[trackedObject.second->Id()] = trackedObjectJSON;
+ }
+ root["objects"] = objects;
- // return JsonValue
- return root;
+ // return JsonValue
+ return root;
}
// Load JSON string into this object
void ObjectDetection::SetJson(const std::string value) {
- // Parse JSON string into JSON objects
- try
- {
- const Json::Value root = openshot::stringToJson(value);
- // Set all values that match
- SetJsonValue(root);
- }
- catch (const std::exception& e)
- {
- // Error parsing JSON (or missing keys)
- throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
- }
+ // Parse JSON string into JSON objects
+ try
+ {
+ const Json::Value root = openshot::stringToJson(value);
+ // Set all values that match
+ SetJsonValue(root);
+ }
+ catch (const std::exception& e)
+ {
+ // Error parsing JSON (or missing keys)
+ throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
+ }
}
// Load Json::Value into this object
void ObjectDetection::SetJsonValue(const Json::Value root) {
- // Set parent data
- EffectBase::SetJsonValue(root);
+ // Set parent data
+ EffectBase::SetJsonValue(root);
- // Set data from Json (if key is found)
- if (!root["protobuf_data_path"].isNull() && protobuf_data_path.size() <= 1){
- protobuf_data_path = root["protobuf_data_path"].asString();
+ // Set data from Json (if key is found)
+ if (!root["protobuf_data_path"].isNull() && protobuf_data_path.size() <= 1){
+ protobuf_data_path = root["protobuf_data_path"].asString();
- if(!LoadObjDetectdData(protobuf_data_path)){
- throw InvalidFile("Invalid protobuf data path", "");
- protobuf_data_path = "";
- }
- }
+ if(!LoadObjDetectdData(protobuf_data_path)){
+ protobuf_data_path = "";
+ throw InvalidFile("Invalid protobuf data path", "");
+ }
+ }
- // Set the selected object index
- if (!root["selected_object_index"].isNull())
- selectedObjectIndex = root["selected_object_index"].asInt();
+ // Set the selected object index
+ if (!root["selected_object_index"].isNull())
+ selectedObjectIndex = root["selected_object_index"].asInt();
- if (!root["confidence_threshold"].isNull())
- confidence_threshold = root["confidence_threshold"].asFloat();
+ if (!root["confidence_threshold"].isNull())
+ confidence_threshold = root["confidence_threshold"].asFloat();
- if (!root["display_box_text"].isNull())
- display_box_text.SetJsonValue(root["display_box_text"]);
+ if (!root["display_box_text"].isNull())
+ display_box_text.SetJsonValue(root["display_box_text"]);
- if (!root["class_filter"].isNull()){
- class_filter = root["class_filter"].asString();
- std::stringstream ss(class_filter);
- display_classes.clear();
- while( ss.good() )
- {
- // Parse comma separated string
- std::string substr;
- std::getline( ss, substr, ',' );
- display_classes.push_back( substr );
- }
- }
+ if (!root["class_filter"].isNull()){
+ class_filter = root["class_filter"].asString();
+ std::stringstream ss(class_filter);
+ display_classes.clear();
+ while( ss.good() )
+ {
+ // Parse comma separated string
+ std::string substr;
+ std::getline( ss, substr, ',' );
+ display_classes.push_back( substr );
+ }
+ }
- if (!root["objects"].isNull()){
- for (auto const& trackedObject : trackedObjects){
- std::string obj_id = std::to_string(trackedObject.first);
- if(!root["objects"][obj_id].isNull()){
- trackedObject.second->SetJsonValue(root["objects"][obj_id]);
- }
- }
- }
+ if (!root["objects"].isNull()){
+ for (auto const& trackedObject : trackedObjects){
+ std::string obj_id = std::to_string(trackedObject.first);
+ if(!root["objects"][obj_id].isNull()){
+ trackedObject.second->SetJsonValue(root["objects"][obj_id]);
+ }
+ }
+ }
- // Set the tracked object's ids
- if (!root["objects_id"].isNull()){
- for (auto const& trackedObject : trackedObjects){
- Json::Value trackedObjectJSON;
- trackedObjectJSON["box_id"] = root["objects_id"][trackedObject.first].asString();
- trackedObject.second->SetJsonValue(trackedObjectJSON);
- }
- }
+ // Set the tracked object's ids
+ if (!root["objects_id"].isNull()){
+ for (auto const& trackedObject : trackedObjects){
+ Json::Value trackedObjectJSON;
+ trackedObjectJSON["box_id"] = root["objects_id"][trackedObject.first].asString();
+ trackedObject.second->SetJsonValue(trackedObjectJSON);
+ }
+ }
}
// Get all properties for a specific frame
std::string ObjectDetection::PropertiesJSON(int64_t requested_frame) const {
- // Generate JSON properties list
- Json::Value root;
+ // Generate JSON properties list
+ Json::Value root = BasePropertiesJSON(requested_frame);
- Json::Value objects;
- if(trackedObjects.count(selectedObjectIndex) != 0){
- auto selectedObject = trackedObjects.at(selectedObjectIndex);
- if (selectedObject){
- Json::Value trackedObjectJSON = selectedObject->PropertiesJSON(requested_frame);
- // add object json
- objects[selectedObject->Id()] = trackedObjectJSON;
- }
- }
- root["objects"] = objects;
+ Json::Value objects;
+ if(trackedObjects.count(selectedObjectIndex) != 0){
+ auto selectedObject = trackedObjects.at(selectedObjectIndex);
+ if (selectedObject){
+ Json::Value trackedObjectJSON = selectedObject->PropertiesJSON(requested_frame);
+ // add object json
+ objects[selectedObject->Id()] = trackedObjectJSON;
+ }
+ }
+ root["objects"] = objects;
- root["selected_object_index"] = add_property_json("Selected Object", selectedObjectIndex, "int", "", NULL, 0, 200, false, requested_frame);
- root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
- root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
- root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
- root["confidence_threshold"] = add_property_json("Confidence Theshold", confidence_threshold, "float", "", NULL, 0, 1, false, requested_frame);
- root["class_filter"] = add_property_json("Class Filter", 0.0, "string", class_filter, NULL, -1, -1, false, requested_frame);
+ root["selected_object_index"] = add_property_json("Selected Object", selectedObjectIndex, "int", "", NULL, 0, 200, false, requested_frame);
+ root["confidence_threshold"] = add_property_json("Confidence Threshold", confidence_threshold, "float", "", NULL, 0, 1, false, requested_frame);
+ root["class_filter"] = add_property_json("Class Filter", 0.0, "string", class_filter, NULL, -1, -1, false, requested_frame);
- root["display_box_text"] = add_property_json("Draw Box Text", display_box_text.GetValue(requested_frame), "int", "", &display_box_text, 0, 1.0, false, requested_frame);
- root["display_box_text"]["choices"].append(add_property_choice_json("Off", 1, display_box_text.GetValue(requested_frame)));
- root["display_box_text"]["choices"].append(add_property_choice_json("On", 0, display_box_text.GetValue(requested_frame)));
+ root["display_box_text"] = add_property_json("Draw Box Text", display_box_text.GetValue(requested_frame), "int", "", &display_box_text, 0, 1.0, false, requested_frame);
+ root["display_box_text"]["choices"].append(add_property_choice_json("Off", 1, display_box_text.GetValue(requested_frame)));
+ root["display_box_text"]["choices"].append(add_property_choice_json("On", 0, display_box_text.GetValue(requested_frame)));
- // Return formatted string
- return root.toStyledString();
+ // Return formatted string
+ return root.toStyledString();
}
diff --git a/src/effects/Pixelate.cpp b/src/effects/Pixelate.cpp
index 80a57aa6..76cc3c39 100644
--- a/src/effects/Pixelate.cpp
+++ b/src/effects/Pixelate.cpp
@@ -152,13 +152,7 @@ void Pixelate::SetJsonValue(const Json::Value root) {
std::string Pixelate::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
- Json::Value root;
- root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
- root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
- root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
+ Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["pixelization"] = add_property_json("Pixelization", pixelization.GetValue(requested_frame), "float", "", &pixelization, 0.0, 0.9999, false, requested_frame);
@@ -167,9 +161,6 @@ std::string Pixelate::PropertiesJSON(int64_t requested_frame) const {
root["right"] = add_property_json("Right Margin", right.GetValue(requested_frame), "float", "", &right, 0.0, 1.0, false, requested_frame);
root["bottom"] = add_property_json("Bottom Margin", bottom.GetValue(requested_frame), "float", "", &bottom, 0.0, 1.0, false, requested_frame);
- // Set the parent effect which properties this effect will inherit
- root["parent_effect_id"] = add_property_json("Parent", 0.0, "string", info.parent_effect_id, NULL, -1, -1, false, requested_frame);
-
// Return formatted string
return root.toStyledString();
}
diff --git a/src/effects/Saturation.cpp b/src/effects/Saturation.cpp
index 6733deec..2e06db9e 100644
--- a/src/effects/Saturation.cpp
+++ b/src/effects/Saturation.cpp
@@ -207,13 +207,7 @@ void Saturation::SetJsonValue(const Json::Value root) {
std::string Saturation::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
- Json::Value root;
- root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
- root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
- root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
- root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
- root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
- root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
+ Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["saturation"] = add_property_json("Saturation", saturation.GetValue(requested_frame), "float", "", &saturation, 0.0, 4.0, false, requested_frame);
@@ -221,9 +215,6 @@ std::string Saturation::PropertiesJSON(int64_t requested_frame) const {
root["saturation_G"] = add_property_json("Saturation (Green)", saturation_G.GetValue(requested_frame), "float", "", &saturation_G, 0.0, 4.0, false, requested_frame);
root["saturation_B"] = add_property_json("Saturation (Blue)", saturation_B.GetValue(requested_frame), "float", "", &saturation_B, 0.0, 4.0, false, requested_frame);
- // Set the parent effect which properties this effect will inherit
- root["parent_effect_id"] = add_property_json("Parent", 0.0, "string", info.parent_effect_id, NULL, -1, -1, false, requested_frame);
-
// Return formatted string
return root.toStyledString();
}
diff --git a/src/effects/Shift.cpp b/src/effects/Shift.cpp
index 66b03eb9..e99570a2 100644
--- a/src/effects/Shift.cpp
+++ b/src/effects/Shift.cpp
@@ -169,21 +169,12 @@ void Shift::SetJsonValue(const Json::Value root) {
std::string Shift::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
- Json::Value root;
- root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
- root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
- root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
+ Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["x"] = add_property_json("X Shift", x.GetValue(requested_frame), "float", "", &x, -1, 1, false, requested_frame);
root["y"] = add_property_json("Y Shift", y.GetValue(requested_frame), "float", "", &y, -1, 1, false, requested_frame);
- // Set the parent effect which properties this effect will inherit
- root["parent_effect_id"] = add_property_json("Parent", 0.0, "string", info.parent_effect_id, NULL, -1, -1, false, requested_frame);
-
// Return formatted string
return root.toStyledString();
}
diff --git a/src/effects/Stabilizer.cpp b/src/effects/Stabilizer.cpp
index f5cfb0e4..998730fb 100644
--- a/src/effects/Stabilizer.cpp
+++ b/src/effects/Stabilizer.cpp
@@ -28,10 +28,10 @@ using google::protobuf::util::TimeUtil;
/// Blank constructor, useful when using Json to load the effect properties
Stabilizer::Stabilizer(std::string clipStabilizedDataPath):protobuf_data_path(clipStabilizedDataPath)
{
- // Init effect properties
+ // Init effect properties
init_effect_details();
- // Tries to load the stabilization data from protobuf
- LoadStabilizedData(clipStabilizedDataPath);
+ // Tries to load the stabilization data from protobuf
+ LoadStabilizedData(clipStabilizedDataPath);
}
// Default constructor
@@ -104,51 +104,51 @@ std::shared_ptr Stabilizer::GetFrame(std::shared_ptr frame, int64_
// Load protobuf data file
bool Stabilizer::LoadStabilizedData(std::string inputFilePath){
- using std::ios;
- // Create stabilization message
- pb_stabilize::Stabilization stabilizationMessage;
+ using std::ios;
+ // Create stabilization message
+ pb_stabilize::Stabilization stabilizationMessage;
- // Read the existing tracker message.
- std::fstream input(inputFilePath, ios::in | ios::binary);
- if (!stabilizationMessage.ParseFromIstream(&input)) {
- std::cerr << "Failed to parse protobuf message." << std::endl;
- return false;
- }
+ // Read the existing tracker message.
+ std::fstream input(inputFilePath, ios::in | ios::binary);
+ if (!stabilizationMessage.ParseFromIstream(&input)) {
+ std::cerr << "Failed to parse protobuf message." << std::endl;
+ return false;
+ }
- // Make sure the data maps are empty
- transformationData.clear();
- trajectoryData.clear();
+ // Make sure the data maps are empty
+ transformationData.clear();
+ trajectoryData.clear();
- // Iterate over all frames of the saved message and assign to the data maps
- for (size_t i = 0; i < stabilizationMessage.frame_size(); i++) {
+ // Iterate over all frames of the saved message and assign to the data maps
+ for (size_t i = 0; i < stabilizationMessage.frame_size(); i++) {
// Create stabilization message
- const pb_stabilize::Frame& pbFrameData = stabilizationMessage.frame(i);
+ const pb_stabilize::Frame& pbFrameData = stabilizationMessage.frame(i);
- // Load frame number
- size_t id = pbFrameData.id();
+ // Load frame number
+ size_t id = pbFrameData.id();
- // Load camera trajectory data
- float x = pbFrameData.x();
- float y = pbFrameData.y();
- float a = pbFrameData.a();
+ // Load camera trajectory data
+ float x = pbFrameData.x();
+ float y = pbFrameData.y();
+ float a = pbFrameData.a();
- // Assign data to trajectory map
- trajectoryData[i] = EffectCamTrajectory(x,y,a);
+ // Assign data to trajectory map
+ trajectoryData[i] = EffectCamTrajectory(x,y,a);
- // Load transformation data
- float dx = pbFrameData.dx();
- float dy = pbFrameData.dy();
- float da = pbFrameData.da();
+ // Load transformation data
+ float dx = pbFrameData.dx();
+ float dy = pbFrameData.dy();
+ float da = pbFrameData.da();
- // Assing data to transformation map
- transformationData[id] = EffectTransformParam(dx,dy,da);
- }
+ // Assign data to transformation map
+ transformationData[id] = EffectTransformParam(dx,dy,da);
+ }
- // Delete all global objects allocated by libprotobuf.
- google::protobuf::ShutdownProtobufLibrary();
+ // Delete all global objects allocated by libprotobuf.
+ google::protobuf::ShutdownProtobufLibrary();
- return true;
+ return true;
}
@@ -213,19 +213,10 @@ void Stabilizer::SetJsonValue(const Json::Value root) {
std::string Stabilizer::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
- Json::Value root;
- root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
- root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
- root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
+ Json::Value root = BasePropertiesJSON(requested_frame);
root["zoom"] = add_property_json("Zoom", zoom.GetValue(requested_frame), "float", "", &zoom, 0.0, 2.0, false, requested_frame);
- // Set the parent effect which properties this effect will inherit
- root["parent_effect_id"] = add_property_json("Parent", 0.0, "string", info.parent_effect_id, NULL, -1, -1, false, requested_frame);
-
// Return formatted string
return root.toStyledString();
}
diff --git a/src/effects/Tracker.cpp b/src/effects/Tracker.cpp
index 67269a3a..7a0a3df5 100644
--- a/src/effects/Tracker.cpp
+++ b/src/effects/Tracker.cpp
@@ -338,7 +338,7 @@ void Tracker::SetJsonValue(const Json::Value root) {
std::string Tracker::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
- Json::Value root;
+ Json::Value root = BasePropertiesJSON(requested_frame);
// Add trackedObject properties to JSON
Json::Value objects;
@@ -349,14 +349,6 @@ std::string Tracker::PropertiesJSON(int64_t requested_frame) const {
}
root["objects"] = objects;
- // Append effect's properties
- root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
- root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
- root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
-
// Return formatted string
return root.toStyledString();
}
diff --git a/src/effects/Wave.cpp b/src/effects/Wave.cpp
index 9287fe0b..286cb632 100644
--- a/src/effects/Wave.cpp
+++ b/src/effects/Wave.cpp
@@ -154,13 +154,7 @@ void Wave::SetJsonValue(const Json::Value root) {
std::string Wave::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
- Json::Value root;
- root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
- root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
- root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
- root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
+ Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["wavelength"] = add_property_json("Wave length", wavelength.GetValue(requested_frame), "float", "", &wavelength, 0.0, 3.0, false, requested_frame);
@@ -169,9 +163,6 @@ std::string Wave::PropertiesJSON(int64_t requested_frame) const {
root["shift_x"] = add_property_json("X Shift", shift_x.GetValue(requested_frame), "float", "", &shift_x, 0.0, 1000.0, false, requested_frame);
root["speed_y"] = add_property_json("Vertical speed", speed_y.GetValue(requested_frame), "float", "", &speed_y, 0.0, 300.0, false, requested_frame);
- // Set the parent effect which properties this effect will inherit
- root["parent_effect_id"] = add_property_json("Parent", 0.0, "string", info.parent_effect_id, NULL, -1, -1, false, requested_frame);
-
// Return formatted string
return root.toStyledString();
}