Merge pull request #932 from OpenShot/effect-sequencing

Allow Effects to be applied BEFORE or AFTER a clip's keyframes
This commit is contained in:
Jonathan Thomas
2023-06-11 10:31:39 -05:00
committed by GitHub
35 changed files with 834 additions and 974 deletions

View File

@@ -440,21 +440,18 @@ std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> backgroun
// Apply waveform image (if any)
apply_waveform(frame, background_frame);
// Apply local effects to the frame (if any)
apply_effects(frame);
// Apply effects BEFORE applying keyframes (if any local or global effects are used)
apply_effects(frame, background_frame, options, true);
// Apply global timeline effects (i.e. transitions & masks... if any)
if (timeline != NULL && options != NULL) {
if (options->is_top_clip) {
// Apply global timeline effects (only to top clip... if overlapping, pass in timeline frame number)
Timeline* timeline_instance = static_cast<Timeline*>(timeline);
frame = timeline_instance->apply_effects(frame, background_frame->number, Layer());
}
}
// Apply keyframe / transforms
// Apply keyframe / transforms to current clip image
apply_keyframes(frame, background_frame);
// Apply effects AFTER applying keyframes (if any local or global effects are used)
apply_effects(frame, background_frame, options, false);
// Apply background canvas (i.e. flatten this image onto previous layer image)
apply_background(frame, background_frame);
// Add final frame to cache
final_cache.Add(frame);
@@ -1202,16 +1199,41 @@ void Clip::RemoveEffect(EffectBase* effect)
final_cache.Clear();
}
// Apply background image to the current clip image (i.e. flatten this image onto previous layer)
// Flatten this clip's image onto the previous layer's image (the background canvas)
void Clip::apply_background(std::shared_ptr<openshot::Frame> frame, std::shared_ptr<openshot::Frame> background_frame) {
	// Paint directly onto the background frame's image
	std::shared_ptr<QImage> canvas = background_frame->GetImage();
	QPainter compositor(canvas.get());
	compositor.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);

	// Draw this clip's image over the canvas using standard alpha compositing
	compositor.setCompositionMode(QPainter::CompositionMode_SourceOver);
	compositor.drawImage(0, 0, *frame->GetImage());
	compositor.end();

	// Replace the clip frame's image with the flattened result
	frame->AddImage(canvas);
}
// Apply effects to the source frame (if any)
void Clip::apply_effects(std::shared_ptr<Frame> frame)
void Clip::apply_effects(std::shared_ptr<Frame> frame, std::shared_ptr<Frame> background_frame, TimelineInfoStruct* options, bool before_keyframes)
{
// Find Effects at this position and layer
for (auto effect : effects)
{
// Apply the effect to this frame
frame = effect->GetFrame(frame, frame->number);
if (effect->info.apply_before_clip && before_keyframes) {
effect->GetFrame(frame, frame->number);
} else if (!effect->info.apply_before_clip && !before_keyframes) {
effect->GetFrame(frame, frame->number);
}
}
} // end effect loop
if (timeline != NULL && options != NULL) {
// Apply global timeline effects (i.e. transitions & masks... if any)
Timeline* timeline_instance = static_cast<Timeline*>(timeline);
options->is_before_clip_keyframes = before_keyframes;
timeline_instance->apply_effects(frame, background_frame->number, Layer(), options);
}
}
// Compare 2 floating point numbers for equality
@@ -1228,20 +1250,16 @@ void Clip::apply_keyframes(std::shared_ptr<Frame> frame, std::shared_ptr<Frame>
return;
}
// Get image from clip
// Get image from clip, and create transparent background image
std::shared_ptr<QImage> source_image = frame->GetImage();
std::shared_ptr<QImage> background_canvas = background_frame->GetImage();
std::shared_ptr<QImage> background_canvas = std::make_shared<QImage>(background_frame->GetImage()->width(),
background_frame->GetImage()->height(),
QImage::Format_RGBA8888_Premultiplied);
background_canvas->fill(QColor(Qt::transparent));
// Get transform from clip's keyframes
QTransform transform = get_transform(frame, background_canvas->width(), background_canvas->height());
// Debug output
ZmqLogger::Instance()->AppendDebugMethod(
"Clip::ApplyKeyframes (Transform: Composite Image Layer: Prepare)",
"frame->number", frame->number,
"background_canvas->width()", background_canvas->width(),
"background_canvas->height()", background_canvas->height());
// Load timeline's new frame image into a QPainter
QPainter painter(background_canvas.get());
painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);

View File

@@ -127,8 +127,11 @@ namespace openshot {
/// Adjust frame number minimum value
int64_t adjust_frame_number_minimum(int64_t frame_number);
/// Apply background image to the current clip image (i.e. flatten this image onto previous layer)
void apply_background(std::shared_ptr<openshot::Frame> frame, std::shared_ptr<openshot::Frame> background_frame);
/// Apply effects to the source frame (if any)
void apply_effects(std::shared_ptr<openshot::Frame> frame);
void apply_effects(std::shared_ptr<openshot::Frame> frame, std::shared_ptr<openshot::Frame> background_frame, TimelineInfoStruct* options, bool before_keyframes);
/// Apply keyframes to an openshot::Frame and use an existing background frame (if any)
void apply_keyframes(std::shared_ptr<Frame> frame, std::shared_ptr<Frame> background_frame);

View File

@@ -30,7 +30,6 @@ void EffectBase::InitEffectInfo()
End(0.0);
Order(0);
ParentClip(NULL);
parentEffect = NULL;
info.has_video = false;
@@ -39,6 +38,7 @@ void EffectBase::InitEffectInfo()
info.name = "";
info.description = "";
info.parent_effect_id = "";
info.apply_before_clip = true;
}
// Display file information
@@ -51,6 +51,8 @@ void EffectBase::DisplayInfo(std::ostream* out) {
*out << "--> Description: " << info.description << std::endl;
*out << "--> Has Video: " << info.has_video << std::endl;
*out << "--> Has Audio: " << info.has_audio << std::endl;
*out << "--> Apply Before Clip Keyframes: " << info.apply_before_clip << std::endl;
*out << "--> Order: " << order << std::endl;
*out << "----------------------------" << std::endl;
}
@@ -85,6 +87,7 @@ Json::Value EffectBase::JsonValue() const {
root["has_video"] = info.has_video;
root["has_audio"] = info.has_audio;
root["has_tracked_object"] = info.has_tracked_object;
root["apply_before_clip"] = info.apply_before_clip;
root["order"] = Order();
// return JsonValue
@@ -145,6 +148,9 @@ void EffectBase::SetJsonValue(const Json::Value root) {
if (!my_root["order"].isNull())
Order(my_root["order"].asInt());
if (!my_root["apply_before_clip"].isNull())
info.apply_before_clip = my_root["apply_before_clip"].asBool();
if (!my_root["parent_effect_id"].isNull()){
info.parent_effect_id = my_root["parent_effect_id"].asString();
if (info.parent_effect_id.size() > 0 && info.parent_effect_id != "" && parentEffect == NULL)
@@ -169,6 +175,28 @@ Json::Value EffectBase::JsonInfo() const {
return root;
}
// Build the JSON property list common to all effects for a specific frame
// (recommended to be used by every effect's PropertiesJSON implementation)
Json::Value EffectBase::BasePropertiesJSON(int64_t requested_frame) const {
	// Generate JSON properties list (id, position, track, timing)
	Json::Value root;
	root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
	root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
	root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
	root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
	root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
	root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);

	// Add apply_before_clip choices (dropdown style: Yes = apply effect before
	// the clip's keyframes are evaluated, No = apply it after)
	root["apply_before_clip"] = add_property_json("Apply Before Clip Keyframes", info.apply_before_clip, "int", "", NULL, 0, 1, false, requested_frame);
	root["apply_before_clip"]["choices"].append(add_property_choice_json("Yes", true, info.apply_before_clip));
	root["apply_before_clip"]["choices"].append(add_property_choice_json("No", false, info.apply_before_clip));

	// Set the parent effect which properties this effect will inherit
	root["parent_effect_id"] = add_property_json("Parent", 0.0, "string", info.parent_effect_id, NULL, -1, -1, false, requested_frame);
	return root;
}
/// Parent clip object of this reader (which can be unparented and NULL)
openshot::ClipBase* EffectBase::ParentClip() {
return clip;

View File

@@ -40,6 +40,7 @@ namespace openshot
bool has_video; ///< Determines if this effect manipulates the image of a frame
bool has_audio; ///< Determines if this effect manipulates the audio of a frame
bool has_tracked_object; ///< Determines if this effect track objects through the clip
bool apply_before_clip; ///< Apply effect before we evaluate the clip's keyframes
};
/**
@@ -58,7 +59,6 @@ namespace openshot
openshot::ClipBase* clip; ///< Pointer to the parent clip instance (if any)
public:
/// Parent effect (which properties will set this effect properties)
EffectBase* parentEffect;
@@ -106,7 +106,11 @@ namespace openshot
return;
};
Json::Value JsonInfo() const; ///< Generate JSON object of meta data / info
/// Generate JSON object of meta data / info
Json::Value JsonInfo() const;
/// Generate JSON object of base properties (recommended to be used by all effects)
Json::Value BasePropertiesJSON(int64_t requested_frame) const;
/// Get the order that this effect should be executed.
int Order() const { return order; }

View File

@@ -523,7 +523,7 @@ double Timeline::calculate_time(int64_t number, Fraction rate)
}
// Apply effects to the source frame (if any)
std::shared_ptr<Frame> Timeline::apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number, int layer)
std::shared_ptr<Frame> Timeline::apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number, int layer, TimelineInfoStruct* options)
{
// Debug output
ZmqLogger::Instance()->AppendDebugMethod(
@@ -541,14 +541,6 @@ std::shared_ptr<Frame> Timeline::apply_effects(std::shared_ptr<Frame> frame, int
bool does_effect_intersect = (effect_start_position <= timeline_frame_number && effect_end_position >= timeline_frame_number && effect->Layer() == layer);
// Debug output
ZmqLogger::Instance()->AppendDebugMethod(
"Timeline::apply_effects (Does effect intersect)",
"effect->Position()", effect->Position(),
"does_effect_intersect", does_effect_intersect,
"timeline_frame_number", timeline_frame_number,
"layer", layer);
// Clip is visible
if (does_effect_intersect)
{
@@ -556,6 +548,12 @@ std::shared_ptr<Frame> Timeline::apply_effects(std::shared_ptr<Frame> frame, int
long effect_start_frame = (effect->Start() * info.fps.ToDouble()) + 1;
long effect_frame_number = timeline_frame_number - effect_start_position + effect_start_frame;
if (!options->is_top_clip)
continue; // skip effect, if overlapped/covered by another clip on same layer
if (options->is_before_clip_keyframes != effect->info.apply_before_clip)
continue; // skip effect, if this filter does not match
// Debug output
ZmqLogger::Instance()->AppendDebugMethod(
"Timeline::apply_effects (Process Effect)",
@@ -615,6 +613,7 @@ void Timeline::add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, in
// Create timeline options (with details about this current frame request)
TimelineInfoStruct* options = new TimelineInfoStruct();
options->is_top_clip = is_top_clip;
options->is_before_clip_keyframes = true;
// Get the clip's frame, composited on top of the current timeline frame
std::shared_ptr<Frame> source_frame;

View File

@@ -68,15 +68,13 @@ namespace openshot {
/// the Clip with the highest end-frame number using std::max_element
struct CompareClipEndFrames {
bool operator()(const openshot::Clip* lhs, const openshot::Clip* rhs) {
return (lhs->Position() + lhs->Duration())
<= (rhs->Position() + rhs->Duration());
return (lhs->Position() + lhs->Duration()) <= (rhs->Position() + rhs->Duration());
}};
/// Like CompareClipEndFrames, but for effects
struct CompareEffectEndFrames {
bool operator()(const openshot::EffectBase* lhs, const openshot::EffectBase* rhs) {
return (lhs->Position() + lhs->Duration())
<= (rhs->Position() + rhs->Duration());
return (lhs->Position() + lhs->Duration()) <= (rhs->Position() + rhs->Duration());
}};
/**
@@ -231,7 +229,7 @@ namespace openshot {
/// @param convert_absolute_paths Should all paths be converted to absolute paths (relative to the location of projectPath)
Timeline(const std::string& projectPath, bool convert_absolute_paths);
virtual ~Timeline();
virtual ~Timeline();
/// Add to the tracked_objects map a pointer to a tracked object (TrackedObjectBBox)
void AddTrackedObject(std::shared_ptr<openshot::TrackedObjectBase> trackedObject);
@@ -240,9 +238,9 @@ namespace openshot {
/// Return the ID's of the tracked objects as a list of strings
std::list<std::string> GetTrackedObjectsIds() const;
/// Return the trackedObject's properties as a JSON string
#ifdef USE_OPENCV
#ifdef USE_OPENCV
std::string GetTrackedObjectValues(std::string id, int64_t frame_number) const;
#endif
#endif
/// @brief Add an openshot::Clip to the timeline
/// @param clip Add an openshot::Clip to the timeline. A clip can contain any type of Reader.
@@ -252,8 +250,8 @@ namespace openshot {
/// @param effect Add an effect to the timeline. An effect can modify the audio or video of an openshot::Frame.
void AddEffect(openshot::EffectBase* effect);
/// Apply global/timeline effects to the source frame (if any)
std::shared_ptr<openshot::Frame> apply_effects(std::shared_ptr<openshot::Frame> frame, int64_t timeline_frame_number, int layer);
/// Apply global/timeline effects to the source frame (if any)
std::shared_ptr<openshot::Frame> apply_effects(std::shared_ptr<openshot::Frame> frame, int64_t timeline_frame_number, int layer, TimelineInfoStruct* options);
/// Apply the timeline's framerate and samplerate to all clips
void ApplyMapperToClips();
@@ -266,7 +264,7 @@ namespace openshot {
/// Clear all clips, effects, and frame mappers from timeline (and free memory)
void Clear();
/// Clear all cache for this timeline instance, including all clips' cache
/// @param deep If True, clear all FrameMappers and nested Readers (QtImageReader, FFmpegReader, etc...)
void ClearAllCache(bool deep=false);

View File

@@ -18,21 +18,22 @@
namespace openshot {
// Forward decl
class Clip;
// Forward decl
class Clip;
/**
* @brief This struct contains info about the current Timeline clip instance
*
* When the Timeline requests an openshot::Frame instance from a Clip, it passes
* this struct along, with some additional details from the Timeline, such as if this clip is
* above or below overlapping clips, etc... This info can help determine if a Clip should apply
* global effects from the Timeline, such as a global Transition/Mask effect.
*/
struct TimelineInfoStruct
{
bool is_top_clip; ///< Is clip on top (if overlapping another clip)
};
/**
* @brief This struct contains info about the current Timeline clip instance
*
* When the Timeline requests an openshot::Frame instance from a Clip, it passes
* this struct along, with some additional details from the Timeline, such as if this clip is
* above or below overlapping clips, etc... This info can help determine if a Clip should apply
* global effects from the Timeline, such as a global Transition/Mask effect.
*/
struct TimelineInfoStruct
{
bool is_top_clip; ///< Is clip on top (if overlapping another clip)
bool is_before_clip_keyframes; ///< Is this before clip keyframes are applied
};
/**
* @brief This class represents a timeline (used for building generic timeline implementations)

View File

@@ -19,11 +19,11 @@ using namespace openshot;
Compressor::Compressor() : Compressor::Compressor(-10, 1, 1, 1, 1, false) {}
Compressor::Compressor(Keyframe threshold, Keyframe ratio, Keyframe attack,
Keyframe release, Keyframe makeup_gain,
Keyframe bypass):
threshold(threshold), ratio(ratio), attack(attack),
release(release), makeup_gain(makeup_gain), bypass(bypass),
input_level(0.0), yl_prev(0.0)
Keyframe release, Keyframe makeup_gain,
Keyframe bypass):
threshold(threshold), ratio(ratio), attack(attack),
release(release), makeup_gain(makeup_gain), bypass(bypass),
input_level(0.0), yl_prev(0.0)
{
// Init effect properties
init_effect_details();
@@ -48,33 +48,33 @@ void Compressor::init_effect_details()
std::shared_ptr<openshot::Frame> Compressor::GetFrame(std::shared_ptr<openshot::Frame> frame, int64_t frame_number)
{
// Adding Compressor
const int num_input_channels = frame->audio->getNumChannels();
const int num_output_channels = frame->audio->getNumChannels();
const int num_samples = frame->audio->getNumSamples();
const int num_input_channels = frame->audio->getNumChannels();
const int num_output_channels = frame->audio->getNumChannels();
const int num_samples = frame->audio->getNumSamples();
mixed_down_input.setSize(1, num_samples);
mixed_down_input.setSize(1, num_samples);
inverse_sample_rate = 1.0f / frame->SampleRate();
inverseE = 1.0f / M_E;
inverseE = 1.0f / M_E;
if ((bool)bypass.GetValue(frame_number))
return frame;
return frame;
mixed_down_input.clear();
for (int channel = 0; channel < num_input_channels; ++channel)
mixed_down_input.addFrom(0, 0, *frame->audio, channel, 0, num_samples, 1.0f / num_input_channels);
mixed_down_input.addFrom(0, 0, *frame->audio, channel, 0, num_samples, 1.0f / num_input_channels);
for (int sample = 0; sample < num_samples; ++sample) {
float T = threshold.GetValue(frame_number);
float R = ratio.GetValue(frame_number);
float alphaA = calculateAttackOrRelease(attack.GetValue(frame_number));
float alphaR = calculateAttackOrRelease(release.GetValue(frame_number));
float gain = makeup_gain.GetValue(frame_number);
for (int sample = 0; sample < num_samples; ++sample) {
float T = threshold.GetValue(frame_number);
float R = ratio.GetValue(frame_number);
float alphaA = calculateAttackOrRelease(attack.GetValue(frame_number));
float alphaR = calculateAttackOrRelease(release.GetValue(frame_number));
float gain = makeup_gain.GetValue(frame_number);
float input_squared = powf(mixed_down_input.getSample(0, sample), 2.0f);
input_level = input_squared;
xg = (input_level <= 1e-6f) ? -60.0f : 10.0f * log10f(input_level);
xg = (input_level <= 1e-6f) ? -60.0f : 10.0f * log10f(input_level);
if (xg < T)
yg = xg;
@@ -88,17 +88,17 @@ std::shared_ptr<openshot::Frame> Compressor::GetFrame(std::shared_ptr<openshot::
else
yl = alphaR * yl_prev + (1.0f - alphaR) * xl;
control = powf (10.0f, (gain - yl) * 0.05f);
yl_prev = yl;
control = powf (10.0f, (gain - yl) * 0.05f);
yl_prev = yl;
for (int channel = 0; channel < num_input_channels; ++channel) {
float new_value = frame->audio->getSample(channel, sample)*control;
frame->audio->setSample(channel, sample, new_value);
}
for (int channel = 0; channel < num_input_channels; ++channel) {
float new_value = frame->audio->getSample(channel, sample)*control;
frame->audio->setSample(channel, sample, new_value);
}
}
for (int channel = num_input_channels; channel < num_output_channels; ++channel)
frame->audio->clear(channel, 0, num_samples);
for (int channel = num_input_channels; channel < num_output_channels; ++channel)
frame->audio->clear(channel, 0, num_samples);
// return the modified frame
return frame;
@@ -106,10 +106,10 @@ std::shared_ptr<openshot::Frame> Compressor::GetFrame(std::shared_ptr<openshot::
float Compressor::calculateAttackOrRelease(float value)
{
if (value == 0.0f)
return 0.0f;
else
return pow (inverseE, inverse_sample_rate / value);
if (value == 0.0f)
return 0.0f;
else
return pow (inverseE, inverse_sample_rate / value);
}
// Generate JSON string of this object
@@ -183,12 +183,7 @@ void Compressor::SetJsonValue(const Json::Value root) {
std::string Compressor::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
Json::Value root;
root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["threshold"] = add_property_json("Threshold (dB)", threshold.GetValue(requested_frame), "float", "", &threshold, -60, 0, false, requested_frame);

View File

@@ -68,34 +68,34 @@ std::shared_ptr<openshot::Frame> Delay::GetFrame(std::shared_ptr<openshot::Frame
for (int channel = 0; channel < frame->audio->getNumChannels(); channel++)
{
float *channel_data = frame->audio->getWritePointer(channel);
float *delay_data = delay_buffer.getWritePointer(channel);
local_write_position = delay_write_position;
float *delay_data = delay_buffer.getWritePointer(channel);
local_write_position = delay_write_position;
for (auto sample = 0; sample < frame->audio->getNumSamples(); ++sample)
{
const float in = (float)(channel_data[sample]);
float out = 0.0f;
float out = 0.0f;
float read_position = fmodf((float)local_write_position - delay_time_value + (float)delay_buffer_samples, delay_buffer_samples);
int local_read_position = floorf(read_position);
float read_position = fmodf((float)local_write_position - delay_time_value + (float)delay_buffer_samples, delay_buffer_samples);
int local_read_position = floorf(read_position);
if (local_read_position != local_write_position)
if (local_read_position != local_write_position)
{
float fraction = read_position - (float)local_read_position;
float delayed1 = delay_data[(local_read_position + 0)];
float delayed2 = delay_data[(local_read_position + 1) % delay_buffer_samples];
out = (float)(delayed1 + fraction * (delayed2 - delayed1));
float fraction = read_position - (float)local_read_position;
float delayed1 = delay_data[(local_read_position + 0)];
float delayed2 = delay_data[(local_read_position + 1) % delay_buffer_samples];
out = (float)(delayed1 + fraction * (delayed2 - delayed1));
channel_data[sample] = in + (out - in);
channel_data[sample] = in + (out - in);
delay_data[local_write_position] = in;
}
}
if (++local_write_position >= delay_buffer_samples)
local_write_position -= delay_buffer_samples;
if (++local_write_position >= delay_buffer_samples)
local_write_position -= delay_buffer_samples;
}
}
delay_write_position = local_write_position;
delay_write_position = local_write_position;
// return the modified frame
return frame;
@@ -152,12 +152,7 @@ void Delay::SetJsonValue(const Json::Value root) {
std::string Delay::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
Json::Value root;
root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["delay_time"] = add_property_json("Delay Time", delay_time.GetValue(requested_frame), "float", "", &delay_time, 0, 5, false, requested_frame);

View File

@@ -18,10 +18,10 @@ using namespace openshot;
Distortion::Distortion(): Distortion::Distortion(HARD_CLIPPING, 10, -10, 5) { }
Distortion::Distortion(openshot::DistortionType distortion_type,
Keyframe input_gain, Keyframe output_gain,
Keyframe tone):
distortion_type(distortion_type), input_gain(input_gain),
output_gain(output_gain), tone(tone)
Keyframe input_gain, Keyframe output_gain,
Keyframe tone):
distortion_type(distortion_type), input_gain(input_gain),
output_gain(output_gain), tone(tone)
{
// Init effect properties
init_effect_details();
@@ -48,12 +48,12 @@ std::shared_ptr<openshot::Frame> Distortion::GetFrame(std::shared_ptr<openshot::
{
filters.clear();
for (int i = 0; i < frame->audio->getNumChannels(); ++i) {
Filter* filter;
filters.add (filter = new Filter());
}
for (int i = 0; i < frame->audio->getNumChannels(); ++i) {
Filter* filter;
filters.add (filter = new Filter());
}
updateFilters(frame_number);
updateFilters(frame_number);
// Add distortion
for (int channel = 0; channel < frame->audio->getNumChannels(); channel++)
@@ -73,53 +73,53 @@ std::shared_ptr<openshot::Frame> Distortion::GetFrame(std::shared_ptr<openshot::
case HARD_CLIPPING: {
float threshold = 0.5f;
if (in > threshold)
out = threshold;
else if (in < -threshold)
out = -threshold;
else
out = in;
break;
}
if (in > threshold)
out = threshold;
else if (in < -threshold)
out = -threshold;
else
out = in;
break;
}
case SOFT_CLIPPING: {
float threshold1 = 1.0f / 3.0f;
float threshold2 = 2.0f / 3.0f;
if (in > threshold2)
out = 1.0f;
else if (in > threshold1)
out = 1.0f - powf (2.0f - 3.0f * in, 2.0f) / 3.0f;
else if (in < -threshold2)
out = -1.0f;
else if (in < -threshold1)
out = -1.0f + powf (2.0f + 3.0f * in, 2.0f) / 3.0f;
else
out = 2.0f * in;
out *= 0.5f;
break;
}
case SOFT_CLIPPING: {
float threshold1 = 1.0f / 3.0f;
float threshold2 = 2.0f / 3.0f;
if (in > threshold2)
out = 1.0f;
else if (in > threshold1)
out = 1.0f - powf (2.0f - 3.0f * in, 2.0f) / 3.0f;
else if (in < -threshold2)
out = -1.0f;
else if (in < -threshold1)
out = -1.0f + powf (2.0f + 3.0f * in, 2.0f) / 3.0f;
else
out = 2.0f * in;
out *= 0.5f;
break;
}
case EXPONENTIAL: {
if (in > 0.0f)
out = 1.0f - expf (-in);
else
out = -1.0f + expf (in);
break;
}
case EXPONENTIAL: {
if (in > 0.0f)
out = 1.0f - expf (-in);
else
out = -1.0f + expf (in);
break;
}
case FULL_WAVE_RECTIFIER: {
out = fabsf (in);
break;
}
case FULL_WAVE_RECTIFIER: {
out = fabsf (in);
break;
}
case HALF_WAVE_RECTIFIER: {
if (in > 0.0f)
out = in;
else
out = 0.0f;
break;
}
}
case HALF_WAVE_RECTIFIER: {
if (in > 0.0f)
out = in;
else
out = 0.0f;
break;
}
}
float filtered = filters[channel]->processSingleSampleRaw(out);
channel_data[sample] = filtered*powf(10.0f, output_gain_value * 0.05f);
@@ -132,11 +132,11 @@ std::shared_ptr<openshot::Frame> Distortion::GetFrame(std::shared_ptr<openshot::
void Distortion::updateFilters(int64_t frame_number)
{
double discrete_frequency = M_PI * 0.01;
double gain = pow(10.0, (float)tone.GetValue(frame_number) * 0.05);
double discrete_frequency = M_PI * 0.01;
double gain = pow(10.0, (float)tone.GetValue(frame_number) * 0.05);
for (int i = 0; i < filters.size(); ++i)
filters[i]->updateCoefficients(discrete_frequency, gain);
for (int i = 0; i < filters.size(); ++i)
filters[i]->updateCoefficients(discrete_frequency, gain);
}
// Generate JSON string of this object
@@ -216,12 +216,7 @@ void Distortion::SetJsonValue(const Json::Value root) {
std::string Distortion::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
Json::Value root;
root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["distortion_type"] = add_property_json("Distortion Type", distortion_type, "int", "", NULL, 0, 3, false, requested_frame);

View File

@@ -19,7 +19,7 @@ using namespace openshot;
Echo::Echo() : Echo::Echo(0.1, 0.5, 0.5) { }
Echo::Echo(Keyframe echo_time, Keyframe feedback, Keyframe mix) :
echo_time(echo_time), feedback(feedback), mix(mix)
echo_time(echo_time), feedback(feedback), mix(mix)
{
// Init effect properties
init_effect_details();
@@ -72,33 +72,33 @@ std::shared_ptr<openshot::Frame> Echo::GetFrame(std::shared_ptr<openshot::Frame>
for (int channel = 0; channel < frame->audio->getNumChannels(); channel++)
{
float *channel_data = frame->audio->getWritePointer(channel);
float *echo_data = echo_buffer.getWritePointer(channel);
local_write_position = echo_write_position;
float *echo_data = echo_buffer.getWritePointer(channel);
local_write_position = echo_write_position;
for (auto sample = 0; sample < frame->audio->getNumSamples(); ++sample)
{
const float in = (float)(channel_data[sample]);
float out = 0.0f;
float out = 0.0f;
float read_position = fmodf((float)local_write_position - echo_time_value + (float)echo_buffer_samples, echo_buffer_samples);
int local_read_position = floorf(read_position);
float read_position = fmodf((float)local_write_position - echo_time_value + (float)echo_buffer_samples, echo_buffer_samples);
int local_read_position = floorf(read_position);
if (local_read_position != local_write_position)
if (local_read_position != local_write_position)
{
float fraction = read_position - (float)local_read_position;
float echoed1 = echo_data[(local_read_position + 0)];
float echoed2 = echo_data[(local_read_position + 1) % echo_buffer_samples];
out = (float)(echoed1 + fraction * (echoed2 - echoed1));
channel_data[sample] = in + mix_value*(out - in);
float fraction = read_position - (float)local_read_position;
float echoed1 = echo_data[(local_read_position + 0)];
float echoed2 = echo_data[(local_read_position + 1) % echo_buffer_samples];
out = (float)(echoed1 + fraction * (echoed2 - echoed1));
channel_data[sample] = in + mix_value*(out - in);
echo_data[local_write_position] = in + out*feedback_value;
}
}
if (++local_write_position >= echo_buffer_samples)
local_write_position -= echo_buffer_samples;
if (++local_write_position >= echo_buffer_samples)
local_write_position -= echo_buffer_samples;
}
}
echo_write_position = local_write_position;
echo_write_position = local_write_position;
// return the modified frame
return frame;
@@ -161,12 +161,7 @@ void Echo::SetJsonValue(const Json::Value root) {
std::string Echo::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
Json::Value root;
root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["echo_time"] = add_property_json("Time", echo_time.GetValue(requested_frame), "float", "", &echo_time, 0, 5, false, requested_frame);

View File

@@ -20,9 +20,9 @@ Expander::Expander(): Expander::Expander(-10, 1, 1, 1, 1, false) { }
// Default constructor
Expander::Expander(Keyframe threshold, Keyframe ratio, Keyframe attack,
Keyframe release, Keyframe makeup_gain, Keyframe bypass) :
threshold(threshold), ratio(ratio), attack(attack),
release(release), makeup_gain(makeup_gain), bypass(bypass)
Keyframe release, Keyframe makeup_gain, Keyframe bypass) :
threshold(threshold), ratio(ratio), attack(attack),
release(release), makeup_gain(makeup_gain), bypass(bypass)
{
// Init effect properties
init_effect_details();
@@ -41,8 +41,8 @@ void Expander::init_effect_details()
info.has_audio = true;
info.has_video = false;
input_level = 0.0f;
yl_prev = 0.0f;
input_level = 0.0f;
yl_prev = 0.0f;
}
@@ -52,34 +52,34 @@ void Expander::init_effect_details()
std::shared_ptr<openshot::Frame> Expander::GetFrame(std::shared_ptr<openshot::Frame> frame, int64_t frame_number)
{
// Adding Expander
const int num_input_channels = frame->audio->getNumChannels();
const int num_output_channels = frame->audio->getNumChannels();
const int num_samples = frame->audio->getNumSamples();
const int num_input_channels = frame->audio->getNumChannels();
const int num_output_channels = frame->audio->getNumChannels();
const int num_samples = frame->audio->getNumSamples();
mixed_down_input.setSize(1, num_samples);
mixed_down_input.setSize(1, num_samples);
inverse_sample_rate = 1.0f / frame->SampleRate();
inverseE = 1.0f / M_E;
inverseE = 1.0f / M_E;
if ((bool)bypass.GetValue(frame_number))
return frame;
return frame;
mixed_down_input.clear();
for (int channel = 0; channel < num_input_channels; ++channel)
mixed_down_input.addFrom(0, 0, *frame->audio, channel, 0, num_samples, 1.0f / num_input_channels);
mixed_down_input.addFrom(0, 0, *frame->audio, channel, 0, num_samples, 1.0f / num_input_channels);
for (int sample = 0; sample < num_samples; ++sample) {
float T = threshold.GetValue(frame_number);
float R = ratio.GetValue(frame_number);
float alphaA = calculateAttackOrRelease(attack.GetValue(frame_number));
float alphaR = calculateAttackOrRelease(release.GetValue(frame_number));
float gain = makeup_gain.GetValue(frame_number);
for (int sample = 0; sample < num_samples; ++sample) {
float T = threshold.GetValue(frame_number);
float R = ratio.GetValue(frame_number);
float alphaA = calculateAttackOrRelease(attack.GetValue(frame_number));
float alphaR = calculateAttackOrRelease(release.GetValue(frame_number));
float gain = makeup_gain.GetValue(frame_number);
float input_squared = powf(mixed_down_input.getSample(0, sample), 2.0f);
const float average_factor = 0.9999f;
input_level = average_factor * input_level + (1.0f - average_factor) * input_squared;
xg = (input_level <= 1e-6f) ? -60.0f : 10.0f * log10f(input_level);
xg = (input_level <= 1e-6f) ? -60.0f : 10.0f * log10f(input_level);
if (xg > T)
yg = xg;
@@ -94,17 +94,17 @@ std::shared_ptr<openshot::Frame> Expander::GetFrame(std::shared_ptr<openshot::Fr
yl = alphaR * yl_prev + (1.0f - alphaR) * xl;
control = powf (10.0f, (gain - yl) * 0.05f);
yl_prev = yl;
control = powf (10.0f, (gain - yl) * 0.05f);
yl_prev = yl;
for (int channel = 0; channel < num_input_channels; ++channel) {
float new_value = frame->audio->getSample(channel, sample)*control;
frame->audio->setSample(channel, sample, new_value);
}
for (int channel = 0; channel < num_input_channels; ++channel) {
float new_value = frame->audio->getSample(channel, sample)*control;
frame->audio->setSample(channel, sample, new_value);
}
}
for (int channel = num_input_channels; channel < num_output_channels; ++channel)
frame->audio->clear(channel, 0, num_samples);
for (int channel = num_input_channels; channel < num_output_channels; ++channel)
frame->audio->clear(channel, 0, num_samples);
// return the modified frame
return frame;
@@ -112,10 +112,10 @@ std::shared_ptr<openshot::Frame> Expander::GetFrame(std::shared_ptr<openshot::Fr
float Expander::calculateAttackOrRelease(float value)
{
if (value == 0.0f)
return 0.0f;
else
return pow (inverseE, inverse_sample_rate / value);
if (value == 0.0f)
return 0.0f;
else
return pow (inverseE, inverse_sample_rate / value);
}
// Generate JSON string of this object
@@ -189,12 +189,7 @@ void Expander::SetJsonValue(const Json::Value root) {
std::string Expander::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
Json::Value root;
root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["threshold"] = add_property_json("Threshold (dB)", threshold.GetValue(requested_frame), "float", "", &threshold, -60, 0, false, requested_frame);

View File

@@ -115,12 +115,7 @@ void Noise::SetJsonValue(const Json::Value root) {
std::string Noise::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
Json::Value root;
root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["level"] = add_property_json("Level", level.GetValue(requested_frame), "int", "", &level, 0, 100, false, requested_frame);

View File

@@ -19,9 +19,9 @@ using namespace juce;
ParametricEQ::ParametricEQ(): ParametricEQ::ParametricEQ(LOW_PASS, 500, 0, 0) {}
ParametricEQ::ParametricEQ(openshot::FilterType filter_type,
Keyframe frequency, Keyframe gain, Keyframe q_factor) :
filter_type(filter_type),
frequency(frequency), gain(gain), q_factor(q_factor)
Keyframe frequency, Keyframe gain, Keyframe q_factor) :
filter_type(filter_type),
frequency(frequency), gain(gain), q_factor(q_factor)
{
// Init effect properties
init_effect_details();
@@ -59,9 +59,9 @@ std::shared_ptr<openshot::Frame> ParametricEQ::GetFrame(std::shared_ptr<openshot
}
const int num_input_channels = frame->audio->getNumChannels();
const int num_output_channels = frame->audio->getNumChannels();
const int num_samples = frame->audio->getNumSamples();
updateFilters(frame_number, num_samples);
const int num_output_channels = frame->audio->getNumChannels();
const int num_samples = frame->audio->getNumSamples();
updateFilters(frame_number, num_samples);
for (int channel = 0; channel < frame->audio->getNumChannels(); channel++)
{
@@ -69,9 +69,9 @@ std::shared_ptr<openshot::Frame> ParametricEQ::GetFrame(std::shared_ptr<openshot
filters[channel]->processSamples(channel_data, num_samples);
}
for (int channel = num_input_channels; channel < num_output_channels; ++channel)
for (int channel = num_input_channels; channel < num_output_channels; ++channel)
{
frame->audio->clear(channel, 0, num_samples);
frame->audio->clear(channel, 0, num_samples);
}
// return the modified frame
@@ -161,12 +161,12 @@ void ParametricEQ::Filter::updateCoefficients (
void ParametricEQ::updateFilters(int64_t frame_number, double sample_rate)
{
double discrete_frequency = 2.0 * M_PI * (double)frequency.GetValue(frame_number) / sample_rate;
double discrete_frequency = 2.0 * M_PI * (double)frequency.GetValue(frame_number) / sample_rate;
double q_value = (double)q_factor.GetValue(frame_number);
double gain_value = pow(10.0, (double)gain.GetValue(frame_number) * 0.05);
int filter_type_value = (int)filter_type;
for (int i = 0; i < filters.size(); ++i)
for (int i = 0; i < filters.size(); ++i)
filters[i]->updateCoefficients(discrete_frequency, q_value, gain_value, filter_type_value);
}
@@ -233,12 +233,7 @@ void ParametricEQ::SetJsonValue(const Json::Value root) {
std::string ParametricEQ::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
Json::Value root;
root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["filter_type"] = add_property_json("Filter Type", filter_type, "int", "", NULL, 0, 3, false, requested_frame);

View File

@@ -18,13 +18,13 @@ using namespace openshot;
using namespace juce;
Robotization::Robotization()
: Robotization::Robotization(FFT_SIZE_512, HOP_SIZE_2, RECTANGULAR) {}
: Robotization::Robotization(FFT_SIZE_512, HOP_SIZE_2, RECTANGULAR) {}
Robotization::Robotization(openshot::FFTSize fft_size,
openshot::HopSize hop_size,
openshot::WindowType window_type) :
fft_size(fft_size), hop_size(hop_size),
window_type(window_type), stft(*this)
openshot::HopSize hop_size,
openshot::WindowType window_type) :
fft_size(fft_size), hop_size(hop_size),
window_type(window_type), stft(*this)
{
// Init effect properties
init_effect_details();
@@ -49,20 +49,20 @@ void Robotization::init_effect_details()
std::shared_ptr<openshot::Frame> Robotization::GetFrame(std::shared_ptr<openshot::Frame> frame, int64_t frame_number)
{
const std::lock_guard<std::recursive_mutex> lock(mutex);
ScopedNoDenormals noDenormals;
ScopedNoDenormals noDenormals;
const int num_input_channels = frame->audio->getNumChannels();
const int num_output_channels = frame->audio->getNumChannels();
const int num_samples = frame->audio->getNumSamples();
const int hop_size_value = 1 << ((int)hop_size + 1);
const int num_input_channels = frame->audio->getNumChannels();
const int num_output_channels = frame->audio->getNumChannels();
const int num_samples = frame->audio->getNumSamples();
const int hop_size_value = 1 << ((int)hop_size + 1);
const int fft_size_value = 1 << ((int)fft_size + 5);
stft.setup(num_output_channels);
stft.updateParameters((int)fft_size_value,
(int)hop_size_value,
(int)window_type);
stft.setup(num_output_channels);
stft.updateParameters((int)fft_size_value,
(int)hop_size_value,
(int)window_type);
stft.process(*frame->audio);
stft.process(*frame->audio);
// return the modified frame
return frame;
@@ -139,12 +139,7 @@ void Robotization::SetJsonValue(const Json::Value root) {
std::string Robotization::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
Json::Value root;
root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["fft_size"] = add_property_json("FFT Size", fft_size, "int", "", NULL, 0, 8, false, requested_frame);

View File

@@ -18,13 +18,13 @@ using namespace openshot;
using namespace juce;
Whisperization::Whisperization():
Whisperization::Whisperization(FFT_SIZE_512, HOP_SIZE_8, RECTANGULAR) {}
Whisperization::Whisperization(FFT_SIZE_512, HOP_SIZE_8, RECTANGULAR) {}
Whisperization::Whisperization(openshot::FFTSize fft_size,
openshot::HopSize hop_size,
openshot::WindowType window_type) :
fft_size(fft_size), hop_size(hop_size),
window_type(window_type), stft(*this)
openshot::HopSize hop_size,
openshot::WindowType window_type) :
fft_size(fft_size), hop_size(hop_size),
window_type(window_type), stft(*this)
{
// Init effect properties
init_effect_details();
@@ -48,21 +48,21 @@ void Whisperization::init_effect_details()
// modified openshot::Frame object
std::shared_ptr<openshot::Frame> Whisperization::GetFrame(std::shared_ptr<openshot::Frame> frame, int64_t frame_number)
{
const std::lock_guard<std::recursive_mutex> lock(mutex);
ScopedNoDenormals noDenormals;
const std::lock_guard<std::recursive_mutex> lock(mutex);
ScopedNoDenormals noDenormals;
const int num_input_channels = frame->audio->getNumChannels();
const int num_output_channels = frame->audio->getNumChannels();
const int num_samples = frame->audio->getNumSamples();
const int hop_size_value = 1 << ((int)hop_size + 1);
const int num_input_channels = frame->audio->getNumChannels();
const int num_output_channels = frame->audio->getNumChannels();
const int num_samples = frame->audio->getNumSamples();
const int hop_size_value = 1 << ((int)hop_size + 1);
const int fft_size_value = 1 << ((int)fft_size + 5);
stft.setup(num_output_channels);
stft.updateParameters((int)fft_size_value,
(int)hop_size_value,
(int)window_type);
stft.setup(num_output_channels);
stft.updateParameters((int)fft_size_value,
(int)hop_size_value,
(int)window_type);
stft.process(*frame->audio);
stft.process(*frame->audio);
// return the modified frame
return frame;
@@ -147,12 +147,7 @@ void Whisperization::SetJsonValue(const Json::Value root) {
std::string Whisperization::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
Json::Value root;
root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["fft_size"] = add_property_json("FFT Size", fft_size, "int", "", NULL, 0, 8, false, requested_frame);

View File

@@ -160,13 +160,7 @@ void Bars::SetJsonValue(const Json::Value root) {
std::string Bars::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
Json::Value root;
root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["color"] = add_property_json("Bar Color", 0.0, "color", "", &color.red, 0, 255, false, requested_frame);
@@ -178,9 +172,6 @@ std::string Bars::PropertiesJSON(int64_t requested_frame) const {
root["right"] = add_property_json("Right Size", right.GetValue(requested_frame), "float", "", &right, 0.0, 0.5, false, requested_frame);
root["bottom"] = add_property_json("Bottom Size", bottom.GetValue(requested_frame), "float", "", &bottom, 0.0, 0.5, false, requested_frame);
// Set the parent effect which properties this effect will inherit
root["parent_effect_id"] = add_property_json("Parent", 0.0, "string", info.parent_effect_id, NULL, -1, -1, false, requested_frame);
// Return formatted string
return root.toStyledString();
}

View File

@@ -211,13 +211,7 @@ void Blur::SetJsonValue(const Json::Value root) {
std::string Blur::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
Json::Value root;
root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["horizontal_radius"] = add_property_json("Horizontal Radius", horizontal_radius.GetValue(requested_frame), "float", "", &horizontal_radius, 0, 100, false, requested_frame);
@@ -225,9 +219,6 @@ std::string Blur::PropertiesJSON(int64_t requested_frame) const {
root["sigma"] = add_property_json("Sigma", sigma.GetValue(requested_frame), "float", "", &sigma, 0, 100, false, requested_frame);
root["iterations"] = add_property_json("Iterations", iterations.GetValue(requested_frame), "float", "", &iterations, 0, 100, false, requested_frame);
// Set the parent effect which properties this effect will inherit
root["parent_effect_id"] = add_property_json("Parent", 0.0, "string", info.parent_effect_id, NULL, -1, -1, false, requested_frame);
// Return formatted string
return root.toStyledString();
}

View File

@@ -146,21 +146,12 @@ void Brightness::SetJsonValue(const Json::Value root) {
std::string Brightness::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
Json::Value root;
root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["brightness"] = add_property_json("Brightness", brightness.GetValue(requested_frame), "float", "", &brightness, -1.0, 1.0, false, requested_frame);
root["contrast"] = add_property_json("Contrast", contrast.GetValue(requested_frame), "float", "", &contrast, -128, 128.0, false, requested_frame);
// Set the parent effect which properties this effect will inherit
root["parent_effect_id"] = add_property_json("Parent", 0.0, "string", info.parent_effect_id, NULL, -1, -1, false, requested_frame);
// Return formatted string
return root.toStyledString();
}

View File

@@ -454,13 +454,7 @@ void Caption::SetJsonValue(const Json::Value root) {
std::string Caption::PropertiesJSON(int64_t requested_frame) const {
// Generate JSON properties list
Json::Value root;
root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 1000 * 60 * 30, false, requested_frame);
root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 1000 * 60 * 30, true, requested_frame);
Json::Value root = BasePropertiesJSON(requested_frame);
// Keyframes
root["color"] = add_property_json("Color", 0.0, "color", "", &color.red, 0, 255, false, requested_frame);
@@ -490,9 +484,6 @@ std::string Caption::PropertiesJSON(int64_t requested_frame) const {
root["caption_text"] = add_property_json("Captions", 0.0, "caption", caption_text, NULL, -1, -1, false, requested_frame);
root["caption_font"] = add_property_json("Font", 0.0, "font", font_name, NULL, -1, -1, false, requested_frame);
// Set the parent effect which properties this effect will inherit
root["parent_effect_id"] = add_property_json("Parent", 0.0, "string", info.parent_effect_id, NULL, -1, -1, false, requested_frame);
// Return formatted string
return root.toStyledString();
}

Some files were not shown because too many files have changed in this diff Show More