Large refactor of the OpenMP integration (or rather, the removal of OpenMP from the Timeline and FFmpeg-related classes). The logic behind this decision was based on profiling libopenshot and observing the amount of wasted CPU idle time across its many threads. The slow code is still synchronous, and all the threads must wait on each other, adding additional overhead. So, this change removes many unneeded threads and simplifies the underlying Timeline->Clip->FFmpegReader flow. It also removes 2 calls to QPainter::drawImage by improving the flexibility of Clip->GetFrame.

This commit is contained in:
Jonathan Thomas
2021-02-17 19:44:44 -06:00
parent 15695e3c0c
commit 3daa5bdb7b
15 changed files with 1006 additions and 1211 deletions

2
.gitignore vendored
View File

@@ -6,6 +6,6 @@
.project
.cproject
/.metadata/
cmake-build-debug/*
tags
*~

View File

@@ -39,51 +39,52 @@ using namespace openshot;
int main(int argc, char* argv[]) {
Settings *s = Settings::Instance();
s->HARDWARE_DECODER = 2; // 1 VA-API, 2 NVDEC, 6 VDPAU
s->HW_DE_DEVICE_SET = 0;
// Types for storing time durations in whole and fractional milliseconds
using ms = std::chrono::milliseconds;
using s = std::chrono::seconds;
using double_ms = std::chrono::duration<double, ms::period>;
std::string input_filepath = TEST_MEDIA_PATH;
input_filepath += "sintel_trailer-720p.mp4";
// Track total time
const auto total_time = double_ms(0.0);
FFmpegReader r9(input_filepath);
// FFmpeg Reader performance test
const auto total_1 = std::chrono::high_resolution_clock::now();
FFmpegReader r9("/home/jonathan/Videos/sintel_trailer-1080p.mp4");
r9.Open();
r9.DisplayInfo();
/* WRITER ---------------- */
FFmpegWriter w9("metadata.mp4");
// Set options
w9.SetAudioOptions(true, "libmp3lame", r9.info.sample_rate, r9.info.channels, r9.info.channel_layout, 128000);
w9.SetVideoOptions(true, "libx264", r9.info.fps, 1024, 576, Fraction(1,1), false, false, 3000000);
w9.info.metadata["title"] = "testtest";
w9.info.metadata["artist"] = "aaa";
w9.info.metadata["album"] = "bbb";
w9.info.metadata["year"] = "2015";
w9.info.metadata["description"] = "ddd";
w9.info.metadata["comment"] = "eee";
w9.info.metadata["comment"] = "comment";
w9.info.metadata["copyright"] = "copyright OpenShot!";
// Open writer
w9.Open();
for (long int frame = 1; frame <= 100; frame++)
for (long int frame = 1; frame <= 1000; frame++)
{
//int frame_number = (rand() % 750) + 1;
int frame_number = frame;
std::shared_ptr<Frame> f = r9.GetFrame(frame_number);
w9.WriteFrame(f);
const auto time1 = std::chrono::high_resolution_clock::now();
std::shared_ptr<Frame> f = r9.GetFrame(frame);
const auto time2 = std::chrono::high_resolution_clock::now();
std::cout << "FFmpegReader: " << frame << " (" << double_ms(time2 - time1).count() << " ms)" << std::endl;
}
// Close writer & reader
w9.Close();
// Close timeline
const auto total_2 = std::chrono::high_resolution_clock::now();
auto total_sec = std::chrono::duration_cast<ms>(total_2 - total_1);
std::cout << "FFmpegReader TOTAL: " << total_sec.count() << " ms" << std::endl;
r9.Close();
std::cout << "Completed successfully!" << std::endl;
// Timeline Reader performance test
Timeline tm(r9.info.width, r9.info.height, r9.info.fps, r9.info.sample_rate, r9.info.channels, r9.info.channel_layout);
Clip *c = new Clip(&r9);
tm.AddClip(c);
tm.Open();
const auto total_3 = std::chrono::high_resolution_clock::now();
for (long int frame = 1; frame <= 1000; frame++)
{
const auto time1 = std::chrono::high_resolution_clock::now();
std::shared_ptr<Frame> f = tm.GetFrame(frame);
const auto time2 = std::chrono::high_resolution_clock::now();
std::cout << "Timeline: " << frame << " (" << double_ms(time2 - time1).count() << " ms)" << std::endl;
}
const auto total_4 = std::chrono::high_resolution_clock::now();
total_sec = std::chrono::duration_cast<ms>(total_4 - total_3);
std::cout << "Timeline TOTAL: " << total_sec.count() << " ms" << std::endl;
tm.Close();
std::cout << "Completed successfully!" << std::endl;
return 0;
}

View File

@@ -346,7 +346,7 @@ std::shared_ptr<Frame> Clip::GetFrame(int64_t frame_number)
}
// Use an existing openshot::Frame object and draw this Clip's frame onto it
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> frame, int64_t frame_number)
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t frame_number)
{
// Check for open reader (or throw exception)
if (!is_open)
@@ -376,7 +376,7 @@ std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> frame, in
int enabled_video = has_video.GetInt(frame_number);
if (enabled_video == -1 && reader && reader->info.has_video)
enabled_video = 1;
else if (enabled_video == -1 && reader && !reader->info.has_audio)
else if (enabled_video == -1 && reader && !reader->info.has_video)
enabled_video = 0;
// Is a time map detected
@@ -386,27 +386,14 @@ std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> frame, in
new_frame_number = time_mapped_number;
// Now that we have re-mapped what frame number is needed, go and get the frame pointer
std::shared_ptr<Frame> original_frame;
original_frame = GetOrCreateFrame(new_frame_number);
// Copy the image from the odd field
if (enabled_video)
frame->AddImage(std::make_shared<QImage>(*original_frame->GetImage()));
// Loop through each channel, add audio
if (enabled_audio && reader->info.has_audio)
for (int channel = 0; channel < original_frame->GetAudioChannelsCount(); channel++)
frame->AddAudio(true, channel, 0, original_frame->GetAudioSamples(channel), original_frame->GetAudioSamplesCount(), 1.0);
std::shared_ptr<Frame> original_frame = GetOrCreateFrame(new_frame_number);
// Get time mapped frame number (used to increase speed, change direction, etc...)
// TODO: Handle variable # of samples, since this resamples audio for different speeds (only when time curve is set)
get_time_mapped_frame(frame, new_frame_number);
// Adjust # of samples to match requested (the interaction with time curves will make this tricky)
// TODO: Implement move samples to/from next frame
get_time_mapped_frame(original_frame, new_frame_number);
// Apply effects to the frame (if any)
apply_effects(frame);
apply_effects(original_frame);
// Determine size of image (from Timeline or Reader)
int width = 0;
@@ -422,13 +409,13 @@ std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> frame, in
}
// Apply keyframe / transforms
apply_keyframes(frame, width, height);
apply_keyframes(original_frame, background_frame->GetImage());
// Cache frame
cache.Add(frame);
// Cache frame
cache.Add(original_frame);
// Return processed 'frame'
return frame;
return original_frame;
}
else
// Throw error if reader not initialized
@@ -709,7 +696,6 @@ std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number)
// Create a new copy of reader frame
// This allows a clip to modify the pixels and audio of this frame without
// changing the underlying reader's frame data
//std::shared_ptr<Frame> reader_copy(new Frame(number, 1, 1, "#000000", reader_frame->GetAudioSamplesCount(), reader_frame->GetAudioChannelsCount()));
auto reader_copy = std::make_shared<Frame>(*reader_frame.get());
reader_copy->SampleRate(reader_frame->SampleRate());
reader_copy->ChannelsLayout(reader_frame->ChannelsLayout());
@@ -1126,18 +1112,84 @@ bool Clip::isEqual(double a, double b)
return fabs(a - b) < 0.000001;
}
// Apply keyframes to the source frame (if any)
void Clip::apply_keyframes(std::shared_ptr<Frame> frame, std::shared_ptr<QImage> background_canvas) {
// Skip out if video was disabled or only an audio frame (no visualisation in use)
if (has_video.GetInt(frame->number) == 0 ||
(!Waveform() && !Reader()->info.has_video))
// Skip the rest of the image processing for performance reasons
return;
// Get image from clip
std::shared_ptr<QImage> source_image = frame->GetImage();
// Size of final image
int width = background_canvas->width();
int height = background_canvas->height();
// Get transform from clip's keyframes
QTransform transform = get_transform(frame, width, height);
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Clip::ApplyKeyframes (Transform: Composite Image Layer: Prepare)", "frame->number", frame->number);
// Load timeline's new frame image into a QPainter
QPainter painter(background_canvas.get());
painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);
// Apply transform (translate, rotate, scale)
painter.setTransform(transform);
// Composite a new layer onto the image
painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
painter.drawImage(0, 0, *source_image);
if (timeline) {
Timeline *t = (Timeline *) timeline;
// Draw frame #'s on top of image (if needed)
if (display != FRAME_DISPLAY_NONE) {
std::stringstream frame_number_str;
switch (display) {
case (FRAME_DISPLAY_NONE):
// This is only here to prevent unused-enum warnings
break;
case (FRAME_DISPLAY_CLIP):
frame_number_str << frame->number;
break;
case (FRAME_DISPLAY_TIMELINE):
frame_number_str << (position * t->info.fps.ToFloat()) + frame->number;
break;
case (FRAME_DISPLAY_BOTH):
frame_number_str << (position * t->info.fps.ToFloat()) + frame->number << " (" << frame->number << ")";
break;
}
// Draw frame number on top of image
painter.setPen(QColor("#ffffff"));
painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
}
}
painter.end();
// Add new QImage to frame
frame->AddImage(background_canvas);
}
// Apply keyframes to the source frame (if any)
void Clip::apply_keyframes(std::shared_ptr<Frame> frame, int width, int height)
QTransform Clip::get_transform(std::shared_ptr<Frame> frame, int width, int height)
{
// Get actual frame image data
std::shared_ptr<QImage> source_image = frame->GetImage();
// Get image from clip
std::shared_ptr<QImage> source_image = frame->GetImage();
/* REPLACE IMAGE WITH WAVEFORM IMAGE (IF NEEDED) */
if (Waveform())
{
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Generate Waveform Image)", "frame->number", frame->number, "Waveform()", Waveform());
ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Generate Waveform Image)", "frame->number", frame->number, "Waveform()", Waveform());
// Get the color of the waveform
int red = wave_color.red.GetInt(frame->number);
@@ -1170,7 +1222,7 @@ void Clip::apply_keyframes(std::shared_ptr<Frame> frame, int width, int height)
}
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Set Alpha & Opacity)", "alpha_value", alpha_value, "frame->number", frame->number);
ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Set Alpha & Opacity)", "alpha_value", alpha_value, "frame->number", frame->number);
}
/* RESIZE SOURCE IMAGE - based on scale type */
@@ -1181,21 +1233,21 @@ void Clip::apply_keyframes(std::shared_ptr<Frame> frame, int width, int height)
source_size.scale(width, height, Qt::KeepAspectRatio);
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Scale: SCALE_FIT)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_FIT)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
break;
}
case (SCALE_STRETCH): {
source_size.scale(width, height, Qt::IgnoreAspectRatio);
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Scale: SCALE_STRETCH)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_STRETCH)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
break;
}
case (SCALE_CROP): {
source_size.scale(width, height, Qt::KeepAspectRatioByExpanding);
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Scale: SCALE_CROP)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_CROP)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
break;
}
case (SCALE_NONE): {
@@ -1207,7 +1259,7 @@ void Clip::apply_keyframes(std::shared_ptr<Frame> frame, int width, int height)
source_size.scale(width * source_width_ratio, height * source_height_ratio, Qt::KeepAspectRatio);
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Scale: SCALE_NONE)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_NONE)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
break;
}
}
@@ -1258,7 +1310,7 @@ void Clip::apply_keyframes(std::shared_ptr<Frame> frame, int width, int height)
}
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Gravity)", "frame->number", frame->number, "source_clip->gravity", gravity, "scaled_source_width", scaled_source_width, "scaled_source_height", scaled_source_height);
ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Gravity)", "frame->number", frame->number, "source_clip->gravity", gravity, "scaled_source_width", scaled_source_width, "scaled_source_height", scaled_source_height);
/* LOCATION, ROTATION, AND SCALE */
float r = rotation.GetValue(frame->number); // rotate in degrees
@@ -1272,7 +1324,7 @@ void Clip::apply_keyframes(std::shared_ptr<Frame> frame, int width, int height)
QTransform transform;
// Transform source image (if needed)
ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Build QTransform - if needed)", "frame->number", frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);
ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Build QTransform - if needed)", "frame->number", frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);
if (!isEqual(x, 0) || !isEqual(y, 0)) {
// TRANSLATE/MOVE CLIP
@@ -1297,56 +1349,5 @@ void Clip::apply_keyframes(std::shared_ptr<Frame> frame, int width, int height)
transform.scale(source_width_scale, source_height_scale);
}
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Transform: Composite Image Layer: Prepare)", "frame->number", frame->number);
/* COMPOSITE SOURCE IMAGE (LAYER) ONTO FINAL IMAGE */
auto new_image = std::make_shared<QImage>(QSize(width, height), source_image->format());
new_image->fill(QColor(QString::fromStdString("#00000000")));
// Load timeline's new frame image into a QPainter
QPainter painter(new_image.get());
painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);
// Apply transform (translate, rotate, scale)
painter.setTransform(transform);
// Composite a new layer onto the image
painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
painter.drawImage(0, 0, *source_image);
if (timeline) {
Timeline *t = (Timeline *) timeline;
// Draw frame #'s on top of image (if needed)
if (display != FRAME_DISPLAY_NONE) {
std::stringstream frame_number_str;
switch (display) {
case (FRAME_DISPLAY_NONE):
// This is only here to prevent unused-enum warnings
break;
case (FRAME_DISPLAY_CLIP):
frame_number_str << frame->number;
break;
case (FRAME_DISPLAY_TIMELINE):
frame_number_str << (position * t->info.fps.ToFloat()) + frame->number;
break;
case (FRAME_DISPLAY_BOTH):
frame_number_str << (position * t->info.fps.ToFloat()) + frame->number << " (" << frame->number << ")";
break;
}
// Draw frame number on top of image
painter.setPen(QColor("#ffffff"));
painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
}
}
painter.end();
// Add new QImage to frame
frame->AddImage(new_image);
return transform;
}

View File

@@ -139,8 +139,11 @@ namespace openshot {
/// Apply effects to the source frame (if any)
void apply_effects(std::shared_ptr<openshot::Frame> frame);
/// Apply keyframes to the source frame (if any)
void apply_keyframes(std::shared_ptr<openshot::Frame> frame, int width, int height);
/// Apply keyframes to an openshot::Frame and use an existing QImage as a background image (if any)
void apply_keyframes(std::shared_ptr<Frame> frame, std::shared_ptr<QImage> background_canvas);
/// Get QTransform from keyframes
QTransform get_transform(std::shared_ptr<Frame> frame, int width, int height);
/// Get file extension
std::string get_file_extension(std::string path);
@@ -226,9 +229,9 @@ namespace openshot {
/// rendered.
///
/// @returns The modified openshot::Frame object
/// @param frame This is ignored on Clip, due to caching optimizations. This frame instance is clobbered with the source frame.
/// @param background_frame The frame object to use as a background canvas (i.e. an existing Timeline openshot::Frame instance)
/// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline.
std::shared_ptr<openshot::Frame> GetFrame(std::shared_ptr<openshot::Frame> frame, int64_t frame_number) override;
std::shared_ptr<openshot::Frame> GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t frame_number);
/// Open the internal reader
void Open() override;

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -754,13 +754,11 @@ void Frame::AddColor(int new_width, int new_height, std::string new_color)
// Create new image object, and fill with pixel data
const GenericScopedLock<juce::CriticalSection> lock(addingImageSection);
#pragma omp critical (AddImage)
{
image = std::make_shared<QImage>(new_width, new_height, QImage::Format_RGBA8888_Premultiplied);
image = std::make_shared<QImage>(new_width, new_height, QImage::Format_RGBA8888_Premultiplied);
// Fill with solid color
image->fill(QColor(QString::fromStdString(color)));
// Fill with solid color
image->fill(QColor(QString::fromStdString(color)));
}
// Update height and width
width = image->width();
height = image->height();
@@ -775,12 +773,7 @@ void Frame::AddImage(
// Create new buffer
{
const GenericScopedLock<juce::CriticalSection> lock(addingImageSection);
int buffer_size = new_width * new_height * bytes_per_pixel;
qbuffer = new unsigned char[buffer_size]();
// Copy buffer data
memcpy((unsigned char*)qbuffer, pixels_, buffer_size);
qbuffer = pixels_;
} // Release addingImageSection lock
// Create new image object from pixel data
@@ -804,19 +797,16 @@ void Frame::AddImage(std::shared_ptr<QImage> new_image)
// assign image data
const GenericScopedLock<juce::CriticalSection> lock(addingImageSection);
#pragma omp critical (AddImage)
{
image = new_image;
image = new_image;
// Always convert to Format_RGBA8888_Premultiplied (if different)
if (image->format() != QImage::Format_RGBA8888_Premultiplied)
*image = image->convertToFormat(QImage::Format_RGBA8888_Premultiplied);
// Always convert to Format_RGBA8888_Premultiplied (if different)
if (image->format() != QImage::Format_RGBA8888_Premultiplied)
*image = image->convertToFormat(QImage::Format_RGBA8888_Premultiplied);
// Update height and width
width = image->width();
height = image->height();
has_image_data = true;
}
// Update height and width
width = image->width();
height = image->height();
has_image_data = true;
}
// Add (or replace) pixel data to the frame (for only the odd or even lines)
@@ -834,15 +824,12 @@ void Frame::AddImage(std::shared_ptr<QImage> new_image, bool only_odd_lines)
} else {
// Ignore image of different sizes or formats
bool ret=false;
#pragma omp critical (AddImage)
{
if (image == new_image || image->size() != new_image->size()) {
ret = true;
}
else if (new_image->format() != QImage::Format_RGBA8888_Premultiplied) {
new_image = std::make_shared<QImage>(
if (image == new_image || image->size() != new_image->size()) {
ret = true;
}
else if (new_image->format() != QImage::Format_RGBA8888_Premultiplied) {
new_image = std::make_shared<QImage>(
new_image->convertToFormat(QImage::Format_RGBA8888_Premultiplied));
}
}
if (ret) {
return;
@@ -850,26 +837,23 @@ void Frame::AddImage(std::shared_ptr<QImage> new_image, bool only_odd_lines)
// Get the frame's image
const GenericScopedLock<juce::CriticalSection> lock(addingImageSection);
#pragma omp critical (AddImage)
{
unsigned char *pixels = image->bits();
const unsigned char *new_pixels = new_image->constBits();
unsigned char *pixels = image->bits();
const unsigned char *new_pixels = new_image->constBits();
// Loop through the scanlines of the image (even or odd)
int start = 0;
if (only_odd_lines)
start = 1;
// Loop through the scanlines of the image (even or odd)
int start = 0;
if (only_odd_lines)
start = 1;
for (int row = start; row < image->height(); row += 2) {
int offset = row * image->bytesPerLine();
memcpy(pixels + offset, new_pixels + offset, image->bytesPerLine());
}
// Update height and width
height = image->height();
width = image->width();
has_image_data = true;
for (int row = start; row < image->height(); row += 2) {
int offset = row * image->bytesPerLine();
memcpy(pixels + offset, new_pixels + offset, image->bytesPerLine());
}
// Update height and width
height = image->height();
width = image->width();
has_image_data = true;
}
}
@@ -891,31 +875,29 @@ void Frame::ResizeAudio(int channels, int length, int rate, ChannelLayout layout
// Add audio samples to a specific channel
void Frame::AddAudio(bool replaceSamples, int destChannel, int destStartSample, const float* source, int numSamples, float gainToApplyToSource = 1.0f) {
const GenericScopedLock<juce::CriticalSection> lock(addingAudioSection);
#pragma omp critical (adding_audio)
{
// Clamp starting sample to 0
int destStartSampleAdjusted = max(destStartSample, 0);
// Extend audio container to hold more (or less) samples and channels.. if needed
int new_length = destStartSampleAdjusted + numSamples;
int new_channel_length = audio->getNumChannels();
if (destChannel >= new_channel_length)
new_channel_length = destChannel + 1;
if (new_length > audio->getNumSamples() || new_channel_length > audio->getNumChannels())
audio->setSize(new_channel_length, new_length, true, true, false);
// Clamp starting sample to 0
int destStartSampleAdjusted = max(destStartSample, 0);
// Clear the range of samples first (if needed)
if (replaceSamples)
audio->clear(destChannel, destStartSampleAdjusted, numSamples);
// Extend audio container to hold more (or less) samples and channels.. if needed
int new_length = destStartSampleAdjusted + numSamples;
int new_channel_length = audio->getNumChannels();
if (destChannel >= new_channel_length)
new_channel_length = destChannel + 1;
if (new_length > audio->getNumSamples() || new_channel_length > audio->getNumChannels())
audio->setSize(new_channel_length, new_length, true, true, false);
// Add samples to frame's audio buffer
audio->addFrom(destChannel, destStartSampleAdjusted, source, numSamples, gainToApplyToSource);
has_audio_data = true;
// Clear the range of samples first (if needed)
if (replaceSamples)
audio->clear(destChannel, destStartSampleAdjusted, numSamples);
// Calculate max audio sample added
if (new_length > max_audio_sample)
max_audio_sample = new_length;
}
// Add samples to frame's audio buffer
audio->addFrom(destChannel, destStartSampleAdjusted, source, numSamples, gainToApplyToSource);
has_audio_data = true;
// Calculate max audio sample added
if (new_length > max_audio_sample)
max_audio_sample = new_length;
}
// Apply gain ramp (i.e. fading volume)
@@ -958,7 +940,7 @@ cv::Mat Frame::GetImageCV()
if (!image)
// Fill with black
AddColor(width, height, color);
// if (imagecv.empty())
// Convert Qimage to Mat
imagecv = Qimage2mat(image);

View File

@@ -97,6 +97,7 @@ namespace {
case CONSTANT: return left.co.Y;
case LINEAR: return InterpolateLinearCurve(left, right, target);
case BEZIER: return InterpolateBezierCurve(left, right, target, allowed_error);
default: return InterpolateLinearCurve(left, right, target);
}
}

View File

@@ -45,7 +45,6 @@ Settings *Settings::Instance()
m_pInstance = new Settings;
m_pInstance->HARDWARE_DECODER = 0;
m_pInstance->HIGH_QUALITY_SCALING = false;
m_pInstance->WAIT_FOR_VIDEO_PROCESSING_TASK = false;
m_pInstance->OMP_THREADS = 12;
m_pInstance->FF_THREADS = 8;
m_pInstance->DE_LIMIT_HEIGHT_MAX = 1100;

View File

@@ -94,15 +94,6 @@ namespace openshot {
/// Scale mode used in FFmpeg decoding and encoding (used as an optimization for faster previews)
bool HIGH_QUALITY_SCALING = false;
/// Maximum width for image data (useful for optimzing for a smaller preview or render)
int MAX_WIDTH = 0;
/// Maximum height for image data (useful for optimzing for a smaller preview or render)
int MAX_HEIGHT = 0;
/// Wait for OpenMP task to finish before continuing (used to limit threads on slower systems)
bool WAIT_FOR_VIDEO_PROCESSING_TASK = false;
/// Number of threads of OpenMP
int OMP_THREADS = 12;

View File

@@ -74,12 +74,6 @@ Timeline::Timeline(int width, int height, Fraction fps, int sample_rate, int cha
// Init cache
final_cache = new CacheMemory();
// Configure OpenMP parallelism
// Default number of threads per block
omp_set_num_threads(max_concurrent_frames);
// Allow nested parallel sections as deeply as supported
omp_set_max_active_levels(OPEN_MP_MAX_ACTIVE);
// Init max image size
SetMaxSize(info.width, info.height);
}
@@ -207,12 +201,6 @@ Timeline::Timeline(const std::string& projectPath, bool convert_absolute_paths)
// Init cache
final_cache = new CacheMemory();
// Configure OpenMP parallelism
// Default number of threads per section
omp_set_num_threads(max_concurrent_frames);
// Allow nested parallel sections as deeply as supported
omp_set_max_active_levels(OPEN_MP_MAX_ACTIVE);
// Init max image size
SetMaxSize(info.width, info.height);
}
@@ -442,7 +430,7 @@ std::shared_ptr<Frame> Timeline::apply_effects(std::shared_ptr<Frame> frame, int
}
// Get or generate a blank frame
std::shared_ptr<Frame> Timeline::GetOrCreateFrame(Clip* clip, int64_t number)
std::shared_ptr<Frame> Timeline::GetOrCreateFrame(std::shared_ptr<Frame> background_frame, Clip* clip, int64_t number)
{
std::shared_ptr<Frame> new_frame;
@@ -454,8 +442,7 @@ std::shared_ptr<Frame> Timeline::GetOrCreateFrame(Clip* clip, int64_t number)
ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (from reader)", "number", number, "samples_in_frame", samples_in_frame);
// Attempt to get a frame (but this could fail if a reader has just been closed)
#pragma omp critical (T_GetOtCreateFrame)
new_frame = std::shared_ptr<Frame>(clip->GetFrame(number));
new_frame = std::shared_ptr<Frame>(clip->GetFrame(background_frame, number));
// Return real frame
return new_frame;
@@ -470,23 +457,15 @@ std::shared_ptr<Frame> Timeline::GetOrCreateFrame(Clip* clip, int64_t number)
ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (create blank)", "number", number, "samples_in_frame", samples_in_frame);
// Create blank frame
new_frame = std::make_shared<Frame>(number, preview_width, preview_height, "#000000", samples_in_frame, info.channels);
#pragma omp critical (T_GetOtCreateFrame)
{
new_frame->AddAudioSilence(samples_in_frame);
new_frame->SampleRate(info.sample_rate);
new_frame->ChannelsLayout(info.channel_layout);
}
return new_frame;
}
// Process a new layer of video or audio
void Timeline::add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, int64_t clip_frame_number, int64_t timeline_frame_number, bool is_top_clip, float max_volume)
{
// Get the clip's frame & image
// Get the clip's frame, composited on top of the current timeline frame
std::shared_ptr<Frame> source_frame;
#pragma omp critical (T_addLayer)
source_frame = GetOrCreateFrame(source_clip, clip_frame_number);
source_frame = GetOrCreateFrame(new_frame, source_clip, clip_frame_number);
// No frame found... so bail
if (!source_frame)
@@ -498,13 +477,9 @@ void Timeline::add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, in
/* Apply effects to the source frame (if any). If multiple clips are overlapping, only process the
* effects on the top clip. */
if (is_top_clip) {
#pragma omp critical (T_addLayer)
source_frame = apply_effects(source_frame, timeline_frame_number, source_clip->Layer());
}
// Declare an image to hold the source frame's image
std::shared_ptr<QImage> source_image;
/* COPY AUDIO - with correct volume */
if (source_clip->Reader()->info.has_audio) {
// Debug output
@@ -553,51 +528,17 @@ void Timeline::add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, in
// This is a crude solution at best. =)
if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount()){
// Force timeline frame to match the source frame
#pragma omp critical (T_addLayer)
new_frame->ResizeAudio(info.channels, source_frame->GetAudioSamplesCount(), info.sample_rate, info.channel_layout);
}
// Copy audio samples (and set initial volume). Mix samples with existing audio samples. The gains are added together, to
// be sure to set the gain's correctly, so the sum does not exceed 1.0 (of audio distortion will happen).
#pragma omp critical (T_addLayer)
new_frame->AddAudio(false, channel_mapping, 0, source_frame->GetAudioSamples(channel), source_frame->GetAudioSamplesCount(), 1.0);
}
else
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (No Audio Copied - Wrong # of Channels)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number);
}
// Skip out if video was disabled or only an audio frame (no visualisation in use)
if (source_clip->has_video.GetInt(clip_frame_number) == 0 ||
(!source_clip->Waveform() && !source_clip->Reader()->info.has_video))
// Skip the rest of the image processing for performance reasons
return;
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Get Source Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number);
// Get actual frame image data
source_image = source_frame->GetImage();
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Prepare)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "source_image->width()", source_image->width());
/* COMPOSITE SOURCE IMAGE (LAYER) ONTO FINAL IMAGE */
std::shared_ptr<QImage> new_image;
new_image = new_frame->GetImage();
// Load timeline's new frame image into a QPainter
QPainter painter(new_image.get());
// Composite a new layer onto the image
painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
painter.drawImage(0, 0, *source_image, 0, 0, source_image->width(), source_image->height());
painter.end();
// Add new QImage to frame
new_frame->AddImage(new_image);
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Completed)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width());
}
@@ -696,7 +637,6 @@ std::shared_ptr<Frame> Timeline::GetFrame(int64_t requested_frame)
// Check cache
std::shared_ptr<Frame> frame;
std::lock_guard<std::mutex> guard(get_frame_mutex);
#pragma omp critical (T_GetFrame)
frame = final_cache->GetFrame(requested_frame);
if (frame) {
// Debug output
@@ -715,7 +655,6 @@ std::shared_ptr<Frame> Timeline::GetFrame(int64_t requested_frame)
throw ReaderClosed("The Timeline is closed. Call Open() before calling this method.");
// Check cache again (due to locking)
#pragma omp critical (T_GetFrame)
frame = final_cache->GetFrame(requested_frame);
if (frame) {
// Debug output
@@ -725,146 +664,100 @@ std::shared_ptr<Frame> Timeline::GetFrame(int64_t requested_frame)
return frame;
}
// Minimum number of frames to process (for performance reasons)
// Too many causes stuttering, too few causes stuttering
int minimum_frames = std::min(max_concurrent_frames / 2, 4);
// Get a list of clips that intersect with the requested section of timeline
// This also opens the readers for intersecting clips, and marks non-intersecting clips as 'needs closing'
std::vector<Clip*> nearby_clips;
#pragma omp critical (T_GetFrame)
nearby_clips = find_intersecting_clips(requested_frame, minimum_frames, true);
nearby_clips = find_intersecting_clips(requested_frame, 1, true);
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame", "requested_frame", requested_frame, "minimum_frames", minimum_frames, "max_concurrent_frames", max_concurrent_frames);
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (processing frame)", "requested_frame", requested_frame, "omp_get_thread_num()", omp_get_thread_num());
// GENERATE CACHE FOR CLIPS (IN FRAME # SEQUENCE)
// Determine all clip frames, and request them in order (to keep resampled audio in sequence)
for (int64_t frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
{
// Loop through clips
for (auto clip : nearby_clips)
{
long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;
// Init some basic properties about this frame
int samples_in_frame = Frame::GetSamplesPerFrame(requested_frame, info.fps, info.sample_rate, info.channels);
bool does_clip_intersect = (clip_start_position <= frame_number && clip_end_position >= frame_number);
if (does_clip_intersect)
{
// Get clip frame #
long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1;
long clip_frame_number = frame_number - clip_start_position + clip_start_frame;
// Create blank frame (which will become the requested frame)
std::shared_ptr<Frame> new_frame(std::make_shared<Frame>(requested_frame, preview_width, preview_height, "#000000", samples_in_frame, info.channels));
new_frame->AddAudioSilence(samples_in_frame);
new_frame->SampleRate(info.sample_rate);
new_frame->ChannelsLayout(info.channel_layout);
// Cache clip object
clip->GetFrame(clip_frame_number);
}
}
}
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Adding solid color)", "requested_frame", requested_frame, "info.width", info.width, "info.height", info.height);
#pragma omp parallel
{
// Loop through all requested frames
#pragma omp for ordered firstprivate(nearby_clips, requested_frame, minimum_frames) schedule(static,1)
for (int64_t frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
{
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (processing frame)", "frame_number", frame_number, "omp_get_thread_num()", omp_get_thread_num());
// Add Background Color to 1st layer (if animated or not black)
if ((color.red.GetCount() > 1 || color.green.GetCount() > 1 || color.blue.GetCount() > 1) ||
(color.red.GetValue(requested_frame) != 0.0 || color.green.GetValue(requested_frame) != 0.0 || color.blue.GetValue(requested_frame) != 0.0))
new_frame->AddColor(preview_width, preview_height, color.GetColorHex(requested_frame));
// Init some basic properties about this frame
int samples_in_frame = Frame::GetSamplesPerFrame(frame_number, info.fps, info.sample_rate, info.channels);
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Loop through clips)", "requested_frame", requested_frame, "clips.size()", clips.size(), "nearby_clips.size()", nearby_clips.size());
// Create blank frame (which will become the requested frame)
std::shared_ptr<Frame> new_frame(std::make_shared<Frame>(frame_number, preview_width, preview_height, "#000000", samples_in_frame, info.channels));
#pragma omp critical (T_GetFrame)
{
new_frame->AddAudioSilence(samples_in_frame);
new_frame->SampleRate(info.sample_rate);
new_frame->ChannelsLayout(info.channel_layout);
}
// Find Clips near this time
for (auto clip : nearby_clips)
{
long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Adding solid color)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height);
bool does_clip_intersect = (clip_start_position <= requested_frame && clip_end_position >= requested_frame);
// Add Background Color to 1st layer (if animated or not black)
if ((color.red.GetCount() > 1 || color.green.GetCount() > 1 || color.blue.GetCount() > 1) ||
(color.red.GetValue(frame_number) != 0.0 || color.green.GetValue(frame_number) != 0.0 || color.blue.GetValue(frame_number) != 0.0))
new_frame->AddColor(preview_width, preview_height, color.GetColorHex(frame_number));
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Does clip intersect)", "requested_frame", requested_frame, "clip->Position()", clip->Position(), "clip->Duration()", clip->Duration(), "does_clip_intersect", does_clip_intersect);
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Loop through clips)", "frame_number", frame_number, "clips.size()", clips.size(), "nearby_clips.size()", nearby_clips.size());
// Clip is visible
if (does_clip_intersect)
{
// Determine if clip is "top" clip on this layer (only happens when multiple clips are overlapping)
bool is_top_clip = true;
float max_volume = 0.0;
for (auto nearby_clip : nearby_clips)
{
long nearby_clip_start_position = round(nearby_clip->Position() * info.fps.ToDouble()) + 1;
long nearby_clip_end_position = round((nearby_clip->Position() + nearby_clip->Duration()) * info.fps.ToDouble()) + 1;
long nearby_clip_start_frame = (nearby_clip->Start() * info.fps.ToDouble()) + 1;
long nearby_clip_frame_number = requested_frame - nearby_clip_start_position + nearby_clip_start_frame;
// Find Clips near this time
for (auto clip : nearby_clips)
{
long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;
// Determine if top clip
if (clip->Id() != nearby_clip->Id() && clip->Layer() == nearby_clip->Layer() &&
nearby_clip_start_position <= requested_frame && nearby_clip_end_position >= requested_frame &&
nearby_clip_start_position > clip_start_position && is_top_clip == true) {
is_top_clip = false;
}
bool does_clip_intersect = (clip_start_position <= frame_number && clip_end_position >= frame_number);
// Determine max volume of overlapping clips
if (nearby_clip->Reader() && nearby_clip->Reader()->info.has_audio &&
nearby_clip->has_audio.GetInt(nearby_clip_frame_number) != 0 &&
nearby_clip_start_position <= requested_frame && nearby_clip_end_position >= requested_frame) {
max_volume += nearby_clip->volume.GetValue(nearby_clip_frame_number);
}
}
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Does clip intersect)", "frame_number", frame_number, "clip->Position()", clip->Position(), "clip->Duration()", clip->Duration(), "does_clip_intersect", does_clip_intersect);
// Determine the frame needed for this clip (based on the position on the timeline)
long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1;
long clip_frame_number = requested_frame - clip_start_position + clip_start_frame;
// Clip is visible
if (does_clip_intersect)
{
// Determine if clip is "top" clip on this layer (only happens when multiple clips are overlapping)
bool is_top_clip = true;
float max_volume = 0.0;
for (auto nearby_clip : nearby_clips)
{
long nearby_clip_start_position = round(nearby_clip->Position() * info.fps.ToDouble()) + 1;
long nearby_clip_end_position = round((nearby_clip->Position() + nearby_clip->Duration()) * info.fps.ToDouble()) + 1;
long nearby_clip_start_frame = (nearby_clip->Start() * info.fps.ToDouble()) + 1;
long nearby_clip_frame_number = frame_number - nearby_clip_start_position + nearby_clip_start_frame;
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Calculate clip's frame #)", "clip->Position()", clip->Position(), "clip->Start()", clip->Start(), "info.fps.ToFloat()", info.fps.ToFloat(), "clip_frame_number", clip_frame_number);
// Determine if top clip
if (clip->Id() != nearby_clip->Id() && clip->Layer() == nearby_clip->Layer() &&
nearby_clip_start_position <= frame_number && nearby_clip_end_position >= frame_number &&
nearby_clip_start_position > clip_start_position && is_top_clip == true) {
is_top_clip = false;
}
// Add clip's frame as layer
add_layer(new_frame, clip, clip_frame_number, requested_frame, is_top_clip, max_volume);
// Determine max volume of overlapping clips
if (nearby_clip->Reader() && nearby_clip->Reader()->info.has_audio &&
nearby_clip->has_audio.GetInt(nearby_clip_frame_number) != 0 &&
nearby_clip_start_position <= frame_number && nearby_clip_end_position >= frame_number) {
max_volume += nearby_clip->volume.GetValue(nearby_clip_frame_number);
}
}
} else {
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (clip does not intersect)",
"requested_frame", requested_frame, "does_clip_intersect",
does_clip_intersect);
}
// Determine the frame needed for this clip (based on the position on the timeline)
long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1;
long clip_frame_number = frame_number - clip_start_position + clip_start_frame;
} // end clip loop
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Calculate clip's frame #)", "clip->Position()", clip->Position(), "clip->Start()", clip->Start(), "info.fps.ToFloat()", info.fps.ToFloat(), "clip_frame_number", clip_frame_number);
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Add frame to cache)", "requested_frame", requested_frame, "info.width", info.width, "info.height", info.height);
// Add clip's frame as layer
add_layer(new_frame, clip, clip_frame_number, frame_number, is_top_clip, max_volume);
// Set frame # on mapped frame
new_frame->SetFrameNumber(requested_frame);
} else
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (clip does not intersect)", "frame_number", frame_number, "does_clip_intersect", does_clip_intersect);
} // end clip loop
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Add frame to cache)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height);
// Set frame # on mapped frame
#pragma omp ordered
{
new_frame->SetFrameNumber(frame_number);
// Add final frame to cache
final_cache->Add(new_frame);
}
} // end frame loop
} // end parallel
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (end parallel region)", "requested_frame", requested_frame, "omp_get_thread_num()", omp_get_thread_num());
// Add final frame to cache
final_cache->Add(new_frame);
// Return frame (or blank frame)
return final_cache->GetFrame(requested_frame);
@@ -900,7 +793,6 @@ std::vector<Clip*> Timeline::find_intersecting_clips(int64_t requested_frame, in
ZmqLogger::Instance()->AppendDebugMethod("Timeline::find_intersecting_clips (Is clip near or intersecting)", "requested_frame", requested_frame, "min_requested_frame", min_requested_frame, "max_requested_frame", max_requested_frame, "clip->Position()", clip->Position(), "does_clip_intersect", does_clip_intersect);
// Open (or schedule for closing) this clip, based on if it's intersecting or not
#pragma omp critical (reader_lock)
update_open_clips(clip, does_clip_intersect);
// Clip is visible

View File

@@ -202,8 +202,8 @@ namespace openshot {
/// @param include Include or Exclude intersecting clips
std::vector<openshot::Clip*> find_intersecting_clips(int64_t requested_frame, int number_of_frames, bool include);
/// Get or generate a blank frame
std::shared_ptr<openshot::Frame> GetOrCreateFrame(openshot::Clip* clip, int64_t number);
/// Get a clip's frame or generate a blank frame
std::shared_ptr<openshot::Frame> GetOrCreateFrame(std::shared_ptr<Frame> background_frame, openshot::Clip* clip, int64_t number);
/// Apply effects to the source frame (if any)
std::shared_ptr<openshot::Frame> apply_effects(std::shared_ptr<openshot::Frame> frame, int64_t timeline_frame_number, int layer);

View File

@@ -126,16 +126,16 @@ void ZmqLogger::Log(std::string message)
// Create a scoped lock, allowing only a single thread to run the following code at one time
const juce::GenericScopedLock<juce::CriticalSection> lock(loggerCriticalSection);
// Send message over socket (ZeroMQ)
zmq::message_t reply (message.length());
std::memcpy (reply.data(), message.c_str(), message.length());
#if ZMQ_VERSION > ZMQ_MAKE_VERSION(4, 3, 1)
// Set flags for immediate delivery (new API)
publisher->send(reply, zmq::send_flags::dontwait);
#else
publisher->send(reply);
#endif
// // Send message over socket (ZeroMQ)
// zmq::message_t reply (message.length());
// std::memcpy (reply.data(), message.c_str(), message.length());
//
//#if ZMQ_VERSION > ZMQ_MAKE_VERSION(4, 3, 1)
// // Set flags for immediate delivery (new API)
// publisher->send(reply, zmq::send_flags::dontwait);
//#else
// publisher->send(reply);
//#endif
// Also log to file, if open
LogToFile(message);

View File

@@ -258,7 +258,7 @@ TEST(Verify_Parent_Timeline)
// Check size of frame image (with an associated timeline)
CHECK_EQUAL(c1.GetFrame(1)->GetImage()->width(), 640);
CHECK_EQUAL(c1.GetFrame(1)->GetImage()->height(), 480);
CHECK_EQUAL(c1.GetFrame(1)->GetImage()->height(), 360);
}
} // SUITE

View File

@@ -43,7 +43,6 @@ TEST(Settings_Default_Constructor)
CHECK_EQUAL(12, s->OMP_THREADS);
CHECK_EQUAL(false, s->HIGH_QUALITY_SCALING);
CHECK_EQUAL(false, s->WAIT_FOR_VIDEO_PROCESSING_TASK);
}
TEST(Settings_Change_Settings)
@@ -52,13 +51,10 @@ TEST(Settings_Change_Settings)
Settings *s = Settings::Instance();
s->OMP_THREADS = 8;
s->HIGH_QUALITY_SCALING = true;
s->WAIT_FOR_VIDEO_PROCESSING_TASK = true;
CHECK_EQUAL(8, s->OMP_THREADS);
CHECK_EQUAL(true, s->HIGH_QUALITY_SCALING);
CHECK_EQUAL(true, s->WAIT_FOR_VIDEO_PROCESSING_TASK);
CHECK_EQUAL(8, Settings::Instance()->OMP_THREADS);
CHECK_EQUAL(true, Settings::Instance()->HIGH_QUALITY_SCALING);
CHECK_EQUAL(true, Settings::Instance()->WAIT_FOR_VIDEO_PROCESSING_TASK);
}