From 5700b0ab7dcbf9dcb89d47174dd1fa427448df54 Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Wed, 26 Aug 2020 13:12:42 -0500 Subject: [PATCH 01/14] - Refactoring all Timeline drawing code into the Clip class - Making Clip a proper Reader (so it can be used directly, instead of a Timeline) --- include/Clip.h | 21 +++- src/Clip.cpp | 299 ++++++++++++++++++++++++++++++++++++++++++++++- src/Timeline.cpp | 261 ++--------------------------------------- 3 files changed, 323 insertions(+), 258 deletions(-) diff --git a/include/Clip.h b/include/Clip.h index 0fbed159..78ffa4ab 100644 --- a/include/Clip.h +++ b/include/Clip.h @@ -92,7 +92,7 @@ namespace openshot { * c2.alpha.AddPoint(384, 1.0); // Animate the alpha to visible (between frame #360 and frame #384) * @endcode */ - class Clip : public openshot::ClipBase { + class Clip : public openshot::ClipBase, public openshot::ReaderBase { protected: /// Section lock for multiple threads juce::CriticalSection getFrameCriticalSection; @@ -100,6 +100,7 @@ namespace openshot { private: bool waveform; ///< Should a waveform be used instead of the clip's image std::list effects; /// Is Reader opened // Audio resampler (if time mapping) openshot::AudioResampler *resampler; @@ -117,6 +118,9 @@ namespace openshot { /// Apply effects to the source frame (if any) std::shared_ptr apply_effects(std::shared_ptr frame); + /// Apply keyframes to the source frame (if any) + std::shared_ptr apply_keyframes(std::shared_ptr frame); + /// Get file extension std::string get_file_extension(std::string path); @@ -132,6 +136,9 @@ namespace openshot { /// Update default rotation from reader void init_reader_rotation(); + /// Compare 2 floating point numbers + bool isEqual(double a, double b); + /// Sort effects by order void sort_effects(); @@ -159,6 +166,18 @@ namespace openshot { /// Destructor virtual ~Clip(); + + /// Get the cache object used by this reader (always returns NULL for this object) + CacheMemory* GetCache() override { return 
NULL; }; + + /// Determine if reader is open or closed + bool IsOpen() override { return is_open; }; + + /// Return the type name of the class + std::string Name() override { return "Clip"; }; + + + /// @brief Add an effect to the clip /// @param effect Add an effect to the clip. An effect can modify the audio or video of an openshot::Frame. void AddEffect(openshot::EffectBase* effect); diff --git a/src/Clip.cpp b/src/Clip.cpp index d9f69440..c3460ff8 100644 --- a/src/Clip.cpp +++ b/src/Clip.cpp @@ -134,14 +134,14 @@ void Clip::init_reader_rotation() { } // Default Constructor for a clip -Clip::Clip() : resampler(NULL), reader(NULL), allocated_reader(NULL) +Clip::Clip() : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false) { // Init all default settings init_settings(); } // Constructor with reader -Clip::Clip(ReaderBase* new_reader) : resampler(NULL), reader(new_reader), allocated_reader(NULL) +Clip::Clip(ReaderBase* new_reader) : resampler(NULL), reader(new_reader), allocated_reader(NULL), is_open(false) { // Init all default settings init_settings(); @@ -158,7 +158,7 @@ Clip::Clip(ReaderBase* new_reader) : resampler(NULL), reader(new_reader), alloca } // Constructor with filepath -Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(NULL) +Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false) { // Init all default settings init_settings(); @@ -262,6 +262,10 @@ void Clip::Open() { // Open the reader reader->Open(); + is_open = true; + + // Copy Reader info to Clip + info = reader->info; // Set some clip properties from the file reader if (end == 0.0) @@ -275,6 +279,7 @@ void Clip::Open() // Close the internal reader void Clip::Close() { + is_open = false; if (reader) { ZmqLogger::Instance()->AppendDebugMethod("Clip::Close"); @@ -311,6 +316,10 @@ float Clip::End() const // Get an openshot::Frame object for a specific frame number of this reader. 
std::shared_ptr Clip::GetFrame(int64_t requested_frame) { + // Check for open reader (or throw exception) + if (!is_open) + throw ReaderClosed("The Clip is closed. Call Open() before calling this method", "N/A"); + if (reader) { // Adjust out of bounds frame number @@ -360,6 +369,9 @@ std::shared_ptr Clip::GetFrame(int64_t requested_frame) // Apply effects to the frame (if any) apply_effects(frame); + // Apply keyframe / transforms + apply_keyframes(frame); + // Return processed 'frame' return frame; } @@ -1042,3 +1054,284 @@ std::shared_ptr Clip::apply_effects(std::shared_ptr frame) // Return modified frame return frame; } + +// Compare 2 floating point numbers for equality +bool Clip::isEqual(double a, double b) +{ + return fabs(a - b) < 0.000001; +} + + +// Apply keyframes to the source frame (if any) +std::shared_ptr Clip::apply_keyframes(std::shared_ptr frame) +{ + // Get actual frame image data + std::shared_ptr source_image = frame->GetImage(); + + /* REPLACE IMAGE WITH WAVEFORM IMAGE (IF NEEDED) */ + if (Waveform()) + { + // Debug output + ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Generate Waveform Image)", "frame->number", frame->number, "Waveform()", Waveform()); + + // Get the color of the waveform + int red = wave_color.red.GetInt(frame->number); + int green = wave_color.green.GetInt(frame->number); + int blue = wave_color.blue.GetInt(frame->number); + int alpha = wave_color.alpha.GetInt(frame->number); + + // Generate Waveform Dynamically (the size of the timeline) + source_image = frame->GetWaveform(Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, red, green, blue, alpha); + frame->AddImage(std::shared_ptr(source_image)); + } + + /* ALPHA & OPACITY */ + if (alpha.GetValue(frame->number) != 1.0) + { + float alpha_value = alpha.GetValue(frame->number); + + // Get source image's pixels + unsigned char *pixels = (unsigned char *) source_image->bits(); + + // Loop through pixels + for (int pixel = 0, byte_index=0; 
pixel < source_image->width() * source_image->height(); pixel++, byte_index+=4) + { + // Apply alpha to pixel + pixels[byte_index + 3] *= alpha_value; + } + + // Debug output + ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Set Alpha & Opacity)", "alpha_value", alpha_value, "frame->number", frame->number); + } + + /* RESIZE SOURCE IMAGE - based on scale type */ + QSize source_size = source_image->size(); + switch (scale) + { + case (SCALE_FIT): { + // keep aspect ratio + source_size.scale(Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, Qt::KeepAspectRatio); + + // Debug output + ZmqLogger::Instance()->AppendDebugMethod("Clip::add_keyframes (Scale: SCALE_FIT)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height()); + break; + } + case (SCALE_STRETCH): { + // ignore aspect ratio + source_size.scale(Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, Qt::IgnoreAspectRatio); + + // Debug output + ZmqLogger::Instance()->AppendDebugMethod("Clip::add_keyframes (Scale: SCALE_STRETCH)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height()); + break; + } + case (SCALE_CROP): { + QSize width_size(Settings::Instance()->MAX_WIDTH, round(Settings::Instance()->MAX_WIDTH / (float(source_size.width()) / float(source_size.height())))); + QSize height_size(round(Settings::Instance()->MAX_HEIGHT / (float(source_size.height()) / float(source_size.width()))), Settings::Instance()->MAX_HEIGHT); + + // respect aspect ratio + if (width_size.width() >= Settings::Instance()->MAX_WIDTH && width_size.height() >= Settings::Instance()->MAX_HEIGHT) + source_size.scale(width_size.width(), width_size.height(), Qt::KeepAspectRatio); + else + source_size.scale(height_size.width(), height_size.height(), Qt::KeepAspectRatio); + + // Debug output + ZmqLogger::Instance()->AppendDebugMethod("Clip::add_keyframes (Scale: SCALE_CROP)", 
"frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height()); + break; + } + case (SCALE_NONE): { + // Calculate ratio of source size to project size + // Even with no scaling, previews need to be adjusted correctly + // (otherwise NONE scaling draws the frame image outside of the preview) + float source_width_ratio = source_size.width() / float(Settings::Instance()->MAX_WIDTH); + float source_height_ratio = source_size.height() / float(Settings::Instance()->MAX_HEIGHT); + source_size.scale(Settings::Instance()->MAX_WIDTH * source_width_ratio, Settings::Instance()->MAX_HEIGHT * source_height_ratio, Qt::KeepAspectRatio); + + // Debug output + ZmqLogger::Instance()->AppendDebugMethod("Clip::add_keyframes (Scale: SCALE_NONE)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height()); + break; + } + } + + float crop_x_value = crop_x.GetValue(frame->number); + float crop_y_value = crop_y.GetValue(frame->number); + float crop_w_value = crop_width.GetValue(frame->number); + float crop_h_value = crop_height.GetValue(frame->number); + switch(crop_gravity) + { + case (GRAVITY_TOP_LEFT): + // This is only here to prevent unused-enum warnings + break; + case (GRAVITY_TOP): + crop_x_value += 0.5; + break; + case (GRAVITY_TOP_RIGHT): + crop_x_value += 1.0; + break; + case (GRAVITY_LEFT): + crop_y_value += 0.5; + break; + case (GRAVITY_CENTER): + crop_x_value += 0.5; + crop_y_value += 0.5; + break; + case (GRAVITY_RIGHT): + crop_x_value += 1.0; + crop_y_value += 0.5; + break; + case (GRAVITY_BOTTOM_LEFT): + crop_y_value += 1.0; + break; + case (GRAVITY_BOTTOM): + crop_x_value += 0.5; + crop_y_value += 1.0; + break; + case (GRAVITY_BOTTOM_RIGHT): + crop_x_value += 1.0; + crop_y_value += 1.0; + break; + } + + /* GRAVITY LOCATION - Initialize X & Y to the correct values (before applying location curves) */ + float x = 0.0; // left + float y = 0.0; // top + + // Adjust size for scale x 
and scale y + float sx = scale_x.GetValue(frame->number); // percentage X scale + float sy = scale_y.GetValue(frame->number); // percentage Y scale + float scaled_source_width = source_size.width() * sx; + float scaled_source_height = source_size.height() * sy; + + switch (gravity) + { + case (GRAVITY_TOP_LEFT): + // This is only here to prevent unused-enum warnings + break; + case (GRAVITY_TOP): + x = (Settings::Instance()->MAX_WIDTH - scaled_source_width) / 2.0; // center + break; + case (GRAVITY_TOP_RIGHT): + x = Settings::Instance()->MAX_WIDTH - scaled_source_width; // right + break; + case (GRAVITY_LEFT): + y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height) / 2.0; // center + break; + case (GRAVITY_CENTER): + x = (Settings::Instance()->MAX_WIDTH - scaled_source_width) / 2.0; // center + y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height) / 2.0; // center + break; + case (GRAVITY_RIGHT): + x = Settings::Instance()->MAX_WIDTH - scaled_source_width; // right + y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height) / 2.0; // center + break; + case (GRAVITY_BOTTOM_LEFT): + y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height); // bottom + break; + case (GRAVITY_BOTTOM): + x = (Settings::Instance()->MAX_WIDTH - scaled_source_width) / 2.0; // center + y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height); // bottom + break; + case (GRAVITY_BOTTOM_RIGHT): + x = Settings::Instance()->MAX_WIDTH - scaled_source_width; // right + y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height); // bottom + break; + } + + // Debug output + ZmqLogger::Instance()->AppendDebugMethod("Clip::add_keyframes (Gravity)", "frame->number", frame->number, "source_clip->gravity", gravity, "scaled_source_width", scaled_source_width, "scaled_source_height", scaled_source_height); + + /* LOCATION, ROTATION, AND SCALE */ + float r = rotation.GetValue(frame->number); // rotate in degrees + x += (Settings::Instance()->MAX_WIDTH * 
location_x.GetValue(frame->number)); // move in percentage of final width + y += (Settings::Instance()->MAX_HEIGHT * location_y.GetValue(frame->number)); // move in percentage of final height + float shear_x_value = shear_x.GetValue(frame->number); + float shear_y_value = shear_y.GetValue(frame->number); + float origin_x_value = origin_x.GetValue(frame->number); + float origin_y_value = origin_y.GetValue(frame->number); + + bool transformed = false; + QTransform transform; + + // Transform source image (if needed) + ZmqLogger::Instance()->AppendDebugMethod("Clip::add_keyframes (Build QTransform - if needed)", "frame->number", frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy); + + if (!isEqual(x, 0) || !isEqual(y, 0)) { + // TRANSLATE/MOVE CLIP + transform.translate(x, y); + transformed = true; + } + + if (!isEqual(r, 0) || !isEqual(shear_x_value, 0) || !isEqual(shear_y_value, 0)) { + // ROTATE CLIP (around origin_x, origin_y) + float origin_x_offset = (scaled_source_width * origin_x_value); + float origin_y_offset = (scaled_source_height * origin_y_value); + transform.translate(origin_x_offset, origin_y_offset); + transform.rotate(r); + transform.shear(shear_x_value, shear_y_value); + transform.translate(-origin_x_offset,-origin_y_offset); + transformed = true; + } + + // SCALE CLIP (if needed) + float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx; + float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy; + + if (!isEqual(source_width_scale, 1.0) || !isEqual(source_height_scale, 1.0)) { + transform.scale(source_width_scale, source_height_scale); + transformed = true; + } + + // Debug output + ZmqLogger::Instance()->AppendDebugMethod("Clip::add_keyframes (Transform: Composite Image Layer: Prepare)", "frame->number", frame->number, "transformed", transformed); + + /* COMPOSITE SOURCE IMAGE (LAYER) ONTO FINAL IMAGE */ + std::shared_ptr new_image; + new_image = std::shared_ptr(new 
QImage(*source_image)); + new_image->fill(QColor(QString::fromStdString("#00000000"))); + + // Load timeline's new frame image into a QPainter + QPainter painter(new_image.get()); + painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true); + + // Apply transform (translate, rotate, scale)... if any + if (transformed) + painter.setTransform(transform); + + // Composite a new layer onto the image + painter.setCompositionMode(QPainter::CompositionMode_SourceOver); + painter.drawImage(0, 0, *source_image, crop_x_value * source_image->width(), crop_y_value * source_image->height(), crop_w_value * source_image->width(), crop_h_value * source_image->height()); + + // Draw frame #'s on top of image (if needed) + if (display != FRAME_DISPLAY_NONE) { + std::stringstream frame_number_str; + switch (display) + { + case (FRAME_DISPLAY_NONE): + // This is only here to prevent unused-enum warnings + break; + + case (FRAME_DISPLAY_CLIP): + frame_number_str << frame->number; + break; + + case (FRAME_DISPLAY_TIMELINE): + frame_number_str << "N/A"; + break; + + case (FRAME_DISPLAY_BOTH): + frame_number_str << "N/A" << " (" << frame->number << ")"; + break; + } + + // Draw frame number on top of image + painter.setPen(QColor("#ffffff")); + painter.drawText(20, 20, QString(frame_number_str.str().c_str())); + } + + painter.end(); + + // Add new QImage to frame + frame->AddImage(new_image); + + // Return modified frame + return frame; +} diff --git a/src/Timeline.cpp b/src/Timeline.cpp index 124058ac..5fad97fe 100644 --- a/src/Timeline.cpp +++ b/src/Timeline.cpp @@ -409,25 +409,6 @@ void Timeline::add_layer(std::shared_ptr new_frame, Clip* source_clip, in // Debug output ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer", "new_frame->number", new_frame->number, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number); - /* REPLACE IMAGE WITH WAVEFORM IMAGE (IF NEEDED) */ - if 
(source_clip->Waveform()) - { - // Debug output - ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Generate Waveform Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number); - - // Get the color of the waveform - int red = source_clip->wave_color.red.GetInt(clip_frame_number); - int green = source_clip->wave_color.green.GetInt(clip_frame_number); - int blue = source_clip->wave_color.blue.GetInt(clip_frame_number); - int alpha = source_clip->wave_color.alpha.GetInt(clip_frame_number); - - // Generate Waveform Dynamically (the size of the timeline) - std::shared_ptr source_image; - #pragma omp critical (T_addLayer) - source_image = source_frame->GetWaveform(Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, red, green, blue, alpha); - source_frame->AddImage(std::shared_ptr(source_image)); - } - /* Apply effects to the source frame (if any). If multiple clips are overlapping, only process the * effects on the top clip. 
*/ if (is_top_clip && source_frame) { @@ -498,7 +479,6 @@ void Timeline::add_layer(std::shared_ptr new_frame, Clip* source_clip, in else // Debug output ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (No Audio Copied - Wrong # of Channels)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number); - } // Skip out if video was disabled or only an audio frame (no visualisation in use) @@ -513,253 +493,26 @@ void Timeline::add_layer(std::shared_ptr new_frame, Clip* source_clip, in // Get actual frame image data source_image = source_frame->GetImage(); - /* ALPHA & OPACITY */ - if (source_clip->alpha.GetValue(clip_frame_number) != 1.0) - { - float alpha = source_clip->alpha.GetValue(clip_frame_number); - - // Get source image's pixels - unsigned char *pixels = (unsigned char *) source_image->bits(); - - // Loop through pixels - for (int pixel = 0, byte_index=0; pixel < source_image->width() * source_image->height(); pixel++, byte_index+=4) - { - // Apply alpha to pixel - pixels[byte_index + 3] *= alpha; - } - - // Debug output - ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Set Alpha & Opacity)", "alpha", alpha, "source_frame->number", source_frame->number, "clip_frame_number", clip_frame_number); - } - - /* RESIZE SOURCE IMAGE - based on scale type */ - QSize source_size = source_image->size(); - switch (source_clip->scale) - { - case (SCALE_FIT): { - // keep aspect ratio - source_size.scale(Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, Qt::KeepAspectRatio); - - // Debug output - ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_FIT)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height()); - break; - } - 
case (SCALE_STRETCH): { - // ignore aspect ratio - source_size.scale(Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, Qt::IgnoreAspectRatio); - - // Debug output - ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_STRETCH)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height()); - break; - } - case (SCALE_CROP): { - QSize width_size(Settings::Instance()->MAX_WIDTH, round(Settings::Instance()->MAX_WIDTH / (float(source_size.width()) / float(source_size.height())))); - QSize height_size(round(Settings::Instance()->MAX_HEIGHT / (float(source_size.height()) / float(source_size.width()))), Settings::Instance()->MAX_HEIGHT); - - // respect aspect ratio - if (width_size.width() >= Settings::Instance()->MAX_WIDTH && width_size.height() >= Settings::Instance()->MAX_HEIGHT) - source_size.scale(width_size.width(), width_size.height(), Qt::KeepAspectRatio); - else - source_size.scale(height_size.width(), height_size.height(), Qt::KeepAspectRatio); - - // Debug output - ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_CROP)", "source_frame->number", source_frame->number, "source_width", source_size.width(), "source_height", source_size.height()); - break; - } - case (SCALE_NONE): { - // Calculate ratio of source size to project size - // Even with no scaling, previews need to be adjusted correctly - // (otherwise NONE scaling draws the frame image outside of the preview) - float source_width_ratio = source_size.width() / float(info.width); - float source_height_ratio = source_size.height() / float(info.height); - source_size.scale(Settings::Instance()->MAX_WIDTH * source_width_ratio, Settings::Instance()->MAX_HEIGHT * source_height_ratio, Qt::KeepAspectRatio); - - // Debug output - ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_NONE)", "source_frame->number", source_frame->number, "source_width", 
source_size.width(), "source_height", source_size.height()); - break; - } - } - - float crop_x = source_clip->crop_x.GetValue(clip_frame_number); - float crop_y = source_clip->crop_y.GetValue(clip_frame_number); - float crop_w = source_clip->crop_width.GetValue(clip_frame_number); - float crop_h = source_clip->crop_height.GetValue(clip_frame_number); - switch(source_clip->crop_gravity) - { - case (GRAVITY_TOP_LEFT): - // This is only here to prevent unused-enum warnings - break; - case (GRAVITY_TOP): - crop_x += 0.5; - break; - case (GRAVITY_TOP_RIGHT): - crop_x += 1.0; - break; - case (GRAVITY_LEFT): - crop_y += 0.5; - break; - case (GRAVITY_CENTER): - crop_x += 0.5; - crop_y += 0.5; - break; - case (GRAVITY_RIGHT): - crop_x += 1.0; - crop_y += 0.5; - break; - case (GRAVITY_BOTTOM_LEFT): - crop_y += 1.0; - break; - case (GRAVITY_BOTTOM): - crop_x += 0.5; - crop_y += 1.0; - break; - case (GRAVITY_BOTTOM_RIGHT): - crop_x += 1.0; - crop_y += 1.0; - break; - } - - - /* GRAVITY LOCATION - Initialize X & Y to the correct values (before applying location curves) */ - float x = 0.0; // left - float y = 0.0; // top - - // Adjust size for scale x and scale y - float sx = source_clip->scale_x.GetValue(clip_frame_number); // percentage X scale - float sy = source_clip->scale_y.GetValue(clip_frame_number); // percentage Y scale - float scaled_source_width = source_size.width() * sx; - float scaled_source_height = source_size.height() * sy; - - switch (source_clip->gravity) - { - case (GRAVITY_TOP_LEFT): - // This is only here to prevent unused-enum warnings - break; - case (GRAVITY_TOP): - x = (Settings::Instance()->MAX_WIDTH - scaled_source_width) / 2.0; // center - break; - case (GRAVITY_TOP_RIGHT): - x = Settings::Instance()->MAX_WIDTH - scaled_source_width; // right - break; - case (GRAVITY_LEFT): - y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height) / 2.0; // center - break; - case (GRAVITY_CENTER): - x = (Settings::Instance()->MAX_WIDTH - scaled_source_width) / 
2.0; // center - y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height) / 2.0; // center - break; - case (GRAVITY_RIGHT): - x = Settings::Instance()->MAX_WIDTH - scaled_source_width; // right - y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height) / 2.0; // center - break; - case (GRAVITY_BOTTOM_LEFT): - y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height); // bottom - break; - case (GRAVITY_BOTTOM): - x = (Settings::Instance()->MAX_WIDTH - scaled_source_width) / 2.0; // center - y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height); // bottom - break; - case (GRAVITY_BOTTOM_RIGHT): - x = Settings::Instance()->MAX_WIDTH - scaled_source_width; // right - y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height); // bottom - break; - } - // Debug output - ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Gravity)", "source_frame->number", source_frame->number, "source_clip->gravity", source_clip->gravity, "info.width", info.width, "scaled_source_width", scaled_source_width, "info.height", info.height, "scaled_source_height", scaled_source_height); - - /* LOCATION, ROTATION, AND SCALE */ - float r = source_clip->rotation.GetValue(clip_frame_number); // rotate in degrees - x += (Settings::Instance()->MAX_WIDTH * source_clip->location_x.GetValue(clip_frame_number)); // move in percentage of final width - y += (Settings::Instance()->MAX_HEIGHT * source_clip->location_y.GetValue(clip_frame_number)); // move in percentage of final height - float shear_x = source_clip->shear_x.GetValue(clip_frame_number); - float shear_y = source_clip->shear_y.GetValue(clip_frame_number); - float origin_x = source_clip->origin_x.GetValue(clip_frame_number); - float origin_y = source_clip->origin_y.GetValue(clip_frame_number); - - bool transformed = false; - QTransform transform; - - // Transform source image (if needed) - ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Build QTransform - if needed)", "source_frame->number", 
source_frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy); - - if (!isEqual(x, 0) || !isEqual(y, 0)) { - // TRANSLATE/MOVE CLIP - transform.translate(x, y); - transformed = true; - } - - if (!isEqual(r, 0) || !isEqual(shear_x, 0) || !isEqual(shear_y, 0)) { - // ROTATE CLIP (around origin_x, origin_y) - float origin_x_value = (scaled_source_width * origin_x); - float origin_y_value = (scaled_source_height * origin_y); - transform.translate(origin_x_value, origin_y_value); - transform.rotate(r); - transform.shear(shear_x, shear_y); - transform.translate(-origin_x_value,-origin_y_value); - transformed = true; - } - - // SCALE CLIP (if needed) - float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx; - float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy; - - if (!isEqual(source_width_scale, 1.0) || !isEqual(source_height_scale, 1.0)) { - transform.scale(source_width_scale, source_height_scale); - transformed = true; - } - - // Debug output - ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Prepare)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed); + ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Prepare)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "source_image->width()", source_image->width()); /* COMPOSITE SOURCE IMAGE (LAYER) ONTO FINAL IMAGE */ std::shared_ptr new_image; - #pragma omp critical (T_addLayer) - new_image = new_frame->GetImage(); + new_image = new_frame->GetImage(); // Load timeline's new frame image into a QPainter QPainter painter(new_image.get()); - painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true); - - // Apply transform (translate, rotate, 
scale)... if any - if (transformed) - painter.setTransform(transform); // Composite a new layer onto the image painter.setCompositionMode(QPainter::CompositionMode_SourceOver); - painter.drawImage(0, 0, *source_image, crop_x * source_image->width(), crop_y * source_image->height(), crop_w * source_image->width(), crop_h * source_image->height()); - - // Draw frame #'s on top of image (if needed) - if (source_clip->display != FRAME_DISPLAY_NONE) { - std::stringstream frame_number_str; - switch (source_clip->display) - { - case (FRAME_DISPLAY_NONE): - // This is only here to prevent unused-enum warnings - break; - - case (FRAME_DISPLAY_CLIP): - frame_number_str << clip_frame_number; - break; - - case (FRAME_DISPLAY_TIMELINE): - frame_number_str << timeline_frame_number; - break; - - case (FRAME_DISPLAY_BOTH): - frame_number_str << timeline_frame_number << " (" << clip_frame_number << ")"; - break; - } - - // Draw frame number on top of image - painter.setPen(QColor("#ffffff")); - painter.drawText(20, 20, QString(frame_number_str.str().c_str())); - } - + painter.drawImage(0, 0, *source_image, 0, 0, source_image->width(), source_image->height()); painter.end(); + // Add new QImage to frame + new_frame->AddImage(new_image); + // Debug output - ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Completed)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed); + ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Completed)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width()); } // Update the list of 'opened' clips From 5a9d47a03d71fec8dba37ddb99bde3dbdd0a38e4 Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Wed, 26 Aug 2020 17:05:50 -0500 Subject: [PATCH 02/14] Refactored the Settings::Instance()->MAX_WIDTH and 
Settings::Instance()->MAX_HEIGHT out of the Clip class. GetFrame() now has an overload which specifies the width, height, and samples needed. Otherwise, it returns the Clip image based on the source reader (width, height, num samples). --- include/Clip.h | 16 +++++++- src/Clip.cpp | 96 +++++++++++++++++++++++++++++++----------------- src/Timeline.cpp | 6 ++- 3 files changed, 81 insertions(+), 37 deletions(-) diff --git a/include/Clip.h b/include/Clip.h index 78ffa4ab..71ab83c3 100644 --- a/include/Clip.h +++ b/include/Clip.h @@ -119,7 +119,7 @@ namespace openshot { std::shared_ptr apply_effects(std::shared_ptr frame); /// Apply keyframes to the source frame (if any) - std::shared_ptr apply_keyframes(std::shared_ptr frame); + std::shared_ptr apply_keyframes(std::shared_ptr frame, int width, int height); /// Get file extension std::string get_file_extension(std::string path); @@ -188,12 +188,24 @@ namespace openshot { /// Return the list of effects on the timeline std::list Effects() { return effects; }; - /// @brief Get an openshot::Frame object for a specific frame number of this timeline. + /// @brief Get an openshot::Frame object for a specific frame number of this timeline. The image size and number + /// of samples match the source reader. /// /// @returns The requested frame (containing the image) /// @param requested_frame The frame number that is requested std::shared_ptr GetFrame(int64_t requested_frame); + /// @brief Get an openshot::Frame object for a specific frame number of this timeline. The image size and number + /// of samples can be customized to match the Timeline, or any custom output. Extra samples will be moved to the + /// next Frame. Missing samples will be moved from the next Frame.
+ /// + /// @returns The requested frame (containing the image) + /// @param requested_frame The frame number that is requested + /// @param width The width of the image requested + /// @param height The height of the image requested + /// @param samples The number of samples requested + std::shared_ptr GetFrame(int64_t requested_frame, int width, int height, int samples); + /// Open the internal reader void Open(); diff --git a/src/Clip.cpp b/src/Clip.cpp index c3460ff8..0c664ebe 100644 --- a/src/Clip.cpp +++ b/src/Clip.cpp @@ -325,6 +325,33 @@ std::shared_ptr Clip::GetFrame(int64_t requested_frame) // Adjust out of bounds frame number requested_frame = adjust_frame_number_minimum(requested_frame); + // Is a time map detected + int64_t new_frame_number = requested_frame; + int64_t time_mapped_number = adjust_frame_number_minimum(time.GetLong(requested_frame)); + if (time.GetLength() > 1) + new_frame_number = time_mapped_number; + + // Get the # of audio samples from the time mapped Frame instance + std::shared_ptr time_mapped_original_frame = GetOrCreateFrame(new_frame_number); + return GetFrame(requested_frame, reader->info.width, reader->info.height, time_mapped_original_frame->GetAudioSamplesCount()); + } + else + // Throw error if reader not initialized + throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method."); +} + +// Get an openshot::Frame object for a specific frame number of this reader. +std::shared_ptr Clip::GetFrame(int64_t requested_frame, int width, int height, int samples) +{ + // Check for open reader (or throw exception) + if (!is_open) + throw ReaderClosed("The Clip is closed. 
Call Open() before calling this method", "N/A"); + + if (reader) + { + // Adjust out of bounds frame number + requested_frame = adjust_frame_number_minimum(requested_frame); + // Adjust has_video and has_audio overrides int enabled_audio = has_audio.GetInt(requested_frame); if (enabled_audio == -1 && reader && reader->info.has_audio) @@ -364,13 +391,17 @@ std::shared_ptr Clip::GetFrame(int64_t requested_frame) frame->AddAudio(true, channel, 0, original_frame->GetAudioSamples(channel), original_frame->GetAudioSamplesCount(), 1.0); // Get time mapped frame number (used to increase speed, change direction, etc...) + // TODO: Handle variable # of samples, since this resamples audio for different speeds (only when time curve is set) get_time_mapped_frame(frame, requested_frame); + // Adjust # of samples to match requested (the interaction with time curves will make this tricky) + // TODO: Implement move samples to/from next frame + // Apply effects to the frame (if any) apply_effects(frame); // Apply keyframe / transforms - apply_keyframes(frame); + apply_keyframes(frame, width, height); // Return processed 'frame' return frame; @@ -631,13 +662,9 @@ int64_t Clip::adjust_frame_number_minimum(int64_t frame_number) std::shared_ptr Clip::GetOrCreateFrame(int64_t number) { std::shared_ptr new_frame; - - // Init some basic properties about this frame - int samples_in_frame = Frame::GetSamplesPerFrame(number, reader->info.fps, reader->info.sample_rate, reader->info.channels); - try { // Debug output - ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (from reader)", "number", number, "samples_in_frame", samples_in_frame); + ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (from reader)", "number", number); // Attempt to get a frame (but this could fail if a reader has just been closed) new_frame = reader->GetFrame(number); @@ -654,14 +681,17 @@ std::shared_ptr Clip::GetOrCreateFrame(int64_t number) // ... 
} + // Estimate # of samples needed for this frame + int estimated_samples_in_frame = Frame::GetSamplesPerFrame(number, reader->info.fps, reader->info.sample_rate, reader->info.channels); + // Debug output - ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (create blank)", "number", number, "samples_in_frame", samples_in_frame); + ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (create blank)", "number", number, "estimated_samples_in_frame", estimated_samples_in_frame); // Create blank frame - new_frame = std::make_shared(number, reader->info.width, reader->info.height, "#000000", samples_in_frame, reader->info.channels); + new_frame = std::make_shared(number, reader->info.width, reader->info.height, "#000000", estimated_samples_in_frame, reader->info.channels); new_frame->SampleRate(reader->info.sample_rate); new_frame->ChannelsLayout(reader->info.channel_layout); - new_frame->AddAudioSilence(samples_in_frame); + new_frame->AddAudioSilence(estimated_samples_in_frame); return new_frame; } @@ -1063,7 +1093,7 @@ bool Clip::isEqual(double a, double b) // Apply keyframes to the source frame (if any) -std::shared_ptr Clip::apply_keyframes(std::shared_ptr frame) +std::shared_ptr Clip::apply_keyframes(std::shared_ptr frame, int width, int height) { // Get actual frame image data std::shared_ptr source_image = frame->GetImage(); @@ -1081,7 +1111,7 @@ std::shared_ptr Clip::apply_keyframes(std::shared_ptr frame) int alpha = wave_color.alpha.GetInt(frame->number); // Generate Waveform Dynamically (the size of the timeline) - source_image = frame->GetWaveform(Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, red, green, blue, alpha); + source_image = frame->GetWaveform(width, height, red, green, blue, alpha); frame->AddImage(std::shared_ptr(source_image)); } @@ -1110,7 +1140,7 @@ std::shared_ptr Clip::apply_keyframes(std::shared_ptr frame) { case (SCALE_FIT): { // keep aspect ratio - 
source_size.scale(Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, Qt::KeepAspectRatio); + source_size.scale(width, height, Qt::KeepAspectRatio); // Debug output ZmqLogger::Instance()->AppendDebugMethod("Clip::add_keyframes (Scale: SCALE_FIT)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height()); @@ -1118,18 +1148,18 @@ std::shared_ptr Clip::apply_keyframes(std::shared_ptr frame) } case (SCALE_STRETCH): { // ignore aspect ratio - source_size.scale(Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, Qt::IgnoreAspectRatio); + source_size.scale(width, height, Qt::IgnoreAspectRatio); // Debug output ZmqLogger::Instance()->AppendDebugMethod("Clip::add_keyframes (Scale: SCALE_STRETCH)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height()); break; } case (SCALE_CROP): { - QSize width_size(Settings::Instance()->MAX_WIDTH, round(Settings::Instance()->MAX_WIDTH / (float(source_size.width()) / float(source_size.height())))); - QSize height_size(round(Settings::Instance()->MAX_HEIGHT / (float(source_size.height()) / float(source_size.width()))), Settings::Instance()->MAX_HEIGHT); + QSize width_size(width, round(width / (float(source_size.width()) / float(source_size.height())))); + QSize height_size(round(height / (float(source_size.height()) / float(source_size.width()))), height); // respect aspect ratio - if (width_size.width() >= Settings::Instance()->MAX_WIDTH && width_size.height() >= Settings::Instance()->MAX_HEIGHT) + if (width_size.width() >= width && width_size.height() >= height) source_size.scale(width_size.width(), width_size.height(), Qt::KeepAspectRatio); else source_size.scale(height_size.width(), height_size.height(), Qt::KeepAspectRatio); @@ -1142,9 +1172,9 @@ std::shared_ptr Clip::apply_keyframes(std::shared_ptr frame) // Calculate ratio of source size to project size // Even with no scaling, previews need to be 
adjusted correctly // (otherwise NONE scaling draws the frame image outside of the preview) - float source_width_ratio = source_size.width() / float(Settings::Instance()->MAX_WIDTH); - float source_height_ratio = source_size.height() / float(Settings::Instance()->MAX_HEIGHT); - source_size.scale(Settings::Instance()->MAX_WIDTH * source_width_ratio, Settings::Instance()->MAX_HEIGHT * source_height_ratio, Qt::KeepAspectRatio); + float source_width_ratio = source_size.width() / float(width); + float source_height_ratio = source_size.height() / float(height); + source_size.scale(width * source_width_ratio, height * source_height_ratio, Qt::KeepAspectRatio); // Debug output ZmqLogger::Instance()->AppendDebugMethod("Clip::add_keyframes (Scale: SCALE_NONE)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height()); @@ -1207,32 +1237,32 @@ std::shared_ptr Clip::apply_keyframes(std::shared_ptr frame) // This is only here to prevent unused-enum warnings break; case (GRAVITY_TOP): - x = (Settings::Instance()->MAX_WIDTH - scaled_source_width) / 2.0; // center + x = (width - scaled_source_width) / 2.0; // center break; case (GRAVITY_TOP_RIGHT): - x = Settings::Instance()->MAX_WIDTH - scaled_source_width; // right + x = width - scaled_source_width; // right break; case (GRAVITY_LEFT): - y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height) / 2.0; // center + y = (height - scaled_source_height) / 2.0; // center break; case (GRAVITY_CENTER): - x = (Settings::Instance()->MAX_WIDTH - scaled_source_width) / 2.0; // center - y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height) / 2.0; // center + x = (width - scaled_source_width) / 2.0; // center + y = (height - scaled_source_height) / 2.0; // center break; case (GRAVITY_RIGHT): - x = Settings::Instance()->MAX_WIDTH - scaled_source_width; // right - y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height) / 2.0; // center + x = width - scaled_source_width; // 
right + y = (height - scaled_source_height) / 2.0; // center break; case (GRAVITY_BOTTOM_LEFT): - y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height); // bottom + y = (height - scaled_source_height); // bottom break; case (GRAVITY_BOTTOM): - x = (Settings::Instance()->MAX_WIDTH - scaled_source_width) / 2.0; // center - y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height); // bottom + x = (width - scaled_source_width) / 2.0; // center + y = (height - scaled_source_height); // bottom break; case (GRAVITY_BOTTOM_RIGHT): - x = Settings::Instance()->MAX_WIDTH - scaled_source_width; // right - y = (Settings::Instance()->MAX_HEIGHT - scaled_source_height); // bottom + x = width - scaled_source_width; // right + y = (height - scaled_source_height); // bottom break; } @@ -1241,8 +1271,8 @@ std::shared_ptr Clip::apply_keyframes(std::shared_ptr frame) /* LOCATION, ROTATION, AND SCALE */ float r = rotation.GetValue(frame->number); // rotate in degrees - x += (Settings::Instance()->MAX_WIDTH * location_x.GetValue(frame->number)); // move in percentage of final width - y += (Settings::Instance()->MAX_HEIGHT * location_y.GetValue(frame->number)); // move in percentage of final height + x += (width * location_x.GetValue(frame->number)); // move in percentage of final width + y += (height * location_y.GetValue(frame->number)); // move in percentage of final height float shear_x_value = shear_x.GetValue(frame->number); float shear_y_value = shear_y.GetValue(frame->number); float origin_x_value = origin_x.GetValue(frame->number); diff --git a/src/Timeline.cpp b/src/Timeline.cpp index 5fad97fe..7f068d1a 100644 --- a/src/Timeline.cpp +++ b/src/Timeline.cpp @@ -368,7 +368,7 @@ std::shared_ptr Timeline::GetOrCreateFrame(Clip* clip, int64_t number) // Attempt to get a frame (but this could fail if a reader has just been closed) #pragma omp critical (T_GetOtCreateFrame) - new_frame = std::shared_ptr(clip->GetFrame(number)); + new_frame = 
std::shared_ptr(clip->GetFrame(number, info.width, info.height, samples_in_frame)); // Return real frame return new_frame; @@ -668,8 +668,10 @@ std::shared_ptr Timeline::GetFrame(int64_t requested_frame) // Get clip frame # long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1; long clip_frame_number = frame_number - clip_start_position + clip_start_frame; + int samples_in_frame = Frame::GetSamplesPerFrame(frame_number, info.fps, info.sample_rate, info.channels); + // Cache clip object - clip->GetFrame(clip_frame_number); + clip->GetFrame(clip_frame_number, info.width, info.height, samples_in_frame); } } } From 453d55f41a36f6e1db74771d1cc017b0e477c3df Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Wed, 26 Aug 2020 22:47:31 -0500 Subject: [PATCH 03/14] Fixed a bug with cropping logic on Clip (disabled it temporarily). I need to replace the Crop functionality with a more robust cropping tool. Also, updated Timeline to use the MaxWidth/MaxHeight settings when calling the clip (since those are set when the screen is resized). 
--- include/Clip.h | 9 ++++++--- src/Clip.cpp | 39 +++++++++++++++++++++++++++++---------- src/Timeline.cpp | 4 ++-- 3 files changed, 37 insertions(+), 15 deletions(-) diff --git a/include/Clip.h b/include/Clip.h index 71ab83c3..51f9a859 100644 --- a/include/Clip.h +++ b/include/Clip.h @@ -116,10 +116,10 @@ namespace openshot { int64_t adjust_frame_number_minimum(int64_t frame_number); /// Apply effects to the source frame (if any) - std::shared_ptr apply_effects(std::shared_ptr frame); + void apply_effects(std::shared_ptr frame); /// Apply keyframes to the source frame (if any) - std::shared_ptr apply_keyframes(std::shared_ptr frame, int width, int height); + void apply_keyframes(std::shared_ptr frame, int width, int height); /// Get file extension std::string get_file_extension(std::string path); @@ -146,6 +146,9 @@ namespace openshot { void reverse_buffer(juce::AudioSampleBuffer* buffer); public: + /// Final cache object used to hold final frames + CacheMemory final_cache; + openshot::GravityType gravity; ///< The gravity of a clip determines where it snaps to its parent openshot::ScaleType scale; ///< The scale determines how a clip should be resized to fit its parent openshot::AnchorType anchor; ///< The anchor determines what parent a clip should snap to @@ -168,7 +171,7 @@ namespace openshot { /// Get the cache object used by this reader (always returns NULL for this object) - CacheMemory* GetCache() override { return NULL; }; + CacheMemory* GetCache() override { return &final_cache; }; /// Determine if reader is open or closed bool IsOpen() override { return is_open; }; diff --git a/src/Clip.cpp b/src/Clip.cpp index 0c664ebe..7a504cc9 100644 --- a/src/Clip.cpp +++ b/src/Clip.cpp @@ -107,6 +107,9 @@ void Clip::init_settings() // Init audio and video overrides has_audio = Keyframe(-1.0); has_video = Keyframe(-1.0); + + // Initialize Clip cache + final_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, 
info.channels); } // Init reader's rotation (if any) @@ -352,6 +355,16 @@ std::shared_ptr Clip::GetFrame(int64_t requested_frame, int width, int he // Adjust out of bounds frame number requested_frame = adjust_frame_number_minimum(requested_frame); + // Check the cache for this frame + std::shared_ptr cached_frame = final_cache.GetFrame(requested_frame); + if (cached_frame) { + // Debug output + ZmqLogger::Instance()->AppendDebugMethod("Clip::GetFrame", "returned cached frame", requested_frame); + + // Return the cached frame + return cached_frame; + } + // Adjust has_video and has_audio overrides int enabled_audio = has_audio.GetInt(requested_frame); if (enabled_audio == -1 && reader && reader->info.has_audio) @@ -403,6 +416,9 @@ std::shared_ptr Clip::GetFrame(int64_t requested_frame, int width, int he // Apply keyframe / transforms apply_keyframes(frame, width, height); + // Cache frame + final_cache.Add(frame); + // Return processed 'frame' return frame; } @@ -874,6 +890,9 @@ void Clip::SetJsonValue(const Json::Value root) { // Set parent data ClipBase::SetJsonValue(root); + // Clear cache + final_cache.Clear(); + // Set data from Json (if key is found) if (!root["gravity"].isNull()) gravity = (GravityType) root["gravity"].asInt(); @@ -1062,16 +1081,22 @@ void Clip::AddEffect(EffectBase* effect) // Sort effects sort_effects(); + + // Clear cache + final_cache.Clear(); } // Remove an effect from the clip void Clip::RemoveEffect(EffectBase* effect) { effects.remove(effect); + + // Clear cache + final_cache.Clear(); } // Apply effects to the source frame (if any) -std::shared_ptr Clip::apply_effects(std::shared_ptr frame) +void Clip::apply_effects(std::shared_ptr frame) { // Find Effects at this position and layer for (auto effect : effects) @@ -1080,9 +1105,6 @@ std::shared_ptr Clip::apply_effects(std::shared_ptr frame) frame = effect->GetFrame(frame, frame->number); } // end effect loop - - // Return modified frame - return frame; } // Compare 2 floating point 
numbers for equality @@ -1093,7 +1115,7 @@ bool Clip::isEqual(double a, double b) // Apply keyframes to the source frame (if any) -std::shared_ptr Clip::apply_keyframes(std::shared_ptr frame, int width, int height) +void Clip::apply_keyframes(std::shared_ptr frame, int width, int height) { // Get actual frame image data std::shared_ptr source_image = frame->GetImage(); @@ -1315,7 +1337,7 @@ std::shared_ptr Clip::apply_keyframes(std::shared_ptr frame, int w /* COMPOSITE SOURCE IMAGE (LAYER) ONTO FINAL IMAGE */ std::shared_ptr new_image; - new_image = std::shared_ptr(new QImage(*source_image)); + new_image = std::shared_ptr(new QImage(QSize(width, height), source_image->format())); new_image->fill(QColor(QString::fromStdString("#00000000"))); // Load timeline's new frame image into a QPainter @@ -1328,7 +1350,7 @@ std::shared_ptr Clip::apply_keyframes(std::shared_ptr frame, int w // Composite a new layer onto the image painter.setCompositionMode(QPainter::CompositionMode_SourceOver); - painter.drawImage(0, 0, *source_image, crop_x_value * source_image->width(), crop_y_value * source_image->height(), crop_w_value * source_image->width(), crop_h_value * source_image->height()); + painter.drawImage(0, 0, *source_image); // Draw frame #'s on top of image (if needed) if (display != FRAME_DISPLAY_NONE) { @@ -1361,7 +1383,4 @@ std::shared_ptr Clip::apply_keyframes(std::shared_ptr frame, int w // Add new QImage to frame frame->AddImage(new_image); - - // Return modified frame - return frame; } diff --git a/src/Timeline.cpp b/src/Timeline.cpp index 7f068d1a..d9b23ca5 100644 --- a/src/Timeline.cpp +++ b/src/Timeline.cpp @@ -368,7 +368,7 @@ std::shared_ptr Timeline::GetOrCreateFrame(Clip* clip, int64_t number) // Attempt to get a frame (but this could fail if a reader has just been closed) #pragma omp critical (T_GetOtCreateFrame) - new_frame = std::shared_ptr(clip->GetFrame(number, info.width, info.height, samples_in_frame)); + new_frame = 
std::shared_ptr(clip->GetFrame(number, Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, samples_in_frame)); // Return real frame return new_frame; @@ -671,7 +671,7 @@ std::shared_ptr Timeline::GetFrame(int64_t requested_frame) int samples_in_frame = Frame::GetSamplesPerFrame(frame_number, info.fps, info.sample_rate, info.channels); // Cache clip object - clip->GetFrame(clip_frame_number, info.width, info.height, samples_in_frame); + clip->GetFrame(clip_frame_number, Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, samples_in_frame); } } } From f9a717ef4b338db5d7188b6994b63f06bfb2aea9 Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Sun, 4 Oct 2020 16:59:21 -0500 Subject: [PATCH 04/14] Large refactor of Timeline, TimelineBase, ClipBase, and Clip, to allow a Clip access to the parent timeline instance (if available), and thus, certain properties (preview size, timeline FPS, etc...). This allows for a simpler rendering of Clip keyframes (during the Clip::GetFrame method), and a simpler Timeline class, that can change the preview window size dynamically and no longer requires a Singleton Settings class. 
- Also removed "crop" from Clip class, as it was never implemented correctly, and we have a fully functional "crop" effect when needed - Added caching to Clip class, to optimize previewing of cached frames (much faster than previous) --- include/Clip.h | 45 +++--- include/ClipBase.h | 26 ++++ include/EffectBase.h | 21 ++- include/OpenShot.h | 1 + include/ReaderBase.h | 10 +- include/Timeline.h | 4 +- include/TimelineBase.h | 47 +++++++ include/effects/Bars.h | 20 ++- include/effects/Blur.h | 20 ++- include/effects/Brightness.h | 20 ++- include/effects/ChromaKey.h | 20 ++- include/effects/ColorShift.h | 20 ++- include/effects/Crop.h | 20 ++- include/effects/Deinterlace.h | 20 ++- include/effects/Hue.h | 20 ++- include/effects/Mask.h | 20 ++- include/effects/Negate.h | 20 ++- include/effects/Pixelate.h | 20 ++- include/effects/Saturation.h | 20 ++- include/effects/Shift.h | 20 ++- include/effects/Wave.h | 20 ++- src/CMakeLists.txt | 1 + src/ChunkReader.cpp | 2 - src/ChunkWriter.cpp | 8 -- src/Clip.cpp | 242 +++++++++++++--------------------- src/EffectBase.cpp | 10 ++ src/FFmpegReader.cpp | 15 +- src/Qt/PlayerDemo.cpp | 5 - src/QtImageReader.cpp | 15 +- src/ReaderBase.cpp | 10 +- src/Settings.cpp | 2 - src/Timeline.cpp | 81 ++++++----- src/TimelineBase.cpp | 33 +++++ src/bindings/python/openshot.i | 2 + src/bindings/ruby/openshot.i | 2 + 35 files changed, 517 insertions(+), 345 deletions(-) create mode 100644 include/TimelineBase.h create mode 100644 src/TimelineBase.cpp diff --git a/include/Clip.h b/include/Clip.h index 51f9a859..2f1c5cf5 100644 --- a/include/Clip.h +++ b/include/Clip.h @@ -42,6 +42,7 @@ #include "Effects.h" #include "EffectInfo.h" #include "Fraction.h" +#include "Frame.h" #include "KeyFrame.h" #include "ReaderBase.h" #include "JuceHeader.h" @@ -146,9 +147,6 @@ namespace openshot { void reverse_buffer(juce::AudioSampleBuffer* buffer); public: - /// Final cache object used to hold final frames - CacheMemory final_cache; - openshot::GravityType 
gravity; ///< The gravity of a clip determines where it snaps to its parent openshot::ScaleType scale; ///< The scale determines how a clip should be resized to fit its parent openshot::AnchorType anchor; ///< The anchor determines what parent a clip should snap to @@ -169,9 +167,8 @@ namespace openshot { /// Destructor virtual ~Clip(); - - /// Get the cache object used by this reader (always returns NULL for this object) - CacheMemory* GetCache() override { return &final_cache; }; + /// Get the cache object used by this clip + CacheMemory* GetCache() { return &cache; }; /// Determine if reader is open or closed bool IsOpen() override { return is_open; }; @@ -191,23 +188,24 @@ namespace openshot { /// Return the list of effects on the timeline std::list Effects() { return effects; }; - /// @brief Get an openshot::Frame object for a specific frame number of this timeline. The image size and number - /// of samples match the source reader. + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// new openshot::Frame object. All Clip keyframes and effects are resolved into + /// pixels. /// - /// @returns The requested frame (containing the image) - /// @param requested_frame The frame number that is requested - std::shared_ptr GetFrame(int64_t requested_frame); + /// @returns A new openshot::Frame object + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(int64_t frame_number); - /// @brief Get an openshot::Frame object for a specific frame number of this timeline. The image size and number - /// of samples can be customized to match the Timeline, or any custom output. Extra samples will be moved to the - /// next Frame. Missing samples will be moved from the next Frame. 
+ /// @brief This method is required for all derived classes of ClipBase, and returns a + /// modified openshot::Frame object /// - /// @returns The requested frame (containing the image) - /// @param requested_frame The frame number that is requested - /// @param width The width of the image requested - /// @param height The height of the image requested - /// @param samples The number of samples requested - std::shared_ptr GetFrame(int64_t requested_frame, int width, int height, int samples); + /// A new openshot::Frame objects is returned, based on a copy from the source image, with all keyframes and clip effects + /// rendered. + /// + /// @returns The modified openshot::Frame object + /// @param frame This is ignored on Clip, due to caching optimizations. This frame instance is clobbered with the source frame. + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number); /// Open the internal reader void Open(); @@ -262,13 +260,6 @@ namespace openshot { /// Curve representing the color of the audio wave form openshot::Color wave_color; - // Crop settings and curves - openshot::GravityType crop_gravity; ///< Cropping needs to have a gravity to determine what side we are cropping - openshot::Keyframe crop_width; ///< Curve representing width in percent (0.0=0%, 1.0=100%) - openshot::Keyframe crop_height; ///< Curve representing height in percent (0.0=0%, 1.0=100%) - openshot::Keyframe crop_x; ///< Curve representing X offset in percent (-1.0=-100%, 0.0=0%, 1.0=100%) - openshot::Keyframe crop_y; ///< Curve representing Y offset in percent (-1.0=-100%, 0.0=0%, 1.0=100%) - // Perspective curves openshot::Keyframe perspective_c1_x; ///< Curves representing X for coordinate 1 openshot::Keyframe perspective_c1_y; ///< Curves representing Y for coordinate 1 diff --git a/include/ClipBase.h b/include/ClipBase.h index 1f7f55c4..e335c501 100644 --- 
a/include/ClipBase.h +++ b/include/ClipBase.h @@ -33,10 +33,13 @@ #include #include +#include "CacheMemory.h" #include "Exceptions.h" +#include "Frame.h" #include "Point.h" #include "KeyFrame.h" #include "Json.h" +#include "TimelineBase.h" namespace openshot { @@ -54,6 +57,7 @@ namespace openshot { float start; ///< The position in seconds to start playing (used to trim the beginning of a clip) float end; ///< The position in seconds to end playing (used to trim the ending of a clip) std::string previous_properties; ///< This string contains the previous JSON properties + openshot::TimelineBase* timeline; ///< Pointer to the parent timeline instance (if any) /// Generate JSON for a property Json::Value add_property_json(std::string name, float value, std::string type, std::string memo, const Keyframe* keyframe, float min_value, float max_value, bool readonly, int64_t requested_frame) const; @@ -62,6 +66,7 @@ namespace openshot { Json::Value add_property_choice_json(std::string name, int value, int selected_value) const; public: + CacheMemory cache; /// Constructor for the base clip ClipBase() { }; @@ -72,6 +77,25 @@ namespace openshot { bool operator> ( ClipBase& a) { return (Position() > a.Position()); } bool operator>= ( ClipBase& a) { return (Position() >= a.Position()); } + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// new openshot::Frame object. All Clip keyframes and effects are resolved into + /// pixels. + /// + /// @returns A new openshot::Frame object + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + virtual std::shared_ptr GetFrame(int64_t frame_number) = 0; + + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// modified openshot::Frame object + /// + /// The frame object is passed into this method and used as a starting point (pixels and audio). + /// All Clip keyframes and effects are resolved into pixels. 
+ /// + /// @returns The modified openshot::Frame object + /// @param frame The frame object that needs the clip or effect applied to it + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + virtual std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) = 0; + /// Get basic properties std::string Id() const { return id; } ///< Get the Id of this clip object float Position() const { return position; } ///< Get position on timeline (in seconds) @@ -79,6 +103,7 @@ namespace openshot { float Start() const { return start; } ///< Get start position (in seconds) of clip (trim start of video) float End() const { return end; } ///< Get end position (in seconds) of clip (trim end of video) float Duration() const { return end - start; } ///< Get the length of this clip (in seconds) + openshot::TimelineBase* ParentTimeline() { return timeline; } ///< Get the associated Timeline pointer (if any) /// Set basic properties void Id(std::string value) { id = value; } ///> Set the Id of this clip object @@ -86,6 +111,7 @@ namespace openshot { void Layer(int value) { layer = value; } ///< Set layer of clip on timeline (lower number is covered by higher numbers) void Start(float value) { start = value; } ///< Set start position (in seconds) of clip (trim start of video) void End(float value) { end = value; } ///< Set end position (in seconds) of clip (trim end of video) + void ParentTimeline(openshot::TimelineBase* new_timeline) { timeline = new_timeline; } ///< Set associated Timeline pointer /// Get and Set JSON methods virtual std::string Json() const = 0; ///< Generate JSON string of this object diff --git a/include/EffectBase.h b/include/EffectBase.h index 1f967a02..353e1817 100644 --- a/include/EffectBase.h +++ b/include/EffectBase.h @@ -67,6 +67,10 @@ namespace openshot { private: int order; ///< The order to evaluate this effect. Effects are processed in this order (when more than one overlap). 
+ + protected: + openshot::ClipBase* clip; ///< Pointer to the parent clip instance (if any) + public: /// Information about the current effect @@ -78,21 +82,16 @@ namespace openshot /// Constrain a color value from 0 to 255 int constrain(int color_value); - /// @brief This method is required for all derived classes of EffectBase, and returns a - /// modified openshot::Frame object - /// - /// The frame object is passed into this method, and a frame_number is passed in which - /// tells the effect which settings to use from its keyframes (starting at 1). - /// - /// @returns The modified openshot::Frame object - /// @param frame The frame object that needs the effect applied to it - /// @param frame_number The frame number (starting at 1) of the effect on the timeline. - virtual std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) = 0; - /// Initialize the values of the EffectInfo struct. It is important for derived classes to call /// this method, or the EffectInfo struct values will not be initialized. 
void InitEffectInfo(); + /// Parent clip object of this effect (which can be unparented and NULL) + openshot::ClipBase* ParentClip(); + + /// Set parent clip object of this effect + void ParentClip(openshot::ClipBase* new_clip); + /// Get and Set JSON methods virtual std::string Json() const = 0; ///< Generate JSON string of this object virtual void SetJson(const std::string value) = 0; ///< Load JSON string into this object diff --git a/include/OpenShot.h b/include/OpenShot.h index 5273ff0d..56f847d1 100644 --- a/include/OpenShot.h +++ b/include/OpenShot.h @@ -138,6 +138,7 @@ #include "QtHtmlReader.h" #include "QtImageReader.h" #include "QtTextReader.h" +#include "TimelineBase.h" #include "Timeline.h" #include "Settings.h" diff --git a/include/ReaderBase.h b/include/ReaderBase.h index ab765753..e2d93841 100644 --- a/include/ReaderBase.h +++ b/include/ReaderBase.h @@ -98,9 +98,9 @@ namespace openshot { protected: /// Section lock for multiple threads - juce::CriticalSection getFrameCriticalSection; - juce::CriticalSection processingCriticalSection; - openshot::ClipBase* parent; + juce::CriticalSection getFrameCriticalSection; + juce::CriticalSection processingCriticalSection; + openshot::ClipBase* clip; ///< Pointer to the parent clip instance (if any) public: @@ -111,10 +111,10 @@ namespace openshot openshot::ReaderInfo info; /// Parent clip object of this reader (which can be unparented and NULL) - openshot::ClipBase* GetClip(); + openshot::ClipBase* ParentClip(); /// Set parent clip object of this reader - void SetClip(openshot::ClipBase* clip); + void ParentClip(openshot::ClipBase* new_clip); /// Close the reader (and any resources it was consuming) virtual void Close() = 0; diff --git a/include/Timeline.h b/include/Timeline.h index 932b04ac..21cf0bf6 100644 --- a/include/Timeline.h +++ b/include/Timeline.h @@ -54,6 +54,8 @@ #include "OpenMPUtilities.h" #include "ReaderBase.h" #include "Settings.h" +#include "TimelineBase.h" + namespace openshot { @@ -146,7 
+148,7 @@ namespace openshot { * t.Close(); * @endcode */ - class Timeline : public ReaderBase { + class Timeline : public TimelineBase, public ReaderBase { private: bool is_open; /// + * + * @ref License + */ + +/* LICENSE + * + * Copyright (c) 2008-2019 OpenShot Studios, LLC + * . This file is part of + * OpenShot Library (libopenshot), an open-source project dedicated to + * delivering high quality video editing and animation solutions to the + * world. For more information visit . + * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see . + */ + +#ifndef OPENSHOT_TIMELINE_BASE_H +#define OPENSHOT_TIMELINE_BASE_H + + +namespace openshot { + /** + * @brief This class represents a timeline (used for building generic timeline implementations) + */ + class TimelineBase { + + public: + int preview_width; ///< Optional preview width of timeline image. If your preview window is smaller than the timeline, it's recommended to set this. + int preview_height; ///< Optional preview height of timeline image. If your preview window is smaller than the timeline, it's recommended to set this. 
+ }; +} + +#endif diff --git a/include/effects/Bars.h b/include/effects/Bars.h index 7c92255a..a781a520 100644 --- a/include/effects/Bars.h +++ b/include/effects/Bars.h @@ -77,16 +77,24 @@ namespace openshot /// @param bottom The curve to adjust the bottom bar size (between 0 and 1) Bars(Color color, Keyframe left, Keyframe top, Keyframe right, Keyframe bottom); - /// @brief This method is required for all derived classes of EffectBase, and returns a + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// new openshot::Frame object. All Clip keyframes and effects are resolved into + /// pixels. + /// + /// @returns A new openshot::Frame object + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(int64_t frame_number) override { return GetFrame(std::shared_ptr (new Frame()), frame_number); } + + /// @brief This method is required for all derived classes of ClipBase, and returns a /// modified openshot::Frame object /// - /// The frame object is passed into this method, and a frame_number is passed in which - /// tells the effect which settings to use from its keyframes (starting at 1). + /// The frame object is passed into this method and used as a starting point (pixels and audio). + /// All Clip keyframes and effects are resolved into pixels. /// /// @returns The modified openshot::Frame object - /// @param frame The frame object that needs the effect applied to it - /// @param frame_number The frame number (starting at 1) of the effect on the timeline. - std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; + /// @param frame The frame object that needs the clip or effect applied to it + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. 
+ std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; /// Get and Set JSON methods std::string Json() const override; ///< Generate JSON string of this object diff --git a/include/effects/Blur.h b/include/effects/Blur.h index 60a0cd08..a3211e76 100644 --- a/include/effects/Blur.h +++ b/include/effects/Blur.h @@ -89,16 +89,24 @@ namespace openshot /// @param new_iterations The curve to adjust the # of iterations (between 1 and 100) Blur(Keyframe new_horizontal_radius, Keyframe new_vertical_radius, Keyframe new_sigma, Keyframe new_iterations); - /// @brief This method is required for all derived classes of EffectBase, and returns a + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// new openshot::Frame object. All Clip keyframes and effects are resolved into + /// pixels. + /// + /// @returns A new openshot::Frame object + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(int64_t frame_number) override { return GetFrame(std::shared_ptr (new Frame()), frame_number); } + + /// @brief This method is required for all derived classes of ClipBase, and returns a /// modified openshot::Frame object /// - /// The frame object is passed into this method, and a frame_number is passed in which - /// tells the effect which settings to use from its keyframes (starting at 1). + /// The frame object is passed into this method and used as a starting point (pixels and audio). + /// All Clip keyframes and effects are resolved into pixels. /// /// @returns The modified openshot::Frame object - /// @param frame The frame object that needs the effect applied to it - /// @param frame_number The frame number (starting at 1) of the effect on the timeline. 
- std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; + /// @param frame The frame object that needs the clip or effect applied to it + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; /// Get and Set JSON methods std::string Json() const override; ///< Generate JSON string of this object diff --git a/include/effects/Brightness.h b/include/effects/Brightness.h index 5f25b94a..5e36671f 100644 --- a/include/effects/Brightness.h +++ b/include/effects/Brightness.h @@ -77,16 +77,24 @@ namespace openshot /// @param new_contrast The curve to adjust the contrast (3 is typical, 20 is a lot, 100 is max. 0 is invalid) Brightness(Keyframe new_brightness, Keyframe new_contrast); - /// @brief This method is required for all derived classes of EffectBase, and returns a + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// new openshot::Frame object. All Clip keyframes and effects are resolved into + /// pixels. + /// + /// @returns A new openshot::Frame object + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(int64_t frame_number) override { return GetFrame(std::shared_ptr (new Frame()), frame_number); } + + /// @brief This method is required for all derived classes of ClipBase, and returns a /// modified openshot::Frame object /// - /// The frame object is passed into this method, and a frame_number is passed in which - /// tells the effect which settings to use from its keyframes (starting at 1). + /// The frame object is passed into this method and used as a starting point (pixels and audio). + /// All Clip keyframes and effects are resolved into pixels. 
/// /// @returns The modified openshot::Frame object - /// @param frame The frame object that needs the effect applied to it - /// @param frame_number The frame number (starting at 1) of the effect on the timeline. - std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; + /// @param frame The frame object that needs the clip or effect applied to it + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; /// Get and Set JSON methods std::string Json() const override; ///< Generate JSON string of this object diff --git a/include/effects/ChromaKey.h b/include/effects/ChromaKey.h index fcc8c3b1..a59e582d 100644 --- a/include/effects/ChromaKey.h +++ b/include/effects/ChromaKey.h @@ -74,16 +74,24 @@ namespace openshot /// @param fuzz The fuzz factor (or threshold) ChromaKey(Color color, Keyframe fuzz); - /// @brief This method is required for all derived classes of EffectBase, and returns a + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// new openshot::Frame object. All Clip keyframes and effects are resolved into + /// pixels. + /// + /// @returns A new openshot::Frame object + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(int64_t frame_number) override { return GetFrame(std::shared_ptr (new Frame()), frame_number); } + + /// @brief This method is required for all derived classes of ClipBase, and returns a /// modified openshot::Frame object /// - /// The frame object is passed into this method, and a frame_number is passed in which - /// tells the effect which settings to use from its keyframes (starting at 1). + /// The frame object is passed into this method and used as a starting point (pixels and audio). + /// All Clip keyframes and effects are resolved into pixels. 
/// /// @returns The modified openshot::Frame object - /// @param frame The frame object that needs the effect applied to it - /// @param frame_number The frame number (starting at 1) of the effect on the timeline. - std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; + /// @param frame The frame object that needs the clip or effect applied to it + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; /// Get and Set JSON methods std::string Json() const override; ///< Generate JSON string of this object diff --git a/include/effects/ColorShift.h b/include/effects/ColorShift.h index 4ef56dc6..b6c596a9 100644 --- a/include/effects/ColorShift.h +++ b/include/effects/ColorShift.h @@ -81,16 +81,24 @@ namespace openshot /// @param alpha_y The curve to adjust the Alpha y shift (between -1 and 1, percentage) ColorShift(Keyframe red_x, Keyframe red_y, Keyframe green_x, Keyframe green_y, Keyframe blue_x, Keyframe blue_y, Keyframe alpha_x, Keyframe alpha_y); - /// @brief This method is required for all derived classes of EffectBase, and returns a + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// new openshot::Frame object. All Clip keyframes and effects are resolved into + /// pixels. + /// + /// @returns A new openshot::Frame object + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(int64_t frame_number) override { return GetFrame(std::shared_ptr (new Frame()), frame_number); } + + /// @brief This method is required for all derived classes of ClipBase, and returns a /// modified openshot::Frame object /// - /// The frame object is passed into this method, and a frame_number is passed in which - /// tells the effect which settings to use from its keyframes (starting at 1). 
+ /// The frame object is passed into this method and used as a starting point (pixels and audio). + /// All Clip keyframes and effects are resolved into pixels. /// /// @returns The modified openshot::Frame object - /// @param frame The frame object that needs the effect applied to it - /// @param frame_number The frame number (starting at 1) of the effect on the timeline. - std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; + /// @param frame The frame object that needs the clip or effect applied to it + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; /// Get and Set JSON methods std::string Json() const override; ///< Generate JSON string of this object diff --git a/include/effects/Crop.h b/include/effects/Crop.h index f5ba07f2..ccdba3f1 100644 --- a/include/effects/Crop.h +++ b/include/effects/Crop.h @@ -76,16 +76,24 @@ namespace openshot /// @param bottom The curve to adjust the bottom bar size (between 0 and 1) Crop(Keyframe left, Keyframe top, Keyframe right, Keyframe bottom); - /// @brief This method is required for all derived classes of EffectBase, and returns a + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// new openshot::Frame object. All Clip keyframes and effects are resolved into + /// pixels. + /// + /// @returns A new openshot::Frame object + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. 
+ std::shared_ptr GetFrame(int64_t frame_number) override { return GetFrame(std::shared_ptr (new Frame()), frame_number); } + + /// @brief This method is required for all derived classes of ClipBase, and returns a /// modified openshot::Frame object /// - /// The frame object is passed into this method, and a frame_number is passed in which - /// tells the effect which settings to use from its keyframes (starting at 1). + /// The frame object is passed into this method and used as a starting point (pixels and audio). + /// All Clip keyframes and effects are resolved into pixels. /// /// @returns The modified openshot::Frame object - /// @param frame The frame object that needs the effect applied to it - /// @param frame_number The frame number (starting at 1) of the effect on the timeline. - std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; + /// @param frame The frame object that needs the clip or effect applied to it + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; /// Get and Set JSON methods std::string Json() const override; ///< Generate JSON string of this object diff --git a/include/effects/Deinterlace.h b/include/effects/Deinterlace.h index 97c77853..83a9f2b2 100644 --- a/include/effects/Deinterlace.h +++ b/include/effects/Deinterlace.h @@ -70,16 +70,24 @@ namespace openshot /// Default constructor Deinterlace(bool isOdd); - /// @brief This method is required for all derived classes of EffectBase, and returns a + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// new openshot::Frame object. All Clip keyframes and effects are resolved into + /// pixels. + /// + /// @returns A new openshot::Frame object + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. 
+ std::shared_ptr GetFrame(int64_t frame_number) override { return GetFrame(std::shared_ptr (new Frame()), frame_number); } + + /// @brief This method is required for all derived classes of ClipBase, and returns a /// modified openshot::Frame object /// - /// The frame object is passed into this method, and a frame_number is passed in which - /// tells the effect which settings to use from its keyframes (starting at 1). + /// The frame object is passed into this method and used as a starting point (pixels and audio). + /// All Clip keyframes and effects are resolved into pixels. /// /// @returns The modified openshot::Frame object - /// @param frame The frame object that needs the effect applied to it - /// @param frame_number The frame number (starting at 1) of the effect on the timeline. - std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; + /// @param frame The frame object that needs the clip or effect applied to it + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; /// Get and Set JSON methods std::string Json() const override; ///< Generate JSON string of this object diff --git a/include/effects/Hue.h b/include/effects/Hue.h index 833bf087..e561bbf7 100644 --- a/include/effects/Hue.h +++ b/include/effects/Hue.h @@ -67,16 +67,24 @@ namespace openshot /// @param hue The curve to adjust the hue shift (between 0 and 1) Hue(Keyframe hue); - /// @brief This method is required for all derived classes of EffectBase, and returns a + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// new openshot::Frame object. All Clip keyframes and effects are resolved into + /// pixels. + /// + /// @returns A new openshot::Frame object + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. 
+ std::shared_ptr GetFrame(int64_t frame_number) override { return GetFrame(std::shared_ptr (new Frame()), frame_number); } + + /// @brief This method is required for all derived classes of ClipBase, and returns a /// modified openshot::Frame object /// - /// The frame object is passed into this method, and a frame_number is passed in which - /// tells the effect which settings to use from its keyframes (starting at 1). + /// The frame object is passed into this method and used as a starting point (pixels and audio). + /// All Clip keyframes and effects are resolved into pixels. /// /// @returns The modified openshot::Frame object - /// @param frame The frame object that needs the effect applied to it - /// @param frame_number The frame number (starting at 1) of the effect on the timeline. - std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; + /// @param frame The frame object that needs the clip or effect applied to it + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; /// Get and Set JSON methods std::string Json() const override; ///< Generate JSON string of this object diff --git a/include/effects/Mask.h b/include/effects/Mask.h index 8156b843..910f1308 100644 --- a/include/effects/Mask.h +++ b/include/effects/Mask.h @@ -89,16 +89,24 @@ namespace openshot /// @param mask_contrast The curve to adjust the contrast of the wipe's mask (3 is typical, 20 is a lot, 0 is invalid) Mask(ReaderBase *mask_reader, Keyframe mask_brightness, Keyframe mask_contrast); - /// @brief This method is required for all derived classes of EffectBase, and returns a + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// new openshot::Frame object. All Clip keyframes and effects are resolved into + /// pixels. 
+ /// + /// @returns A new openshot::Frame object + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(int64_t frame_number) override { return GetFrame(std::shared_ptr (new Frame()), frame_number); } + + /// @brief This method is required for all derived classes of ClipBase, and returns a /// modified openshot::Frame object /// - /// The frame object is passed into this method, and a frame_number is passed in which - /// tells the effect which settings to use from its keyframes (starting at 1). + /// The frame object is passed into this method and used as a starting point (pixels and audio). + /// All Clip keyframes and effects are resolved into pixels. /// /// @returns The modified openshot::Frame object - /// @param frame The frame object that needs the effect applied to it - /// @param frame_number The frame number (starting at 1) of the effect on the timeline. - std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; + /// @param frame The frame object that needs the clip or effect applied to it + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; /// Get and Set JSON methods std::string Json() const override; ///< Generate JSON string of this object diff --git a/include/effects/Negate.h b/include/effects/Negate.h index c691a86c..cab98f0a 100644 --- a/include/effects/Negate.h +++ b/include/effects/Negate.h @@ -58,16 +58,24 @@ namespace openshot /// Default constructor Negate(); - /// @brief This method is required for all derived classes of EffectBase, and returns a + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// new openshot::Frame object. All Clip keyframes and effects are resolved into + /// pixels. 
+ /// + /// @returns A new openshot::Frame object + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(int64_t frame_number) override { return GetFrame(std::shared_ptr (new Frame()), frame_number); } + + /// @brief This method is required for all derived classes of ClipBase, and returns a /// modified openshot::Frame object /// - /// The frame object is passed into this method, and a frame_number is passed in which - /// tells the effect which settings to use from its keyframes (starting at 1). + /// The frame object is passed into this method and used as a starting point (pixels and audio). + /// All Clip keyframes and effects are resolved into pixels. /// /// @returns The modified openshot::Frame object - /// @param frame The frame object that needs the effect applied to it - /// @param frame_number The frame number (starting at 1) of the effect on the timeline. - std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; + /// @param frame The frame object that needs the clip or effect applied to it + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; /// Get and Set JSON methods std::string Json() const override; ///< Generate JSON string of this object diff --git a/include/effects/Pixelate.h b/include/effects/Pixelate.h index 4cdd440f..50f9b673 100644 --- a/include/effects/Pixelate.h +++ b/include/effects/Pixelate.h @@ -76,16 +76,24 @@ namespace openshot /// @param bottom The curve to adjust the bottom margin size (between 0 and 1) Pixelate(Keyframe pixelization, Keyframe left, Keyframe top, Keyframe right, Keyframe bottom); - /// @brief This method is required for all derived classes of EffectBase, and returns a + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// new openshot::Frame object. 
All Clip keyframes and effects are resolved into + /// pixels. + /// + /// @returns A new openshot::Frame object + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(int64_t frame_number) override { return GetFrame(std::shared_ptr (new Frame()), frame_number); } + + /// @brief This method is required for all derived classes of ClipBase, and returns a /// modified openshot::Frame object /// - /// The frame object is passed into this method, and a frame_number is passed in which - /// tells the effect which settings to use from its keyframes (starting at 1). + /// The frame object is passed into this method and used as a starting point (pixels and audio). + /// All Clip keyframes and effects are resolved into pixels. /// /// @returns The modified openshot::Frame object - /// @param frame The frame object that needs the effect applied to it - /// @param frame_number The frame number (starting at 1) of the effect on the timeline. - std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; + /// @param frame The frame object that needs the clip or effect applied to it + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. 
+ std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; /// Get and Set JSON methods std::string Json() const override; ///< Generate JSON string of this object diff --git a/include/effects/Saturation.h b/include/effects/Saturation.h index f272305f..127f6829 100644 --- a/include/effects/Saturation.h +++ b/include/effects/Saturation.h @@ -74,16 +74,24 @@ namespace openshot /// @param new_saturation The curve to adjust the saturation of the frame's image (0.0 = black and white, 1.0 = normal, 2.0 = double saturation) Saturation(Keyframe new_saturation); - /// @brief This method is required for all derived classes of EffectBase, and returns a + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// new openshot::Frame object. All Clip keyframes and effects are resolved into + /// pixels. + /// + /// @returns A new openshot::Frame object + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(int64_t frame_number) override { return GetFrame(std::shared_ptr (new Frame()), frame_number); } + + /// @brief This method is required for all derived classes of ClipBase, and returns a /// modified openshot::Frame object /// - /// The frame object is passed into this method, and a frame_number is passed in which - /// tells the effect which settings to use from its keyframes (starting at 1). + /// The frame object is passed into this method and used as a starting point (pixels and audio). + /// All Clip keyframes and effects are resolved into pixels. /// /// @returns The modified openshot::Frame object - /// @param frame The frame object that needs the effect applied to it - /// @param frame_number The frame number (starting at 1) of the effect on the timeline. 
- std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; + /// @param frame The frame object that needs the clip or effect applied to it + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; /// Get and Set JSON methods std::string Json() const override; ///< Generate JSON string of this object diff --git a/include/effects/Shift.h b/include/effects/Shift.h index 7a7efbea..b2c3242d 100644 --- a/include/effects/Shift.h +++ b/include/effects/Shift.h @@ -70,16 +70,24 @@ namespace openshot /// @param y The curve to adjust the y shift (between -1 and 1, percentage) Shift(Keyframe x, Keyframe y); - /// @brief This method is required for all derived classes of EffectBase, and returns a + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// new openshot::Frame object. All Clip keyframes and effects are resolved into + /// pixels. + /// + /// @returns A new openshot::Frame object + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(int64_t frame_number) override { return GetFrame(std::shared_ptr (new Frame()), frame_number); } + + /// @brief This method is required for all derived classes of ClipBase, and returns a /// modified openshot::Frame object /// - /// The frame object is passed into this method, and a frame_number is passed in which - /// tells the effect which settings to use from its keyframes (starting at 1). + /// The frame object is passed into this method and used as a starting point (pixels and audio). + /// All Clip keyframes and effects are resolved into pixels. /// /// @returns The modified openshot::Frame object - /// @param frame The frame object that needs the effect applied to it - /// @param frame_number The frame number (starting at 1) of the effect on the timeline. 
- std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; + /// @param frame The frame object that needs the clip or effect applied to it + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; /// Get and Set JSON methods std::string Json() const override; ///< Generate JSON string of this object diff --git a/include/effects/Wave.h b/include/effects/Wave.h index d4759c24..3b922eb3 100644 --- a/include/effects/Wave.h +++ b/include/effects/Wave.h @@ -76,16 +76,24 @@ namespace openshot /// @param speed_y The curve to adjust the vertical speed (0 to 10) Wave(Keyframe wavelength, Keyframe amplitude, Keyframe multiplier, Keyframe shift_x, Keyframe speed_y); - /// @brief This method is required for all derived classes of EffectBase, and returns a + /// @brief This method is required for all derived classes of ClipBase, and returns a + /// new openshot::Frame object. All Clip keyframes and effects are resolved into + /// pixels. + /// + /// @returns A new openshot::Frame object + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(int64_t frame_number) override { return GetFrame(std::shared_ptr (new Frame()), frame_number); } + + /// @brief This method is required for all derived classes of ClipBase, and returns a /// modified openshot::Frame object /// - /// The frame object is passed into this method, and a frame_number is passed in which - /// tells the effect which settings to use from its keyframes (starting at 1). + /// The frame object is passed into this method and used as a starting point (pixels and audio). + /// All Clip keyframes and effects are resolved into pixels. 
/// /// @returns The modified openshot::Frame object - /// @param frame The frame object that needs the effect applied to it - /// @param frame_number The frame number (starting at 1) of the effect on the timeline. - std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; + /// @param frame The frame object that needs the clip or effect applied to it + /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline. + std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override; /// Get and Set JSON methods std::string Json() const override; ///< Generate JSON string of this object diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index dec7c853..e1dfc36a 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -176,6 +176,7 @@ set(OPENSHOT_SOURCES QtPlayer.cpp QtTextReader.cpp Settings.cpp + TimelineBase.cpp Timeline.cpp) # Video effects diff --git a/src/ChunkReader.cpp b/src/ChunkReader.cpp index c194ce33..fb45e8f6 100644 --- a/src/ChunkReader.cpp +++ b/src/ChunkReader.cpp @@ -222,7 +222,6 @@ std::shared_ptr ChunkReader::GetFrame(int64_t requested_frame) // Close existing reader (if needed) if (local_reader) { - std::cout << "Close READER" << std::endl; // Close and delete old reader local_reader->Close(); delete local_reader; @@ -230,7 +229,6 @@ std::shared_ptr ChunkReader::GetFrame(int64_t requested_frame) try { - std::cout << "Load READER: " << chunk_video_path << std::endl; // Load new FFmpegReader local_reader = new FFmpegReader(chunk_video_path); local_reader->Open(); // open reader diff --git a/src/ChunkWriter.cpp b/src/ChunkWriter.cpp index f9d653b0..b6256eaf 100644 --- a/src/ChunkWriter.cpp +++ b/src/ChunkWriter.cpp @@ -157,10 +157,6 @@ void ChunkWriter::WriteFrame(std::shared_ptr frame) // Write the frames once it reaches the correct chunk size if (frame_count % chunk_size == 0 && frame_count >= chunk_size) { - std::cout << "Done with chunk" << std::endl; - std::cout << 
"frame_count: " << frame_count << std::endl; - std::cout << "chunk_size: " << chunk_size << std::endl; - // Pad an additional 12 frames for (int z = 0; z<12; z++) { @@ -229,10 +225,6 @@ void ChunkWriter::Close() // Write the frames once it reaches the correct chunk size if (is_writing) { - std::cout << "Final chunk" << std::endl; - std::cout << "frame_count: " << frame_count << std::endl; - std::cout << "chunk_size: " << chunk_size << std::endl; - // Pad an additional 12 frames for (int z = 0; z<12; z++) { diff --git a/src/Clip.cpp b/src/Clip.cpp index 7a504cc9..91510576 100644 --- a/src/Clip.cpp +++ b/src/Clip.cpp @@ -79,13 +79,6 @@ void Clip::init_settings() // Init audio waveform color wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255); - // Init crop settings - crop_gravity = GRAVITY_TOP_LEFT; - crop_width = Keyframe(1.0); - crop_height = Keyframe(1.0); - crop_x = Keyframe(0.0); - crop_y = Keyframe(0.0); - // Init shear and perspective curves shear_x = Keyframe(0.0); shear_y = Keyframe(0.0); @@ -109,7 +102,7 @@ void Clip::init_settings() has_video = Keyframe(-1.0); // Initialize Clip cache - final_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels); + cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels); } // Init reader's rotation (if any) @@ -156,7 +149,7 @@ Clip::Clip(ReaderBase* new_reader) : resampler(NULL), reader(new_reader), alloca // Update duration and set parent if (reader) { End(reader->info.duration); - reader->SetClip(this); + reader->ParentClip(this); } } @@ -177,7 +170,7 @@ Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(N try { // Open common video format - reader = new FFmpegReader(path); + reader = new openshot::FFmpegReader(path); } catch(...) 
{ } } @@ -186,7 +179,7 @@ Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(N try { // Open common video format - reader = new Timeline(path, true); + reader = new openshot::Timeline(path, true); } catch(...) { } } @@ -198,13 +191,13 @@ Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(N try { // Try an image reader - reader = new QtImageReader(path); + reader = new openshot::QtImageReader(path); } catch(...) { try { // Try a video reader - reader = new FFmpegReader(path); + reader = new openshot::FFmpegReader(path); } catch(...) { } } @@ -213,7 +206,7 @@ Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(N // Update duration and set parent if (reader) { End(reader->info.duration); - reader->SetClip(this); + reader->ParentClip(this); allocated_reader = reader; init_reader_rotation(); } @@ -242,7 +235,7 @@ void Clip::Reader(ReaderBase* new_reader) reader = new_reader; // set parent - reader->SetClip(this); + reader->ParentClip(this); // Init rotation (if any) init_reader_rotation(); @@ -316,8 +309,8 @@ float Clip::End() const return end; } -// Get an openshot::Frame object for a specific frame number of this reader. -std::shared_ptr Clip::GetFrame(int64_t requested_frame) +// Create an openshot::Frame object for a specific frame number of this reader. 
+std::shared_ptr Clip::GetFrame(int64_t frame_number) { // Check for open reader (or throw exception) if (!is_open) @@ -326,25 +319,19 @@ std::shared_ptr Clip::GetFrame(int64_t requested_frame) if (reader) { // Adjust out of bounds frame number - requested_frame = adjust_frame_number_minimum(requested_frame); + frame_number = adjust_frame_number_minimum(frame_number); - // Is a time map detected - int64_t new_frame_number = requested_frame; - int64_t time_mapped_number = adjust_frame_number_minimum(time.GetLong(requested_frame)); - if (time.GetLength() > 1) - new_frame_number = time_mapped_number; - - // Get the # of audio samples from the time mapped Frame instance - std::shared_ptr time_mapped_original_frame = GetOrCreateFrame(new_frame_number); - return GetFrame(requested_frame, reader->info.width, reader->info.height, time_mapped_original_frame->GetAudioSamplesCount()); + // Get the original frame and pass it to GetFrame overload + std::shared_ptr original_frame = GetOrCreateFrame(frame_number); + return GetFrame(original_frame, frame_number); } else // Throw error if reader not initialized throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method."); } -// Get an openshot::Frame object for a specific frame number of this reader. 
-std::shared_ptr Clip::GetFrame(int64_t requested_frame, int width, int height, int samples) +// Use an existing openshot::Frame object and draw this Clip's frame onto it +std::shared_ptr Clip::GetFrame(std::shared_ptr frame, int64_t frame_number) { // Check for open reader (or throw exception) if (!is_open) @@ -353,33 +340,33 @@ std::shared_ptr Clip::GetFrame(int64_t requested_frame, int width, int he if (reader) { // Adjust out of bounds frame number - requested_frame = adjust_frame_number_minimum(requested_frame); + frame_number = adjust_frame_number_minimum(frame_number); // Check the cache for this frame - std::shared_ptr cached_frame = final_cache.GetFrame(requested_frame); + std::shared_ptr cached_frame = cache.GetFrame(frame_number); if (cached_frame) { // Debug output - ZmqLogger::Instance()->AppendDebugMethod("Clip::GetFrame", "returned cached frame", requested_frame); + ZmqLogger::Instance()->AppendDebugMethod("Clip::GetFrame", "returned cached frame", frame_number); // Return the cached frame return cached_frame; } // Adjust has_video and has_audio overrides - int enabled_audio = has_audio.GetInt(requested_frame); + int enabled_audio = has_audio.GetInt(frame_number); if (enabled_audio == -1 && reader && reader->info.has_audio) enabled_audio = 1; else if (enabled_audio == -1 && reader && !reader->info.has_audio) enabled_audio = 0; - int enabled_video = has_video.GetInt(requested_frame); + int enabled_video = has_video.GetInt(frame_number); if (enabled_video == -1 && reader && reader->info.has_video) enabled_video = 1; else if (enabled_video == -1 && reader && !reader->info.has_audio) enabled_video = 0; // Is a time map detected - int64_t new_frame_number = requested_frame; - int64_t time_mapped_number = adjust_frame_number_minimum(time.GetLong(requested_frame)); + int64_t new_frame_number = frame_number; + int64_t time_mapped_number = adjust_frame_number_minimum(time.GetLong(frame_number)); if (time.GetLength() > 1) new_frame_number = time_mapped_number; 
@@ -387,13 +374,6 @@ std::shared_ptr Clip::GetFrame(int64_t requested_frame, int width, int he std::shared_ptr original_frame; original_frame = GetOrCreateFrame(new_frame_number); - // Create a new frame - std::shared_ptr frame(new Frame(new_frame_number, 1, 1, "#000000", original_frame->GetAudioSamplesCount(), original_frame->GetAudioChannelsCount())); - { - frame->SampleRate(original_frame->SampleRate()); - frame->ChannelsLayout(original_frame->ChannelsLayout()); - } - // Copy the image from the odd field if (enabled_video) frame->AddImage(std::shared_ptr(new QImage(*original_frame->GetImage()))); @@ -404,20 +384,29 @@ std::shared_ptr Clip::GetFrame(int64_t requested_frame, int width, int he frame->AddAudio(true, channel, 0, original_frame->GetAudioSamples(channel), original_frame->GetAudioSamplesCount(), 1.0); // Get time mapped frame number (used to increase speed, change direction, etc...) - // TODO: Handle variable # of samples, since this resamples audio for different speeds (only when time curve is set) - get_time_mapped_frame(frame, requested_frame); - - // Adjust # of samples to match requested (the interaction with time curves will make this tricky) - // TODO: Implement move samples to/from next frame + get_time_mapped_frame(frame, frame_number); // Apply effects to the frame (if any) apply_effects(frame); + // Determine size of image (from Timeline or Reader) + int width = 0; + int height = 0; + if (timeline) { + // Use timeline size (if available) + width = timeline->preview_width; + height = timeline->preview_height; + } else { + // Fallback to clip size + width = reader->info.width; + height = reader->info.height; + } + // Apply keyframe / transforms apply_keyframes(frame, width, height); // Cache frame - final_cache.Add(frame); + cache.Add(frame); // Return processed 'frame' return frame; @@ -677,17 +666,26 @@ int64_t Clip::adjust_frame_number_minimum(int64_t frame_number) // Get or generate a blank frame std::shared_ptr 
Clip::GetOrCreateFrame(int64_t number) { - std::shared_ptr new_frame; try { // Debug output ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (from reader)", "number", number); // Attempt to get a frame (but this could fail if a reader has just been closed) - new_frame = reader->GetFrame(number); + std::shared_ptr reader_frame = reader->GetFrame(number); // Return real frame - if (new_frame) - return new_frame; + if (reader_frame) { + // Create a new copy of reader frame + // This allows a clip to modify the pixels and audio of this frame without + // changing the underlying reader's frame data + //std::shared_ptr reader_copy(new Frame(number, 1, 1, "#000000", reader_frame->GetAudioSamplesCount(), reader_frame->GetAudioChannelsCount())); + std::shared_ptr reader_copy(new Frame(*reader_frame.get())); + { + reader_copy->SampleRate(reader_frame->SampleRate()); + reader_copy->ChannelsLayout(reader_frame->ChannelsLayout()); + } + return reader_copy; + } } catch (const ReaderClosed & e) { // ... 
@@ -704,7 +702,7 @@ std::shared_ptr Clip::GetOrCreateFrame(int64_t number) ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (create blank)", "number", number, "estimated_samples_in_frame", estimated_samples_in_frame); // Create blank frame - new_frame = std::make_shared(number, reader->info.width, reader->info.height, "#000000", estimated_samples_in_frame, reader->info.channels); + std::shared_ptr new_frame = std::make_shared(number, reader->info.width, reader->info.height, "#000000", estimated_samples_in_frame, reader->info.channels); new_frame->SampleRate(reader->info.sample_rate); new_frame->ChannelsLayout(reader->info.channel_layout); new_frame->AddAudioSilence(estimated_samples_in_frame); @@ -793,11 +791,6 @@ std::string Clip::PropertiesJSON(int64_t requested_frame) const { root["has_video"]["choices"].append(add_property_choice_json("Off", 0, has_video.GetValue(requested_frame))); root["has_video"]["choices"].append(add_property_choice_json("On", 1, has_video.GetValue(requested_frame))); - root["crop_x"] = add_property_json("Crop X", crop_x.GetValue(requested_frame), "float", "", &crop_x, -1.0, 1.0, false, requested_frame); - root["crop_y"] = add_property_json("Crop Y", crop_y.GetValue(requested_frame), "float", "", &crop_y, -1.0, 1.0, false, requested_frame); - root["crop_width"] = add_property_json("Crop Width", crop_width.GetValue(requested_frame), "float", "", &crop_width, 0.0, 1.0, false, requested_frame); - root["crop_height"] = add_property_json("Crop Height", crop_height.GetValue(requested_frame), "float", "", &crop_height, 0.0, 1.0, false, requested_frame); - root["wave_color"] = add_property_json("Wave Color", 0.0, "color", "", &wave_color.red, 0, 255, false, requested_frame); root["wave_color"]["red"] = add_property_json("Red", wave_color.red.GetValue(requested_frame), "float", "", &wave_color.red, 0, 255, false, requested_frame); root["wave_color"]["blue"] = add_property_json("Blue", wave_color.blue.GetValue(requested_frame), 
"float", "", &wave_color.blue, 0, 255, false, requested_frame); @@ -828,10 +821,6 @@ Json::Value Clip::JsonValue() const { root["time"] = time.JsonValue(); root["volume"] = volume.JsonValue(); root["wave_color"] = wave_color.JsonValue(); - root["crop_width"] = crop_width.JsonValue(); - root["crop_height"] = crop_height.JsonValue(); - root["crop_x"] = crop_x.JsonValue(); - root["crop_y"] = crop_y.JsonValue(); root["shear_x"] = shear_x.JsonValue(); root["shear_y"] = shear_y.JsonValue(); root["origin_x"] = origin_x.JsonValue(); @@ -891,7 +880,7 @@ void Clip::SetJsonValue(const Json::Value root) { ClipBase::SetJsonValue(root); // Clear cache - final_cache.Clear(); + cache.Clear(); // Set data from Json (if key is found) if (!root["gravity"].isNull()) @@ -924,14 +913,6 @@ void Clip::SetJsonValue(const Json::Value root) { volume.SetJsonValue(root["volume"]); if (!root["wave_color"].isNull()) wave_color.SetJsonValue(root["wave_color"]); - if (!root["crop_width"].isNull()) - crop_width.SetJsonValue(root["crop_width"]); - if (!root["crop_height"].isNull()) - crop_height.SetJsonValue(root["crop_height"]); - if (!root["crop_x"].isNull()) - crop_x.SetJsonValue(root["crop_x"]); - if (!root["crop_y"].isNull()) - crop_y.SetJsonValue(root["crop_y"]); if (!root["shear_x"].isNull()) shear_x.SetJsonValue(root["shear_x"]); if (!root["shear_y"].isNull()) @@ -1010,13 +991,13 @@ void Clip::SetJsonValue(const Json::Value root) { if (type == "FFmpegReader") { // Create new reader - reader = new FFmpegReader(root["reader"]["path"].asString(), false); + reader = new openshot::FFmpegReader(root["reader"]["path"].asString(), false); reader->SetJsonValue(root["reader"]); } else if (type == "QtImageReader") { // Create new reader - reader = new QtImageReader(root["reader"]["path"].asString(), false); + reader = new openshot::QtImageReader(root["reader"]["path"].asString(), false); reader->SetJsonValue(root["reader"]); #ifdef USE_IMAGEMAGICK @@ -1036,25 +1017,25 @@ void Clip::SetJsonValue(const 
Json::Value root) { } else if (type == "ChunkReader") { // Create new reader - reader = new ChunkReader(root["reader"]["path"].asString(), (ChunkVersion) root["reader"]["chunk_version"].asInt()); + reader = new openshot::ChunkReader(root["reader"]["path"].asString(), (ChunkVersion) root["reader"]["chunk_version"].asInt()); reader->SetJsonValue(root["reader"]); } else if (type == "DummyReader") { // Create new reader - reader = new DummyReader(); + reader = new openshot::DummyReader(); reader->SetJsonValue(root["reader"]); } else if (type == "Timeline") { // Create new reader (always load from file again) // This prevents FrameMappers from being loaded on accident - reader = new Timeline(root["reader"]["path"].asString(), true); + reader = new openshot::Timeline(root["reader"]["path"].asString(), true); } // mark as managed reader and set parent if (reader) { - reader->SetClip(this); + reader->ParentClip(this); allocated_reader = reader; } @@ -1076,6 +1057,9 @@ void Clip::sort_effects() // Add an effect to the clip void Clip::AddEffect(EffectBase* effect) { + // Set parent clip pointer + effect->ParentClip(this); + // Add effect to list effects.push_back(effect); @@ -1083,7 +1067,7 @@ void Clip::AddEffect(EffectBase* effect) sort_effects(); // Clear cache - final_cache.Clear(); + cache.Clear(); } // Remove an effect from the clip @@ -1092,7 +1076,7 @@ void Clip::RemoveEffect(EffectBase* effect) effects.remove(effect); // Clear cache - final_cache.Clear(); + cache.Clear(); } // Apply effects to the source frame (if any) @@ -1165,7 +1149,7 @@ void Clip::apply_keyframes(std::shared_ptr frame, int width, int height) source_size.scale(width, height, Qt::KeepAspectRatio); // Debug output - ZmqLogger::Instance()->AppendDebugMethod("Clip::add_keyframes (Scale: SCALE_FIT)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height()); + ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Scale: SCALE_FIT)", 
"frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height()); break; } case (SCALE_STRETCH): { @@ -1173,7 +1157,7 @@ void Clip::apply_keyframes(std::shared_ptr frame, int width, int height) source_size.scale(width, height, Qt::IgnoreAspectRatio); // Debug output - ZmqLogger::Instance()->AppendDebugMethod("Clip::add_keyframes (Scale: SCALE_STRETCH)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height()); + ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Scale: SCALE_STRETCH)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height()); break; } case (SCALE_CROP): { @@ -1187,7 +1171,7 @@ void Clip::apply_keyframes(std::shared_ptr frame, int width, int height) source_size.scale(height_size.width(), height_size.height(), Qt::KeepAspectRatio); // Debug output - ZmqLogger::Instance()->AppendDebugMethod("Clip::add_keyframes (Scale: SCALE_CROP)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height()); + ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Scale: SCALE_CROP)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height()); break; } case (SCALE_NONE): { @@ -1199,50 +1183,11 @@ void Clip::apply_keyframes(std::shared_ptr frame, int width, int height) source_size.scale(width * source_width_ratio, height * source_height_ratio, Qt::KeepAspectRatio); // Debug output - ZmqLogger::Instance()->AppendDebugMethod("Clip::add_keyframes (Scale: SCALE_NONE)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height()); + ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Scale: SCALE_NONE)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height()); break; } } - float crop_x_value = 
crop_x.GetValue(frame->number); - float crop_y_value = crop_y.GetValue(frame->number); - float crop_w_value = crop_width.GetValue(frame->number); - float crop_h_value = crop_height.GetValue(frame->number); - switch(crop_gravity) - { - case (GRAVITY_TOP_LEFT): - // This is only here to prevent unused-enum warnings - break; - case (GRAVITY_TOP): - crop_x_value += 0.5; - break; - case (GRAVITY_TOP_RIGHT): - crop_x_value += 1.0; - break; - case (GRAVITY_LEFT): - crop_y_value += 0.5; - break; - case (GRAVITY_CENTER): - crop_x_value += 0.5; - crop_y_value += 0.5; - break; - case (GRAVITY_RIGHT): - crop_x_value += 1.0; - crop_y_value += 0.5; - break; - case (GRAVITY_BOTTOM_LEFT): - crop_y_value += 1.0; - break; - case (GRAVITY_BOTTOM): - crop_x_value += 0.5; - crop_y_value += 1.0; - break; - case (GRAVITY_BOTTOM_RIGHT): - crop_x_value += 1.0; - crop_y_value += 1.0; - break; - } - /* GRAVITY LOCATION - Initialize X & Y to the correct values (before applying location curves) */ float x = 0.0; // left float y = 0.0; // top @@ -1289,7 +1234,7 @@ void Clip::apply_keyframes(std::shared_ptr frame, int width, int height) } // Debug output - ZmqLogger::Instance()->AppendDebugMethod("Clip::add_keyframes (Gravity)", "frame->number", frame->number, "source_clip->gravity", gravity, "scaled_source_width", scaled_source_width, "scaled_source_height", scaled_source_height); + ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Gravity)", "frame->number", frame->number, "source_clip->gravity", gravity, "scaled_source_width", scaled_source_width, "scaled_source_height", scaled_source_height); /* LOCATION, ROTATION, AND SCALE */ float r = rotation.GetValue(frame->number); // rotate in degrees @@ -1304,7 +1249,7 @@ void Clip::apply_keyframes(std::shared_ptr frame, int width, int height) QTransform transform; // Transform source image (if needed) - ZmqLogger::Instance()->AppendDebugMethod("Clip::add_keyframes (Build QTransform - if needed)", "frame->number", frame->number, "x", 
x, "y", y, "r", r, "sx", sx, "sy", sy); + ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Build QTransform - if needed)", "frame->number", frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy); if (!isEqual(x, 0) || !isEqual(y, 0)) { // TRANSLATE/MOVE CLIP @@ -1333,7 +1278,7 @@ void Clip::apply_keyframes(std::shared_ptr frame, int width, int height) } // Debug output - ZmqLogger::Instance()->AppendDebugMethod("Clip::add_keyframes (Transform: Composite Image Layer: Prepare)", "frame->number", frame->number, "transformed", transformed); + ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Transform: Composite Image Layer: Prepare)", "frame->number", frame->number, "transformed", transformed); /* COMPOSITE SOURCE IMAGE (LAYER) ONTO FINAL IMAGE */ std::shared_ptr new_image; @@ -1352,31 +1297,34 @@ void Clip::apply_keyframes(std::shared_ptr frame, int width, int height) painter.setCompositionMode(QPainter::CompositionMode_SourceOver); painter.drawImage(0, 0, *source_image); - // Draw frame #'s on top of image (if needed) - if (display != FRAME_DISPLAY_NONE) { - std::stringstream frame_number_str; - switch (display) - { - case (FRAME_DISPLAY_NONE): - // This is only here to prevent unused-enum warnings - break; + if (timeline) { + Timeline *t = (Timeline *) timeline; - case (FRAME_DISPLAY_CLIP): - frame_number_str << frame->number; - break; + // Draw frame #'s on top of image (if needed) + if (display != FRAME_DISPLAY_NONE) { + std::stringstream frame_number_str; + switch (display) { + case (FRAME_DISPLAY_NONE): + // This is only here to prevent unused-enum warnings + break; - case (FRAME_DISPLAY_TIMELINE): - frame_number_str << "N/A"; - break; + case (FRAME_DISPLAY_CLIP): + frame_number_str << frame->number; + break; - case (FRAME_DISPLAY_BOTH): - frame_number_str << "N/A" << " (" << frame->number << ")"; - break; + case (FRAME_DISPLAY_TIMELINE): + frame_number_str << (position * t->info.fps.ToFloat()) + frame->number; + break; + + 
case (FRAME_DISPLAY_BOTH): + frame_number_str << (position * t->info.fps.ToFloat()) + frame->number << " (" << frame->number << ")"; + break; + } + + // Draw frame number on top of image + painter.setPen(QColor("#ffffff")); + painter.drawText(20, 20, QString(frame_number_str.str().c_str())); } - - // Draw frame number on top of image - painter.setPen(QColor("#ffffff")); - painter.drawText(20, 20, QString(frame_number_str.str().c_str())); } painter.end(); diff --git a/src/EffectBase.cpp b/src/EffectBase.cpp index 05ed97c2..fcf00645 100644 --- a/src/EffectBase.cpp +++ b/src/EffectBase.cpp @@ -138,3 +138,13 @@ Json::Value EffectBase::JsonInfo() const { // return JsonValue return root; } + +/// Parent clip object of this reader (which can be unparented and NULL) +openshot::ClipBase* EffectBase::ParentClip() { + return clip; +} + +/// Set parent clip object of this reader +void EffectBase::ParentClip(openshot::ClipBase* new_clip) { + clip = new_clip; +} \ No newline at end of file diff --git a/src/FFmpegReader.cpp b/src/FFmpegReader.cpp index c8ce141f..aa116e3a 100644 --- a/src/FFmpegReader.cpp +++ b/src/FFmpegReader.cpp @@ -1286,15 +1286,16 @@ void FFmpegReader::ProcessVideoPacket(int64_t requested_frame) { // without losing quality. NOTE: We cannot go smaller than the timeline itself, or the add_layer timeline // method will scale it back to timeline size before scaling it smaller again. This needs to be fixed in // the future. 
- int max_width = openshot::Settings::Instance()->MAX_WIDTH; - if (max_width <= 0) - max_width = info.width; - int max_height = openshot::Settings::Instance()->MAX_HEIGHT; - if (max_height <= 0) - max_height = info.height; + int max_width = info.width; + int max_height = info.height; - Clip *parent = (Clip *) GetClip(); + Clip *parent = (Clip *) ParentClip(); if (parent) { + if (parent->ParentTimeline()) { + // Set max width/height based on parent clip's timeline (if attached to a timeline) + max_width = parent->ParentTimeline()->preview_width; + max_height = parent->ParentTimeline()->preview_height; + } if (parent->scale == SCALE_FIT || parent->scale == SCALE_STRETCH) { // Best fit or Stretch scaling (based on max timeline size * scaling keyframes) float max_scale_x = parent->scale_x.GetMaxPoint().co.Y; diff --git a/src/Qt/PlayerDemo.cpp b/src/Qt/PlayerDemo.cpp index e5f0e11d..54813b37 100644 --- a/src/Qt/PlayerDemo.cpp +++ b/src/Qt/PlayerDemo.cpp @@ -105,7 +105,6 @@ void PlayerDemo::keyPressEvent(QKeyEvent *event) } else if (event->key() == Qt::Key_J) { - std::cout << "BACKWARD" << player->Speed() - 1 << std::endl; if (player->Speed() - 1 != 0) player->Speed(player->Speed() - 1); else @@ -115,7 +114,6 @@ void PlayerDemo::keyPressEvent(QKeyEvent *event) player->Play(); } else if (event->key() == Qt::Key_L) { - std::cout << "FORWARD" << player->Speed() + 1 << std::endl; if (player->Speed() + 1 != 0) player->Speed(player->Speed() + 1); else @@ -126,19 +124,16 @@ void PlayerDemo::keyPressEvent(QKeyEvent *event) } else if (event->key() == Qt::Key_Left) { - std::cout << "FRAME STEP -1" << std::endl; if (player->Speed() != 0) player->Speed(0); player->Seek(player->Position() - 1); } else if (event->key() == Qt::Key_Right) { - std::cout << "FRAME STEP +1" << std::endl; if (player->Speed() != 0) player->Speed(0); player->Seek(player->Position() + 1); } else if (event->key() == Qt::Key_Escape) { - std::cout << "QUIT PLAYER" << std::endl; QWidget *pWin = 
QApplication::activeWindow(); pWin->hide(); diff --git a/src/QtImageReader.cpp b/src/QtImageReader.cpp index cf64ef93..725d8818 100644 --- a/src/QtImageReader.cpp +++ b/src/QtImageReader.cpp @@ -180,15 +180,16 @@ std::shared_ptr QtImageReader::GetFrame(int64_t requested_frame) // without losing quality. NOTE: We cannot go smaller than the timeline itself, or the add_layer timeline // method will scale it back to timeline size before scaling it smaller again. This needs to be fixed in // the future. - int max_width = Settings::Instance()->MAX_WIDTH; - if (max_width <= 0) - max_width = info.width; - int max_height = Settings::Instance()->MAX_HEIGHT; - if (max_height <= 0) - max_height = info.height; + int max_width = info.width; + int max_height = info.height; - Clip* parent = (Clip*) GetClip(); + Clip* parent = (Clip*) ParentClip(); if (parent) { + if (parent->ParentTimeline()) { + // Set max width/height based on parent clip's timeline (if attached to a timeline) + max_width = parent->ParentTimeline()->preview_width; + max_height = parent->ParentTimeline()->preview_height; + } if (parent->scale == SCALE_FIT || parent->scale == SCALE_STRETCH) { // Best fit or Stretch scaling (based on max timeline size * scaling keyframes) float max_scale_x = parent->scale_x.GetMaxPoint().co.Y; diff --git a/src/ReaderBase.cpp b/src/ReaderBase.cpp index 474dc624..653756c5 100644 --- a/src/ReaderBase.cpp +++ b/src/ReaderBase.cpp @@ -63,7 +63,7 @@ ReaderBase::ReaderBase() info.audio_timebase = Fraction(); // Init parent clip - parent = NULL; + clip = NULL; } // Display file information @@ -251,11 +251,11 @@ void ReaderBase::SetJsonValue(const Json::Value root) { } /// Parent clip object of this reader (which can be unparented and NULL) -openshot::ClipBase* ReaderBase::GetClip() { - return parent; +openshot::ClipBase* ReaderBase::ParentClip() { + return clip; } /// Set parent clip object of this reader -void ReaderBase::SetClip(openshot::ClipBase* clip) { - parent = clip; +void 
ReaderBase::ParentClip(openshot::ClipBase* new_clip) { + clip = new_clip; } diff --git a/src/Settings.cpp b/src/Settings.cpp index e48fd981..d946d227 100644 --- a/src/Settings.cpp +++ b/src/Settings.cpp @@ -45,8 +45,6 @@ Settings *Settings::Instance() m_pInstance = new Settings; m_pInstance->HARDWARE_DECODER = 0; m_pInstance->HIGH_QUALITY_SCALING = false; - m_pInstance->MAX_WIDTH = 0; - m_pInstance->MAX_HEIGHT = 0; m_pInstance->WAIT_FOR_VIDEO_PROCESSING_TASK = false; m_pInstance->OMP_THREADS = 12; m_pInstance->FF_THREADS = 8; diff --git a/src/Timeline.cpp b/src/Timeline.cpp index d9b23ca5..8c1429b2 100644 --- a/src/Timeline.cpp +++ b/src/Timeline.cpp @@ -52,6 +52,8 @@ Timeline::Timeline(int width, int height, Fraction fps, int sample_rate, int cha // Init FileInfo struct (clear all values) info.width = width; info.height = height; + preview_width = info.width; + preview_height = info.height; info.fps = fps; info.sample_rate = sample_rate; info.channels = channels; @@ -229,6 +231,9 @@ Timeline::~Timeline() { // Add an openshot::Clip to the timeline void Timeline::AddClip(Clip* clip) { + // Assign timeline to clip + clip->ParentTimeline(this); + // All clips should be converted to the frame rate of this timeline if (auto_map_clips) // Apply framemapper (or update existing framemapper) @@ -244,6 +249,9 @@ void Timeline::AddClip(Clip* clip) // Add an effect to the timeline void Timeline::AddEffect(EffectBase* effect) { + // Assign timeline to effect + effect->ParentTimeline(this); + // Add effect to list effects.push_back(effect); @@ -368,7 +376,7 @@ std::shared_ptr Timeline::GetOrCreateFrame(Clip* clip, int64_t number) // Attempt to get a frame (but this could fail if a reader has just been closed) #pragma omp critical (T_GetOtCreateFrame) - new_frame = std::shared_ptr(clip->GetFrame(number, Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, samples_in_frame)); + new_frame = std::shared_ptr(clip->GetFrame(number)); // Return real frame return 
new_frame; @@ -385,7 +393,7 @@ std::shared_ptr Timeline::GetOrCreateFrame(Clip* clip, int64_t number) ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (create blank)", "number", number, "samples_in_frame", samples_in_frame); // Create blank frame - new_frame = std::make_shared(number, Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, "#000000", samples_in_frame, info.channels); + new_frame = std::make_shared(number, preview_width, preview_height, "#000000", samples_in_frame, info.channels); #pragma omp critical (T_GetOtCreateFrame) { new_frame->SampleRate(info.sample_rate); @@ -487,29 +495,8 @@ void Timeline::add_layer(std::shared_ptr new_frame, Clip* source_clip, in // Skip the rest of the image processing for performance reasons return; - // Debug output - ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Get Source Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number); - - // Get actual frame image data - source_image = source_frame->GetImage(); - - // Debug output - ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Prepare)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "source_image->width()", source_image->width()); - - /* COMPOSITE SOURCE IMAGE (LAYER) ONTO FINAL IMAGE */ - std::shared_ptr new_image; - new_image = new_frame->GetImage(); - - // Load timeline's new frame image into a QPainter - QPainter painter(new_image.get()); - - // Composite a new layer onto the image - painter.setCompositionMode(QPainter::CompositionMode_SourceOver); - painter.drawImage(0, 0, *source_image, 0, 0, source_image->width(), source_image->height()); - painter.end(); - // Add new QImage to frame - new_frame->AddImage(new_image); + new_frame->AddImage(source_frame->GetImage()); // Debug output 
ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Completed)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width()); @@ -668,10 +655,9 @@ std::shared_ptr Timeline::GetFrame(int64_t requested_frame) // Get clip frame # long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1; long clip_frame_number = frame_number - clip_start_position + clip_start_frame; - int samples_in_frame = Frame::GetSamplesPerFrame(frame_number, info.fps, info.sample_rate, info.channels); // Cache clip object - clip->GetFrame(clip_frame_number, Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, samples_in_frame); + clip->GetFrame(clip_frame_number); } } } @@ -689,7 +675,7 @@ std::shared_ptr Timeline::GetFrame(int64_t requested_frame) int samples_in_frame = Frame::GetSamplesPerFrame(frame_number, info.fps, info.sample_rate, info.channels); // Create blank frame (which will become the requested frame) - std::shared_ptr new_frame(std::make_shared(frame_number, Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, "#000000", samples_in_frame, info.channels)); + std::shared_ptr new_frame(std::make_shared(frame_number, preview_width, preview_height, "#000000", samples_in_frame, info.channels)); #pragma omp critical (T_GetFrame) { new_frame->AddAudioSilence(samples_in_frame); @@ -703,7 +689,7 @@ std::shared_ptr Timeline::GetFrame(int64_t requested_frame) // Add Background Color to 1st layer (if animated or not black) if ((color.red.GetCount() > 1 || color.green.GetCount() > 1 || color.blue.GetCount() > 1) || (color.red.GetValue(frame_number) != 0.0 || color.green.GetValue(frame_number) != 0.0 || color.blue.GetValue(frame_number) != 0.0)) - new_frame->AddColor(Settings::Instance()->MAX_WIDTH, Settings::Instance()->MAX_HEIGHT, color.GetColorHex(frame_number)); + new_frame->AddColor(preview_width, preview_height, color.GetColorHex(frame_number)); // Debug output 
ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Loop through clips)", "frame_number", frame_number, "clips.size()", clips.size(), "nearby_clips.size()", nearby_clips.size()); @@ -967,6 +953,10 @@ void Timeline::SetJsonValue(const Json::Value root) { info.video_length = info.fps.ToFloat() * info.duration; } + // Update preview settings + preview_width = info.width; + preview_height = info.height; + // Re-open if needed if (was_open) Open(); @@ -1195,6 +1185,12 @@ void Timeline::apply_json_to_effects(Json::Value change, EffectBase* existing_ef // Add Effect to Timeline AddEffect(e); + + // Clear cache on parent clip (if any) + Clip* parent_clip = (Clip*) e->ParentClip(); + if (parent_clip && parent_clip->GetCache()) { + parent_clip->GetCache()->Clear(); + } } } else if (change_type == "update") { @@ -1207,6 +1203,12 @@ void Timeline::apply_json_to_effects(Json::Value change, EffectBase* existing_ef int64_t old_ending_frame = ((existing_effect->Position() + existing_effect->Duration()) * info.fps.ToDouble()) + 1; final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8); + // Clear cache on parent clip (if any) + Clip* parent_clip = (Clip*) existing_effect->ParentClip(); + if (parent_clip && parent_clip->GetCache()) { + parent_clip->GetCache()->Clear(); + } + // Update effect properties from JSON existing_effect->SetJsonValue(change["value"]); } @@ -1221,6 +1223,12 @@ void Timeline::apply_json_to_effects(Json::Value change, EffectBase* existing_ef int64_t old_ending_frame = ((existing_effect->Position() + existing_effect->Duration()) * info.fps.ToDouble()) + 1; final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8); + // Clear cache on parent clip (if any) + Clip* parent_clip = (Clip*) existing_effect->ParentClip(); + if (parent_clip && parent_clip->GetCache()) { + parent_clip->GetCache()->Clear(); + } + // Remove effect from timeline RemoveEffect(existing_effect); } @@ -1239,7 +1247,7 @@ void 
Timeline::apply_json_to_timeline(Json::Value change) { sub_key = change["key"][(uint)1].asString(); // Clear entire cache - final_cache->Clear(); + ClearAllCache(); // Determine type of change operation if (change_type == "insert" || change_type == "update") { @@ -1263,12 +1271,16 @@ void Timeline::apply_json_to_timeline(Json::Value change) { info.duration = change["value"].asDouble(); info.video_length = info.fps.ToFloat() * info.duration; } - else if (root_key == "width") + else if (root_key == "width") { // Set width info.width = change["value"].asInt(); - else if (root_key == "height") + preview_width = info.width; + } + else if (root_key == "height") { // Set height info.height = change["value"].asInt(); + preview_height = info.height; + } else if (root_key == "fps" && sub_key == "" && change["value"].isObject()) { // Set fps fraction if (!change["value"]["num"].isNull()) @@ -1360,6 +1372,7 @@ void Timeline::ClearAllCache() { for (auto clip : clips) { // Clear cache on clip + clip->GetCache()->Clear(); clip->Reader()->GetCache()->Clear(); // Clear nested Reader (if any) @@ -1382,7 +1395,7 @@ void Timeline::SetMaxSize(int width, int height) { // Scale QSize up to proposed size display_ratio_size.scale(proposed_size, Qt::KeepAspectRatio); - // Set max size - Settings::Instance()->MAX_WIDTH = display_ratio_size.width(); - Settings::Instance()->MAX_HEIGHT = display_ratio_size.height(); + // Update preview settings + preview_width = display_ratio_size.width(); + preview_height = display_ratio_size.height(); } diff --git a/src/TimelineBase.cpp b/src/TimelineBase.cpp new file mode 100644 index 00000000..f75e1ddb --- /dev/null +++ b/src/TimelineBase.cpp @@ -0,0 +1,33 @@ +/** + * @file + * @brief Source file for Timeline class + * @author Jonathan Thomas + * + * @ref License + */ + +/* LICENSE + * + * Copyright (c) 2008-2019 OpenShot Studios, LLC + * . 
This file is part of + * OpenShot Library (libopenshot), an open-source project dedicated to + * delivering high quality video editing and animation solutions to the + * world. For more information visit . + * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see . + */ + +#include "../include/TimelineBase.h" + +using namespace openshot; diff --git a/src/bindings/python/openshot.i b/src/bindings/python/openshot.i index 53e514c1..b5be39c4 100644 --- a/src/bindings/python/openshot.i +++ b/src/bindings/python/openshot.i @@ -92,6 +92,7 @@ #include "KeyFrame.h" #include "RendererBase.h" #include "Settings.h" +#include "TimelineBase.h" #include "Timeline.h" #include "ZmqLogger.h" #include "AudioDeviceInfo.h" @@ -203,6 +204,7 @@ %include "KeyFrame.h" %include "RendererBase.h" %include "Settings.h" +%include "TimelineBase.h" %include "Timeline.h" %include "ZmqLogger.h" %include "AudioDeviceInfo.h" diff --git a/src/bindings/ruby/openshot.i b/src/bindings/ruby/openshot.i index 2f24d220..d36990dd 100644 --- a/src/bindings/ruby/openshot.i +++ b/src/bindings/ruby/openshot.i @@ -103,6 +103,7 @@ namespace std { #include "KeyFrame.h" #include "RendererBase.h" #include "Settings.h" +#include "TimelineBase.h" #include "Timeline.h" #include "ZmqLogger.h" #include "AudioDeviceInfo.h" @@ -192,6 +193,7 @@ namespace std { %include "KeyFrame.h" %include "RendererBase.h" 
%include "Settings.h" +%include "TimelineBase.h" %include "Timeline.h" %include "ZmqLogger.h" %include "AudioDeviceInfo.h" From 206578df3fd31ddd2122e99a6d3cde6ff546456c Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Mon, 5 Oct 2020 23:08:31 -0500 Subject: [PATCH 05/14] Fixing some regressions on image merging --- include/ClipBase.h | 10 +++++++++- include/TimelineBase.h | 3 +++ src/Timeline.cpp | 23 ++++++++++++++++++++++- src/TimelineBase.cpp | 8 ++++++++ 4 files changed, 42 insertions(+), 2 deletions(-) diff --git a/include/ClipBase.h b/include/ClipBase.h index e335c501..11d2271f 100644 --- a/include/ClipBase.h +++ b/include/ClipBase.h @@ -69,7 +69,15 @@ namespace openshot { CacheMemory cache; /// Constructor for the base clip - ClipBase() { }; + ClipBase() { + // Initialize values + position = 0.0; + layer = 0; + start = 0.0; + end = 0.0; + previous_properties = ""; + timeline = NULL; + }; // Compare a clip using the Position() property bool operator< ( ClipBase& a) { return (Position() < a.Position()); } diff --git a/include/TimelineBase.h b/include/TimelineBase.h index d065a6de..af6a65a2 100644 --- a/include/TimelineBase.h +++ b/include/TimelineBase.h @@ -41,6 +41,9 @@ namespace openshot { public: int preview_width; ///< Optional preview width of timeline image. If your preview window is smaller than the timeline, it's recommended to set this. int preview_height; ///< Optional preview width of timeline image. If your preview window is smaller than the timeline, it's recommended to set this. 
+ + /// Constructor for the base timeline + TimelineBase(); }; } diff --git a/src/Timeline.cpp b/src/Timeline.cpp index 8c1429b2..f75becaf 100644 --- a/src/Timeline.cpp +++ b/src/Timeline.cpp @@ -495,8 +495,29 @@ void Timeline::add_layer(std::shared_ptr new_frame, Clip* source_clip, in // Skip the rest of the image processing for performance reasons return; + // Debug output + ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Get Source Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number); + + // Get actual frame image data + source_image = source_frame->GetImage(); + + // Debug output + ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Prepare)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "source_image->width()", source_image->width()); + + /* COMPOSITE SOURCE IMAGE (LAYER) ONTO FINAL IMAGE */ + std::shared_ptr new_image; + new_image = new_frame->GetImage(); + + // Load timeline's new frame image into a QPainter + QPainter painter(new_image.get()); + + // Composite a new layer onto the image + painter.setCompositionMode(QPainter::CompositionMode_SourceOver); + painter.drawImage(0, 0, *source_image, 0, 0, source_image->width(), source_image->height()); + painter.end(); + // Add new QImage to frame - new_frame->AddImage(source_frame->GetImage()); + new_frame->AddImage(new_image); // Debug output ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Completed)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width()); diff --git a/src/TimelineBase.cpp b/src/TimelineBase.cpp index f75e1ddb..cc59a243 100644 --- a/src/TimelineBase.cpp +++ b/src/TimelineBase.cpp @@ -31,3 +31,11 @@ #include "../include/TimelineBase.h" using namespace openshot; + +/// 
Constructor for the base timeline +TimelineBase::TimelineBase() +{ + // Init preview size (default) + preview_width = 1920; + preview_height = 1080; +} From 9eb859fa2c1c0c26b28bb5e32fc20ac30999feb8 Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Thu, 8 Oct 2020 14:44:01 -0500 Subject: [PATCH 06/14] Initialize parent clip variable --- src/EffectBase.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/EffectBase.cpp b/src/EffectBase.cpp index fcf00645..fb7b2269 100644 --- a/src/EffectBase.cpp +++ b/src/EffectBase.cpp @@ -41,6 +41,7 @@ void EffectBase::InitEffectInfo() Start(0.0); End(0.0); Order(0); + ParentClip(NULL); info.has_video = false; info.has_audio = false; From 58cedb612c0c56ae76b58a574231b470590873c8 Mon Sep 17 00:00:00 2001 From: Brenno Date: Sat, 10 Oct 2020 17:01:24 -0300 Subject: [PATCH 07/14] Implemented position remapper inside FrameMapper to fix audio noise when exporting to different fps The FrameMapper class now receives the updated clip position and returns the correct amount of samples for a given frame number --- include/FrameMapper.h | 8 +- src/FrameMapper.cpp | 39 +++++-- src/Timeline.cpp | 10 +- tests/FrameMapper_Tests.cpp | 197 ++++++++++++++++++++++++++++++++++-- 4 files changed, 229 insertions(+), 25 deletions(-) diff --git a/include/FrameMapper.h b/include/FrameMapper.h index e78401a9..85c933d2 100644 --- a/include/FrameMapper.h +++ b/include/FrameMapper.h @@ -147,6 +147,9 @@ namespace openshot bool is_dirty; // When this is true, the next call to GetFrame will re-init the mapping SWRCONTEXT *avr; // Audio resampling context object + float position; + float start; + // Internal methods used by init void AddField(int64_t frame); void AddField(Field field); @@ -166,13 +169,13 @@ namespace openshot std::vector frames; // List of all frames /// Default constructor for openshot::FrameMapper class - FrameMapper(ReaderBase *reader, Fraction target_fps, PulldownType target_pulldown, int target_sample_rate, int target_channels, 
ChannelLayout target_channel_layout); + FrameMapper(ReaderBase *reader, Fraction target_fps, PulldownType target_pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout, float clipPosition, float clipStart); /// Destructor virtual ~FrameMapper(); /// Change frame rate or audio mapping details - void ChangeMapping(Fraction target_fps, PulldownType pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout); + void ChangeMapping(Fraction target_fps, PulldownType pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout, float clipPosition, float clipStart); /// Close the openshot::FrameMapper and internal reader void Close() override; @@ -218,6 +221,7 @@ namespace openshot /// Resample audio and map channels (if needed) void ResampleMappedAudio(std::shared_ptr frame, int64_t original_frame_number); + int64_t ConvPositon(int64_t clip_frame_number); }; } diff --git a/src/FrameMapper.cpp b/src/FrameMapper.cpp index 4c561f8f..d05cf7f6 100644 --- a/src/FrameMapper.cpp +++ b/src/FrameMapper.cpp @@ -33,7 +33,7 @@ using namespace std; using namespace openshot; -FrameMapper::FrameMapper(ReaderBase *reader, Fraction target, PulldownType target_pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout) : +FrameMapper::FrameMapper(ReaderBase *reader, Fraction target, PulldownType target_pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout, float clipPosition, float clipStart) : reader(reader), target(target), pulldown(target_pulldown), is_dirty(true), avr(NULL) { // Set the original frame rate from the reader @@ -52,6 +52,8 @@ FrameMapper::FrameMapper(ReaderBase *reader, Fraction target, PulldownType targe info.width = reader->info.width; info.height = reader->info.height; + position = clipPosition; + start = clipStart; // Used to toggle odd / even fields field_toggle = true; @@ -257,12 +259,12 @@ void 
FrameMapper::Init() // the original sample rate. int64_t end_samples_frame = start_samples_frame; int end_samples_position = start_samples_position; - int remaining_samples = Frame::GetSamplesPerFrame(frame_number, target, reader->info.sample_rate, reader->info.channels); + int remaining_samples = Frame::GetSamplesPerFrame(ConvPositon(frame_number), target, reader->info.sample_rate, reader->info.channels); while (remaining_samples > 0) { // get original samples - int original_samples = Frame::GetSamplesPerFrame(end_samples_frame, original, reader->info.sample_rate, reader->info.channels) - end_samples_position; + int original_samples = Frame::GetSamplesPerFrame(ConvPositon(end_samples_frame), original, reader->info.sample_rate, reader->info.channels) - end_samples_position; // Enough samples if (original_samples >= remaining_samples) @@ -282,12 +284,12 @@ void FrameMapper::Init() // Create the sample mapping struct - SampleRange Samples = {start_samples_frame, start_samples_position, end_samples_frame, end_samples_position, Frame::GetSamplesPerFrame(frame_number, target, reader->info.sample_rate, reader->info.channels)}; + SampleRange Samples = {start_samples_frame, start_samples_position, end_samples_frame, end_samples_position, Frame::GetSamplesPerFrame(ConvPositon(frame_number), target, reader->info.sample_rate, reader->info.channels)}; // Reset the audio variables start_samples_frame = end_samples_frame; start_samples_position = end_samples_position + 1; - if (start_samples_position >= Frame::GetSamplesPerFrame(start_samples_frame, original, reader->info.sample_rate, reader->info.channels)) + if (start_samples_position >= Frame::GetSamplesPerFrame(ConvPositon(start_samples_frame), original, reader->info.sample_rate, reader->info.channels)) { start_samples_frame += 1; // increment the frame (since we need to wrap onto the next one) start_samples_position = 0; // reset to 0, since we wrapped @@ -354,7 +356,7 @@ std::shared_ptr 
FrameMapper::GetOrCreateFrame(int64_t number) std::shared_ptr new_frame; // Init some basic properties about this frame (keep sample rate and # channels the same as the original reader for now) - int samples_in_frame = Frame::GetSamplesPerFrame(number, target, reader->info.sample_rate, reader->info.channels); + int samples_in_frame = Frame::GetSamplesPerFrame(ConvPositon(number), target, reader->info.sample_rate, reader->info.channels); try { // Debug output @@ -427,7 +429,7 @@ std::shared_ptr FrameMapper::GetFrame(int64_t requested_frame) // Get # of channels in the actual frame int channels_in_frame = mapped_frame->GetAudioChannelsCount(); - int samples_in_frame = Frame::GetSamplesPerFrame(frame_number, target, mapped_frame->SampleRate(), channels_in_frame); + int samples_in_frame = Frame::GetSamplesPerFrame(ConvPositon(frame_number), target, mapped_frame->SampleRate(), channels_in_frame); // Determine if mapped frame is identical to source frame // including audio sample distribution according to mapped.Samples, @@ -687,6 +689,7 @@ Json::Value FrameMapper::JsonValue() const { // Create root json object Json::Value root = ReaderBase::JsonValue(); // get parent properties root["type"] = "FrameMapper"; + root["position"] = position; // return JsonValue return root; @@ -715,6 +718,10 @@ void FrameMapper::SetJsonValue(const Json::Value root) { // Set parent data ReaderBase::SetJsonValue(root); + if(!root["position"].isNull()){ + position = root["position"].asDouble(); + } + // Re-Open path, and re-init everything (if needed) if (reader) { @@ -724,7 +731,7 @@ void FrameMapper::SetJsonValue(const Json::Value root) { } // Change frame rate or audio mapping details -void FrameMapper::ChangeMapping(Fraction target_fps, PulldownType target_pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout) +void FrameMapper::ChangeMapping(Fraction target_fps, PulldownType target_pulldown, int target_sample_rate, int target_channels, ChannelLayout 
target_channel_layout, float clipPosition, float clipStart) { ZmqLogger::Instance()->AppendDebugMethod("FrameMapper::ChangeMapping", "target_fps.num", target_fps.num, "target_fps.den", target_fps.den, "target_pulldown", target_pulldown, "target_sample_rate", target_sample_rate, "target_channels", target_channels, "target_channel_layout", target_channel_layout); @@ -743,6 +750,9 @@ void FrameMapper::ChangeMapping(Fraction target_fps, PulldownType target_pulldow info.channels = target_channels; info.channel_layout = target_channel_layout; + position = clipPosition; + start = clipStart; + // Clear cache final_cache.Clear(); @@ -826,7 +836,7 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr frame, int64_t orig } // Update total samples & input frame size (due to bigger or smaller data types) - total_frame_samples = Frame::GetSamplesPerFrame(frame->number, target, info.sample_rate, info.channels); + total_frame_samples = Frame::GetSamplesPerFrame(ConvPositon(frame->number), target, info.sample_rate, info.channels); ZmqLogger::Instance()->AppendDebugMethod("FrameMapper::ResampleMappedAudio (adjust # of samples)", "total_frame_samples", total_frame_samples, "info.sample_rate", info.sample_rate, "sample_rate_in_frame", sample_rate_in_frame, "info.channels", info.channels, "channels_in_frame", channels_in_frame, "original_frame_number", original_frame_number); @@ -935,3 +945,14 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr frame, int64_t orig delete[] resampled_samples; resampled_samples = NULL; } + +int64_t FrameMapper::ConvPositon(int64_t clip_frame_number){ + + int64_t clip_start_frame = (start * info.fps.ToDouble()) + 1; + int64_t clip_start_position = round(position * info.fps.ToDouble()) + 1; + + int64_t frame_number = clip_frame_number + clip_start_position - clip_start_frame; + + ///std::cout << "Conv Position " << round(position * info.fps.ToDouble()) << " position: " << position << " info::fps: " << info.fps.ToDouble() << std::endl; + return 
frame_number; +} \ No newline at end of file diff --git a/src/Timeline.cpp b/src/Timeline.cpp index b2f46519..f40272d8 100644 --- a/src/Timeline.cpp +++ b/src/Timeline.cpp @@ -340,14 +340,14 @@ void Timeline::apply_mapper_to_clip(Clip* clip) } else { // Create a new FrameMapper to wrap the current reader - FrameMapper* mapper = new FrameMapper(clip->Reader(), info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout); + FrameMapper* mapper = new FrameMapper(clip->Reader(), info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout, clip->Position(), clip->Start()); allocated_frame_mappers.insert(mapper); clip_reader = (ReaderBase*) mapper; } // Update the mapping FrameMapper* clip_mapped_reader = (FrameMapper*) clip_reader; - clip_mapped_reader->ChangeMapping(info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout); + clip_mapped_reader->ChangeMapping(info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout, clip->Position(), clip->Start()); // Update clip reader clip->Reader(clip_reader); @@ -545,11 +545,12 @@ void Timeline::add_layer(std::shared_ptr new_frame, Clip* source_clip, in // Currently, the ResampleContext sometimes leaves behind a few samples for the next call, and the // number of samples returned is variable... and does not match the number expected. // This is a crude solution at best. =) - if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount()) + if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount()){ // Force timeline frame to match the source frame #pragma omp critical (T_addLayer) - new_frame->ResizeAudio(info.channels, source_frame->GetAudioSamplesCount(), info.sample_rate, info.channel_layout); + new_frame->ResizeAudio(info.channels, source_frame->GetAudioSamplesCount(), info.sample_rate, info.channel_layout); + } // Copy audio samples (and set initial volume). Mix samples with existing audio samples. 
The gains are added together, to // be sure to set the gain's correctly, so the sum does not exceed 1.0 (of audio distortion will happen). #pragma omp critical (T_addLayer) @@ -909,6 +910,7 @@ bool Timeline::isEqual(double a, double b) // Get an openshot::Frame object for a specific frame number of this reader. std::shared_ptr Timeline::GetFrame(int64_t requested_frame) { + // Adjust out of bounds frame number if (requested_frame < 1) requested_frame = 1; diff --git a/tests/FrameMapper_Tests.cpp b/tests/FrameMapper_Tests.cpp index 921f3a15..02634c37 100644 --- a/tests/FrameMapper_Tests.cpp +++ b/tests/FrameMapper_Tests.cpp @@ -42,7 +42,7 @@ TEST(FrameMapper_Get_Valid_Frame) DummyReader r(Fraction(24,1), 720, 480, 22000, 2, 5.0); // Create mapping between 24 fps and 29.97 fps using classic pulldown - FrameMapper mapping(&r, Fraction(30000, 1001), PULLDOWN_CLASSIC, 22000, 2, LAYOUT_STEREO); + FrameMapper mapping(&r, Fraction(30000, 1001), PULLDOWN_CLASSIC, 22000, 2, LAYOUT_STEREO, 0.0, 0.0); try { @@ -63,7 +63,7 @@ TEST(FrameMapper_Invalid_Frame_Too_Small) DummyReader r(Fraction(24,1), 720, 480, 22000, 2, 5.0); // Create mapping 24 fps and 29.97 fps - FrameMapper mapping(&r, Fraction(30000, 1001), PULLDOWN_CLASSIC, 22000, 2, LAYOUT_STEREO); + FrameMapper mapping(&r, Fraction(30000, 1001), PULLDOWN_CLASSIC, 22000, 2, LAYOUT_STEREO, 0.0, 0.0); // Check invalid frame number CHECK_THROW(mapping.GetMappedFrame(0), OutOfBoundsFrame); @@ -76,7 +76,7 @@ TEST(FrameMapper_24_fps_to_30_fps_Pulldown_Classic) DummyReader r(Fraction(24,1), 720, 480, 22000, 2, 5.0); // Create mapping 24 fps and 30 fps - FrameMapper mapping(&r, Fraction(30, 1), PULLDOWN_CLASSIC, 22000, 2, LAYOUT_STEREO); + FrameMapper mapping(&r, Fraction(30, 1), PULLDOWN_CLASSIC, 22000, 2, LAYOUT_STEREO, 0.0, 0.0); MappedFrame frame2 = mapping.GetMappedFrame(2); MappedFrame frame3 = mapping.GetMappedFrame(3); @@ -93,7 +93,7 @@ TEST(FrameMapper_24_fps_to_30_fps_Pulldown_Advanced) DummyReader r(Fraction(24,1), 720, 
480, 22000, 2, 5.0); // Create mapping 24 fps and 30 fps - FrameMapper mapping(&r, Fraction(30, 1), PULLDOWN_ADVANCED, 22000, 2, LAYOUT_STEREO); + FrameMapper mapping(&r, Fraction(30, 1), PULLDOWN_ADVANCED, 22000, 2, LAYOUT_STEREO, 0.0, 0.0); MappedFrame frame2 = mapping.GetMappedFrame(2); MappedFrame frame3 = mapping.GetMappedFrame(3); MappedFrame frame4 = mapping.GetMappedFrame(4); @@ -113,7 +113,7 @@ TEST(FrameMapper_24_fps_to_30_fps_Pulldown_None) DummyReader r(Fraction(24,1), 720, 480, 22000, 2, 5.0); // Create mapping 24 fps and 30 fps - FrameMapper mapping(&r, Fraction(30, 1), PULLDOWN_NONE, 22000, 2, LAYOUT_STEREO); + FrameMapper mapping(&r, Fraction(30, 1), PULLDOWN_NONE, 22000, 2, LAYOUT_STEREO, 0.0, 0.0); MappedFrame frame4 = mapping.GetMappedFrame(4); MappedFrame frame5 = mapping.GetMappedFrame(5); @@ -130,7 +130,7 @@ TEST(FrameMapper_30_fps_to_24_fps_Pulldown_Classic) DummyReader r(Fraction(30, 1), 720, 480, 22000, 2, 5.0); // Create mapping between 30 fps and 24 fps - FrameMapper mapping(&r, Fraction(24, 1), PULLDOWN_CLASSIC, 22000, 2, LAYOUT_STEREO); + FrameMapper mapping(&r, Fraction(24, 1), PULLDOWN_CLASSIC, 22000, 2, LAYOUT_STEREO, 0.0, 0.0); MappedFrame frame3 = mapping.GetMappedFrame(3); MappedFrame frame4 = mapping.GetMappedFrame(4); MappedFrame frame5 = mapping.GetMappedFrame(5); @@ -150,7 +150,7 @@ TEST(FrameMapper_30_fps_to_24_fps_Pulldown_Advanced) DummyReader r(Fraction(30, 1), 720, 480, 22000, 2, 5.0); // Create mapping between 30 fps and 24 fps - FrameMapper mapping(&r, Fraction(24, 1), PULLDOWN_ADVANCED, 22000, 2, LAYOUT_STEREO); + FrameMapper mapping(&r, Fraction(24, 1), PULLDOWN_ADVANCED, 22000, 2, LAYOUT_STEREO, 0.0, 0.0); MappedFrame frame2 = mapping.GetMappedFrame(2); MappedFrame frame3 = mapping.GetMappedFrame(3); MappedFrame frame4 = mapping.GetMappedFrame(4); @@ -170,7 +170,7 @@ TEST(FrameMapper_30_fps_to_24_fps_Pulldown_None) DummyReader r(Fraction(30, 1), 720, 480, 22000, 2, 5.0); // Create mapping between 30 fps and 24 fps - 
FrameMapper mapping(&r, Fraction(24, 1), PULLDOWN_NONE, 22000, 2, LAYOUT_STEREO); + FrameMapper mapping(&r, Fraction(24, 1), PULLDOWN_NONE, 22000, 2, LAYOUT_STEREO, 0.0, 0.0); MappedFrame frame4 = mapping.GetMappedFrame(4); MappedFrame frame5 = mapping.GetMappedFrame(5); @@ -189,7 +189,7 @@ TEST(FrameMapper_resample_audio_48000_to_41000) FFmpegReader r(path.str()); // Map to 30 fps, 3 channels surround, 44100 sample rate - FrameMapper map(&r, Fraction(30,1), PULLDOWN_NONE, 44100, 3, LAYOUT_SURROUND); + FrameMapper map(&r, Fraction(30,1), PULLDOWN_NONE, 44100, 3, LAYOUT_SURROUND, 0.0, 0.0); map.Open(); // Check details @@ -199,7 +199,7 @@ TEST(FrameMapper_resample_audio_48000_to_41000) CHECK_EQUAL(1470, map.GetFrame(50)->GetAudioSamplesCount()); // Change mapping data - map.ChangeMapping(Fraction(25,1), PULLDOWN_NONE, 22050, 1, LAYOUT_MONO); + map.ChangeMapping(Fraction(25,1), PULLDOWN_NONE, 22050, 1, LAYOUT_MONO, 0.0, 0.0); // Check details CHECK_EQUAL(1, map.GetFrame(1)->GetAudioChannelsCount()); @@ -210,3 +210,180 @@ TEST(FrameMapper_resample_audio_48000_to_41000) // Close mapper map.Close(); } + +TEST(FrameMapper_AudioSample_Distribution) +{ + + CacheMemory cache; + int OFFSET = 0; + float AMPLITUDE = 0.2; + double ANGLE = 0.0; + int NUM_SAMPLES = 100; + std::cout << "Starting Resample Test" << std::endl; + + for (int64_t frame_number = 1; frame_number <= 90; frame_number++) + { + + // Create blank frame (with specific frame #, samples, and channels) + // Sample count should be 44100 / 30 fps = 1470 samples per frame + + int sample_count = 1470; + std::shared_ptr f(new openshot::Frame(frame_number, sample_count, 2)); + + // Create test samples with sin wave (predictable values) + float *audio_buffer = new float[sample_count * 2]; + + for (int sample_number = 0; sample_number < sample_count; sample_number++) + { + // Calculate sin wave + // TODO: I'm using abs(), because calling AddAudio only seems to be adding the positive values and it's bizarre + float 
sample_value = float(AMPLITUDE * sin(ANGLE) + OFFSET); + audio_buffer[sample_number] = sample_value;//abs(sample_value); + ANGLE += (2 * M_PI) / NUM_SAMPLES; + + // Add custom audio samples to Frame (bool replaceSamples, int destChannel, int destStartSample, const float* source, + f->AddAudio(true, 0, 0, audio_buffer, sample_count, 1.0); // add channel 1 + f->AddAudio(true, 1, 0, audio_buffer, sample_count, 1.0); // add channel 2 + + // Add test frame to dummy reader + cache.Add(f); + } + } + // Create a default fraction (should be 1/1) + openshot::DummyReader r(openshot::Fraction(30, 1), 1920, 1080, 44100, 2, 30.0, &cache); + r.info.has_audio = true; + r.Open(); // Open the reader + + // Map to 24 fps, which should create a variable # of samples per frame + ///FrameMapper map(&r, Fraction(24, 1), PULLDOWN_NONE, 44100, 2, LAYOUT_STEREO); + //map.info.has_audio = true; + //map.Open(); + + Timeline t1(1920, 1080, Fraction(24, 1), 44100, 2, LAYOUT_STEREO); + + Clip c1; + c1.Reader(&r); + c1.Layer(1); + c1.Position(0.0); + c1.Start(0.0); + c1.End(10.0); + + // Create 2nd map to 24 fps, which should create a variable # of samples per frame + + //FrameMapper map2(&r, Fraction(24, 1), PULLDOWN_NONE, 44100, 2, LAYOUT_STEREO); + + //map2.info.has_audio = true; + //map2.Open(); + + Clip c2; + c2.Reader(&r); + c2.Layer(2); + + // Position 1 frame into the video, this should mis-align the audio and create situations + // which overlapping Frame instances have different # of samples for the Timeline. 
+ // TODO: Moving to 0.0 position, to simplify this test for now + + + c2.Position(0.041666667 * 14); + c2.Start(1.0); + c2.End(10.0); + + // Add clips + + t1.AddClip(&c1); + t1.AddClip(&c2); + + std::string json_val = t1.Json(); + + std::cout << json_val << std::endl; + + //t1.SetJson(t1.Json()); + t1.Open(); + + FFmpegWriter w("output-resample.mp4"); + + // Set options + w.SetAudioOptions("aac", 44100, 192000); + w.SetVideoOptions("libx264", 1280, 720, Fraction(24,1), 5000000); + + // Open writer + w.Open(); + + + w.WriteFrame(&t1, 5, 50); + + //for (int64_t frame_number = 1; frame_number <= 90; frame_number++){ + // w.WriteFrame(t1.GetFrame(frame_number)); + //} + + // Close writer & reader + w.Close(); + + //map.Close(); + //map2.Close(); + + t1.Close(); + + // Clean up + cache.Clear(); + + r.Close(); + +} + + +/* +TEST(FrameMapperVideoEdition){ + + stringstream path; + path << TEST_MEDIA_PATH << "baseline.mkv"; + FFmpegReader r(path.str()); + r.Open(); + + Clip c1; + c1.Reader(&r); + c1.Layer(1); + c1.Position(0.0); + c1.Start(0.0); + c1.End(45.0); + + Clip c2; + c2.Reader(&r); + c2.Layer(1); + c2.Position(30.0); + c2.Start(0.0); + c2.End(45.0); + + Timeline t1(1280, 720, Fraction(24, 1), 44100, 2, LAYOUT_STEREO); + t1.AddClip(&c1); + t1.AddClip(&c2); + + t1.Open(); + + + FFmpegWriter w("simple-edit.mp4"); + + // Set options + w.SetAudioOptions("aac", 44100, 192000); + w.SetVideoOptions("libx264", 1280, 720, Fraction(24,1), 5000000); + + // Open writer + w.Open(); + + + w.WriteFrame(&t1, 1, t1.GetMaxFrame()); + + // Close writer & reader + w.Close(); + + //map.Close(); + //map2.Close(); + + t1.Close(); + + + r.Close(); + + + +}*/ \ No newline at end of file From 57b48f31a2ccf92aa5d5afa4fe9e3d5f595f3672 Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Tue, 13 Oct 2020 14:55:25 -0500 Subject: [PATCH 08/14] Initializing Clip info struct, and fixing clip cache settings --- include/Clip.h | 15 +++++++++------ src/Clip.cpp | 29 ++++++++++++++++++++--------- 2 
files changed, 29 insertions(+), 15 deletions(-) diff --git a/include/Clip.h b/include/Clip.h index fd28c162..0bc99e84 100644 --- a/include/Clip.h +++ b/include/Clip.h @@ -98,6 +98,15 @@ namespace openshot { /// Section lock for multiple threads juce::CriticalSection getFrameCriticalSection; + /// Init default settings for a clip + void init_settings(); + + /// Init reader info details + void init_reader_settings(); + + /// Update default rotation from reader + void init_reader_rotation(); + private: bool waveform; ///< Should a waveform be used instead of the clip's image std::list effects; /// frame, int64_t frame_number); - /// Init default settings for a clip - void init_settings(); - - /// Update default rotation from reader - void init_reader_rotation(); - /// Compare 2 floating point numbers bool isEqual(double a, double b); diff --git a/src/Clip.cpp b/src/Clip.cpp index 1d2b392d..d81b5c61 100644 --- a/src/Clip.cpp +++ b/src/Clip.cpp @@ -69,9 +69,6 @@ void Clip::init_settings() // Init alpha alpha = Keyframe(1.0); - // Init rotation - init_reader_rotation(); - // Init time & volume time = Keyframe(1.0); volume = Keyframe(1.0); @@ -101,8 +98,22 @@ void Clip::init_settings() has_audio = Keyframe(-1.0); has_video = Keyframe(-1.0); - // Initialize Clip cache - cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels); + // Init reader info struct and cache size + init_reader_settings(); +} + +// Init reader info details +void Clip::init_reader_settings() { + if (reader) { + // Init rotation (if any) + init_reader_rotation(); + + // Initialize info struct + info = reader->info; + + // Initialize Clip cache + cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels); + } } // Init reader's rotation (if any) @@ -208,8 +219,8 @@ Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(N End(reader->info.duration); 
reader->ParentClip(this); allocated_reader = reader; - init_reader_rotation(); - } + // Init reader info struct and cache size + init_reader_settings(); } } // Destructor @@ -237,8 +248,8 @@ void Clip::Reader(ReaderBase* new_reader) // set parent reader->ParentClip(this); - // Init rotation (if any) - init_reader_rotation(); + // Init reader info struct and cache size + init_reader_settings(); } /// Get the current reader From 91945f03dc3d1ee9848e542db5189bad97c640de Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Tue, 13 Oct 2020 17:08:27 -0500 Subject: [PATCH 09/14] Replacing audio fix implementation with ParentClip(), to access clip start and position (if any) --- include/FrameMapper.h | 12 +++--- src/FrameMapper.cpp | 85 +++++++++++++++++++++---------------- src/Timeline.cpp | 4 +- tests/FrameMapper_Tests.cpp | 85 ++++++------------------------------- 4 files changed, 67 insertions(+), 119 deletions(-) diff --git a/include/FrameMapper.h b/include/FrameMapper.h index 85c933d2..7f6c9c16 100644 --- a/include/FrameMapper.h +++ b/include/FrameMapper.h @@ -147,9 +147,6 @@ namespace openshot bool is_dirty; // When this is true, the next call to GetFrame will re-init the mapping SWRCONTEXT *avr; // Audio resampling context object - float position; - float start; - // Internal methods used by init void AddField(int64_t frame); void AddField(Field field); @@ -157,6 +154,9 @@ namespace openshot // Get Frame or Generate Blank Frame std::shared_ptr GetOrCreateFrame(int64_t number); + /// Adjust frame number for Clip position and start (which can result in a different number) + int64_t AdjustFrameNumber(int64_t clip_frame_number, float position, float start); + // Use the original and target frame rates and a pull-down technique to create // a mapping between the original fields and frames or a video to a new frame rate. 
// This might repeat or skip fields and frames of the original video, depending on @@ -169,13 +169,13 @@ namespace openshot std::vector frames; // List of all frames /// Default constructor for openshot::FrameMapper class - FrameMapper(ReaderBase *reader, Fraction target_fps, PulldownType target_pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout, float clipPosition, float clipStart); + FrameMapper(ReaderBase *reader, Fraction target_fps, PulldownType target_pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout); /// Destructor virtual ~FrameMapper(); /// Change frame rate or audio mapping details - void ChangeMapping(Fraction target_fps, PulldownType pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout, float clipPosition, float clipStart); + void ChangeMapping(Fraction target_fps, PulldownType pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout); /// Close the openshot::FrameMapper and internal reader void Close() override; @@ -220,8 +220,6 @@ namespace openshot /// Resample audio and map channels (if needed) void ResampleMappedAudio(std::shared_ptr frame, int64_t original_frame_number); - - int64_t ConvPositon(int64_t clip_frame_number); }; } diff --git a/src/FrameMapper.cpp b/src/FrameMapper.cpp index d05cf7f6..33221173 100644 --- a/src/FrameMapper.cpp +++ b/src/FrameMapper.cpp @@ -29,11 +29,12 @@ */ #include "../include/FrameMapper.h" +#include "../include/Clip.h" using namespace std; using namespace openshot; -FrameMapper::FrameMapper(ReaderBase *reader, Fraction target, PulldownType target_pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout, float clipPosition, float clipStart) : +FrameMapper::FrameMapper(ReaderBase *reader, Fraction target, PulldownType target_pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout) : reader(reader), 
target(target), pulldown(target_pulldown), is_dirty(true), avr(NULL) { // Set the original frame rate from the reader @@ -52,8 +53,6 @@ FrameMapper::FrameMapper(ReaderBase *reader, Fraction target, PulldownType targe info.width = reader->info.width; info.height = reader->info.height; - position = clipPosition; - start = clipStart; // Used to toggle odd / even fields field_toggle = true; @@ -118,6 +117,15 @@ void FrameMapper::Init() // Clear cache final_cache.Clear(); + // Get clip position from parent clip (if any) + float clipPosition = 0.0; + float clipStart = 0.0; + Clip *parent = (Clip *) ParentClip(); + if (parent) { + clipPosition = parent->Position(); + clipStart = parent->Start(); + } + // Some framerates are handled special, and some use a generic Keyframe curve to // map the framerates. These are the special framerates: if ((fabs(original.ToFloat() - 24.0) < 1e-7 || fabs(original.ToFloat() - 25.0) < 1e-7 || fabs(original.ToFloat() - 30.0) < 1e-7) && @@ -259,12 +267,12 @@ void FrameMapper::Init() // the original sample rate. 
int64_t end_samples_frame = start_samples_frame; int end_samples_position = start_samples_position; - int remaining_samples = Frame::GetSamplesPerFrame(ConvPositon(frame_number), target, reader->info.sample_rate, reader->info.channels); + int remaining_samples = Frame::GetSamplesPerFrame(AdjustFrameNumber(frame_number, clipPosition, clipStart), target, reader->info.sample_rate, reader->info.channels); while (remaining_samples > 0) { // get original samples - int original_samples = Frame::GetSamplesPerFrame(ConvPositon(end_samples_frame), original, reader->info.sample_rate, reader->info.channels) - end_samples_position; + int original_samples = Frame::GetSamplesPerFrame(AdjustFrameNumber(end_samples_frame, clipPosition, clipStart), original, reader->info.sample_rate, reader->info.channels) - end_samples_position; // Enough samples if (original_samples >= remaining_samples) @@ -284,12 +292,12 @@ void FrameMapper::Init() // Create the sample mapping struct - SampleRange Samples = {start_samples_frame, start_samples_position, end_samples_frame, end_samples_position, Frame::GetSamplesPerFrame(ConvPositon(frame_number), target, reader->info.sample_rate, reader->info.channels)}; + SampleRange Samples = {start_samples_frame, start_samples_position, end_samples_frame, end_samples_position, Frame::GetSamplesPerFrame(AdjustFrameNumber(frame_number, clipPosition, clipStart), target, reader->info.sample_rate, reader->info.channels)}; // Reset the audio variables start_samples_frame = end_samples_frame; start_samples_position = end_samples_position + 1; - if (start_samples_position >= Frame::GetSamplesPerFrame(ConvPositon(start_samples_frame), original, reader->info.sample_rate, reader->info.channels)) + if (start_samples_position >= Frame::GetSamplesPerFrame(AdjustFrameNumber(start_samples_frame, clipPosition, clipStart), original, reader->info.sample_rate, reader->info.channels)) { start_samples_frame += 1; // increment the frame (since we need to wrap onto the next one) 
start_samples_position = 0; // reset to 0, since we wrapped @@ -355,8 +363,17 @@ std::shared_ptr FrameMapper::GetOrCreateFrame(int64_t number) { std::shared_ptr new_frame; + // Get clip position from parent clip (if any) + float clipPosition = 0.0; + float clipStart = 0.0; + Clip *parent = (Clip *) ParentClip(); + if (parent) { + clipPosition = parent->Position(); + clipStart = parent->Start(); + } + // Init some basic properties about this frame (keep sample rate and # channels the same as the original reader for now) - int samples_in_frame = Frame::GetSamplesPerFrame(ConvPositon(number), target, reader->info.sample_rate, reader->info.channels); + int samples_in_frame = Frame::GetSamplesPerFrame(AdjustFrameNumber(number, clipPosition, clipStart), target, reader->info.sample_rate, reader->info.channels); try { // Debug output @@ -406,6 +423,15 @@ std::shared_ptr FrameMapper::GetFrame(int64_t requested_frame) final_frame = final_cache.GetFrame(requested_frame); if (final_frame) return final_frame; + // Get clip position from parent clip (if any) + float clipPosition = 0.0; + float clipStart = 0.0; + Clip *parent = (Clip *) ParentClip(); + if (parent) { + clipPosition = parent->Position(); + clipStart = parent->Start(); + } + // Minimum number of frames to process (for performance reasons) // Dialing this down to 1 for now, as it seems to improve performance, and reduce export crashes int minimum_frames = 1; @@ -429,7 +455,7 @@ std::shared_ptr FrameMapper::GetFrame(int64_t requested_frame) // Get # of channels in the actual frame int channels_in_frame = mapped_frame->GetAudioChannelsCount(); - int samples_in_frame = Frame::GetSamplesPerFrame(ConvPositon(frame_number), target, mapped_frame->SampleRate(), channels_in_frame); + int samples_in_frame = Frame::GetSamplesPerFrame(AdjustFrameNumber(frame_number, clipPosition, clipStart), target, mapped_frame->SampleRate(), channels_in_frame); // Determine if mapped frame is identical to source frame // including audio sample 
distribution according to mapped.Samples, @@ -596,21 +622,6 @@ void FrameMapper::PrintMapping() // Recalculate mappings Init(); - // Get the difference (in frames) between the original and target frame rates - float difference = target.ToInt() - original.ToInt(); - - int field_interval = 0; - int frame_interval = 0; - - if (difference != 0) - { - // Find the number (i.e. interval) of fields that need to be skipped or repeated - field_interval = round(fabs(original.ToInt() / difference)); - - // Get frame interval (2 fields per frame) - frame_interval = field_interval * 2.0f; - } - // Loop through frame mappings for (float map = 1; map <= frames.size(); map++) { @@ -621,7 +632,6 @@ void FrameMapper::PrintMapping() } - // Determine if reader is open or closed bool FrameMapper::IsOpen() { if (reader) @@ -630,7 +640,6 @@ bool FrameMapper::IsOpen() { return false; } - // Open the internal reader void FrameMapper::Open() { @@ -689,7 +698,6 @@ Json::Value FrameMapper::JsonValue() const { // Create root json object Json::Value root = ReaderBase::JsonValue(); // get parent properties root["type"] = "FrameMapper"; - root["position"] = position; // return JsonValue return root; @@ -718,10 +726,6 @@ void FrameMapper::SetJsonValue(const Json::Value root) { // Set parent data ReaderBase::SetJsonValue(root); - if(!root["position"].isNull()){ - position = root["position"].asDouble(); - } - // Re-Open path, and re-init everything (if needed) if (reader) { @@ -731,7 +735,7 @@ void FrameMapper::SetJsonValue(const Json::Value root) { } // Change frame rate or audio mapping details -void FrameMapper::ChangeMapping(Fraction target_fps, PulldownType target_pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout, float clipPosition, float clipStart) +void FrameMapper::ChangeMapping(Fraction target_fps, PulldownType target_pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout) { 
ZmqLogger::Instance()->AppendDebugMethod("FrameMapper::ChangeMapping", "target_fps.num", target_fps.num, "target_fps.den", target_fps.den, "target_pulldown", target_pulldown, "target_sample_rate", target_sample_rate, "target_channels", target_channels, "target_channel_layout", target_channel_layout); @@ -750,9 +754,6 @@ void FrameMapper::ChangeMapping(Fraction target_fps, PulldownType target_pulldow info.channels = target_channels; info.channel_layout = target_channel_layout; - position = clipPosition; - start = clipStart; - // Clear cache final_cache.Clear(); @@ -775,6 +776,15 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr frame, int64_t orig // Recalculate mappings Init(); + // Get clip position from parent clip (if any) + float clipPosition = 0.0; + float clipStart = 0.0; + Clip *parent = (Clip *) ParentClip(); + if (parent) { + clipPosition = parent->Position(); + clipStart = parent->Start(); + } + // Init audio buffers / variables int total_frame_samples = 0; int channels_in_frame = frame->GetAudioChannelsCount(); @@ -836,7 +846,7 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr frame, int64_t orig } // Update total samples & input frame size (due to bigger or smaller data types) - total_frame_samples = Frame::GetSamplesPerFrame(ConvPositon(frame->number), target, info.sample_rate, info.channels); + total_frame_samples = Frame::GetSamplesPerFrame(AdjustFrameNumber(frame->number, clipPosition, clipStart), target, info.sample_rate, info.channels); ZmqLogger::Instance()->AppendDebugMethod("FrameMapper::ResampleMappedAudio (adjust # of samples)", "total_frame_samples", total_frame_samples, "info.sample_rate", info.sample_rate, "sample_rate_in_frame", sample_rate_in_frame, "info.channels", info.channels, "channels_in_frame", channels_in_frame, "original_frame_number", original_frame_number); @@ -946,7 +956,8 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr frame, int64_t orig resampled_samples = NULL; } -int64_t 
FrameMapper::ConvPositon(int64_t clip_frame_number){ +// Adjust frame number for Clip position and start (which can result in a different number) +int64_t FrameMapper::AdjustFrameNumber(int64_t clip_frame_number, float position, float start) { int64_t clip_start_frame = (start * info.fps.ToDouble()) + 1; int64_t clip_start_position = round(position * info.fps.ToDouble()) + 1; diff --git a/src/Timeline.cpp b/src/Timeline.cpp index e45a0991..736bee15 100644 --- a/src/Timeline.cpp +++ b/src/Timeline.cpp @@ -348,14 +348,14 @@ void Timeline::apply_mapper_to_clip(Clip* clip) } else { // Create a new FrameMapper to wrap the current reader - FrameMapper* mapper = new FrameMapper(clip->Reader(), info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout, clip->Position(), clip->Start()); + FrameMapper* mapper = new FrameMapper(clip->Reader(), info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout); allocated_frame_mappers.insert(mapper); clip_reader = (ReaderBase*) mapper; } // Update the mapping FrameMapper* clip_mapped_reader = (FrameMapper*) clip_reader; - clip_mapped_reader->ChangeMapping(info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout, clip->Position(), clip->Start()); + clip_mapped_reader->ChangeMapping(info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout); // Update clip reader clip->Reader(clip_reader); diff --git a/tests/FrameMapper_Tests.cpp b/tests/FrameMapper_Tests.cpp index 02634c37..54e76e64 100644 --- a/tests/FrameMapper_Tests.cpp +++ b/tests/FrameMapper_Tests.cpp @@ -42,7 +42,7 @@ TEST(FrameMapper_Get_Valid_Frame) DummyReader r(Fraction(24,1), 720, 480, 22000, 2, 5.0); // Create mapping between 24 fps and 29.97 fps using classic pulldown - FrameMapper mapping(&r, Fraction(30000, 1001), PULLDOWN_CLASSIC, 22000, 2, LAYOUT_STEREO, 0.0, 0.0); + FrameMapper mapping(&r, Fraction(30000, 1001), PULLDOWN_CLASSIC, 22000, 2, LAYOUT_STEREO); try { @@ -63,7 +63,7 @@ 
TEST(FrameMapper_Invalid_Frame_Too_Small) DummyReader r(Fraction(24,1), 720, 480, 22000, 2, 5.0); // Create mapping 24 fps and 29.97 fps - FrameMapper mapping(&r, Fraction(30000, 1001), PULLDOWN_CLASSIC, 22000, 2, LAYOUT_STEREO, 0.0, 0.0); + FrameMapper mapping(&r, Fraction(30000, 1001), PULLDOWN_CLASSIC, 22000, 2, LAYOUT_STEREO); // Check invalid frame number CHECK_THROW(mapping.GetMappedFrame(0), OutOfBoundsFrame); @@ -76,7 +76,7 @@ TEST(FrameMapper_24_fps_to_30_fps_Pulldown_Classic) DummyReader r(Fraction(24,1), 720, 480, 22000, 2, 5.0); // Create mapping 24 fps and 30 fps - FrameMapper mapping(&r, Fraction(30, 1), PULLDOWN_CLASSIC, 22000, 2, LAYOUT_STEREO, 0.0, 0.0); + FrameMapper mapping(&r, Fraction(30, 1), PULLDOWN_CLASSIC, 22000, 2, LAYOUT_STEREO); MappedFrame frame2 = mapping.GetMappedFrame(2); MappedFrame frame3 = mapping.GetMappedFrame(3); @@ -93,7 +93,7 @@ TEST(FrameMapper_24_fps_to_30_fps_Pulldown_Advanced) DummyReader r(Fraction(24,1), 720, 480, 22000, 2, 5.0); // Create mapping 24 fps and 30 fps - FrameMapper mapping(&r, Fraction(30, 1), PULLDOWN_ADVANCED, 22000, 2, LAYOUT_STEREO, 0.0, 0.0); + FrameMapper mapping(&r, Fraction(30, 1), PULLDOWN_ADVANCED, 22000, 2, LAYOUT_STEREO); MappedFrame frame2 = mapping.GetMappedFrame(2); MappedFrame frame3 = mapping.GetMappedFrame(3); MappedFrame frame4 = mapping.GetMappedFrame(4); @@ -113,7 +113,7 @@ TEST(FrameMapper_24_fps_to_30_fps_Pulldown_None) DummyReader r(Fraction(24,1), 720, 480, 22000, 2, 5.0); // Create mapping 24 fps and 30 fps - FrameMapper mapping(&r, Fraction(30, 1), PULLDOWN_NONE, 22000, 2, LAYOUT_STEREO, 0.0, 0.0); + FrameMapper mapping(&r, Fraction(30, 1), PULLDOWN_NONE, 22000, 2, LAYOUT_STEREO); MappedFrame frame4 = mapping.GetMappedFrame(4); MappedFrame frame5 = mapping.GetMappedFrame(5); @@ -130,7 +130,7 @@ TEST(FrameMapper_30_fps_to_24_fps_Pulldown_Classic) DummyReader r(Fraction(30, 1), 720, 480, 22000, 2, 5.0); // Create mapping between 30 fps and 24 fps - FrameMapper mapping(&r, 
Fraction(24, 1), PULLDOWN_CLASSIC, 22000, 2, LAYOUT_STEREO, 0.0, 0.0); + FrameMapper mapping(&r, Fraction(24, 1), PULLDOWN_CLASSIC, 22000, 2, LAYOUT_STEREO); MappedFrame frame3 = mapping.GetMappedFrame(3); MappedFrame frame4 = mapping.GetMappedFrame(4); MappedFrame frame5 = mapping.GetMappedFrame(5); @@ -150,7 +150,7 @@ TEST(FrameMapper_30_fps_to_24_fps_Pulldown_Advanced) DummyReader r(Fraction(30, 1), 720, 480, 22000, 2, 5.0); // Create mapping between 30 fps and 24 fps - FrameMapper mapping(&r, Fraction(24, 1), PULLDOWN_ADVANCED, 22000, 2, LAYOUT_STEREO, 0.0, 0.0); + FrameMapper mapping(&r, Fraction(24, 1), PULLDOWN_ADVANCED, 22000, 2, LAYOUT_STEREO); MappedFrame frame2 = mapping.GetMappedFrame(2); MappedFrame frame3 = mapping.GetMappedFrame(3); MappedFrame frame4 = mapping.GetMappedFrame(4); @@ -170,7 +170,7 @@ TEST(FrameMapper_30_fps_to_24_fps_Pulldown_None) DummyReader r(Fraction(30, 1), 720, 480, 22000, 2, 5.0); // Create mapping between 30 fps and 24 fps - FrameMapper mapping(&r, Fraction(24, 1), PULLDOWN_NONE, 22000, 2, LAYOUT_STEREO, 0.0, 0.0); + FrameMapper mapping(&r, Fraction(24, 1), PULLDOWN_NONE, 22000, 2, LAYOUT_STEREO); MappedFrame frame4 = mapping.GetMappedFrame(4); MappedFrame frame5 = mapping.GetMappedFrame(5); @@ -189,7 +189,7 @@ TEST(FrameMapper_resample_audio_48000_to_41000) FFmpegReader r(path.str()); // Map to 30 fps, 3 channels surround, 44100 sample rate - FrameMapper map(&r, Fraction(30,1), PULLDOWN_NONE, 44100, 3, LAYOUT_SURROUND, 0.0, 0.0); + FrameMapper map(&r, Fraction(30,1), PULLDOWN_NONE, 44100, 3, LAYOUT_SURROUND); map.Open(); // Check details @@ -199,7 +199,7 @@ TEST(FrameMapper_resample_audio_48000_to_41000) CHECK_EQUAL(1470, map.GetFrame(50)->GetAudioSamplesCount()); // Change mapping data - map.ChangeMapping(Fraction(25,1), PULLDOWN_NONE, 22050, 1, LAYOUT_MONO, 0.0, 0.0); + map.ChangeMapping(Fraction(25,1), PULLDOWN_NONE, 22050, 1, LAYOUT_MONO); // Check details CHECK_EQUAL(1, map.GetFrame(1)->GetAudioChannelsCount()); @@ 
-213,13 +213,12 @@ TEST(FrameMapper_resample_audio_48000_to_41000) TEST(FrameMapper_AudioSample_Distribution) { - CacheMemory cache; int OFFSET = 0; float AMPLITUDE = 0.2; double ANGLE = 0.0; int NUM_SAMPLES = 100; - std::cout << "Starting Resample Test" << std::endl; + //std::cout << "Starting Resample Test" << std::endl; for (int64_t frame_number = 1; frame_number <= 90; frame_number++) { @@ -283,7 +282,6 @@ TEST(FrameMapper_AudioSample_Distribution) // which overlapping Frame instances have different # of samples for the Timeline. // TODO: Moving to 0.0 position, to simplify this test for now - c2.Position(0.041666667 * 14); c2.Start(1.0); c2.End(10.0); @@ -295,7 +293,7 @@ TEST(FrameMapper_AudioSample_Distribution) std::string json_val = t1.Json(); - std::cout << json_val << std::endl; + //std::cout << json_val << std::endl; //t1.SetJson(t1.Json()); t1.Open(); @@ -309,7 +307,6 @@ TEST(FrameMapper_AudioSample_Distribution) // Open writer w.Open(); - w.WriteFrame(&t1, 5, 50); //for (int64_t frame_number = 1; frame_number <= 90; frame_number++){ @@ -328,62 +325,4 @@ TEST(FrameMapper_AudioSample_Distribution) cache.Clear(); r.Close(); - } - - -/* -TEST(FrameMapperVideoEdition){ - - stringstream path; - path << TEST_MEDIA_PATH << "baseline.mkv"; - FFmpegReader r(path.str()); - r.Open(); - - Clip c1; - c1.Reader(&r); - c1.Layer(1); - c1.Position(0.0); - c1.Start(0.0); - c1.End(45.0); - - Clip c2; - c2.Reader(&r); - c2.Layer(1); - c2.Position(30.0); - c2.Start(0.0); - c2.End(45.0); - - Timeline t1(1280, 720, Fraction(24, 1), 44100, 2, LAYOUT_STEREO); - t1.AddClip(&c1); - t1.AddClip(&c2); - - t1.Open(); - - - FFmpegWriter w("simple-edit.mp4"); - - // Set options - w.SetAudioOptions("aac", 44100, 192000); - w.SetVideoOptions("libx264", 1280, 720, Fraction(24,1), 5000000); - - // Open writer - w.Open(); - - - w.WriteFrame(&t1, 1, t1.GetMaxFrame()); - - // Close writer & reader - w.Close(); - - //map.Close(); - //map2.Close(); - - t1.Close(); - - - r.Close(); - - - -}*/ \ 
No newline at end of file From 94059828d563e7511e424d044a8dfac1d0fb146e Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Tue, 13 Oct 2020 18:18:10 -0500 Subject: [PATCH 10/14] Converting RGB8888 to ARGB32_Premultiplied (for performance reasons) --- src/CacheDisk.cpp | 2 +- src/FFmpegReader.cpp | 4 ++-- src/Frame.cpp | 28 ++++++++++++++-------------- src/QtHtmlReader.cpp | 2 +- src/QtImageReader.cpp | 5 ----- src/QtTextReader.cpp | 2 +- src/effects/Bars.cpp | 2 +- src/effects/Crop.cpp | 2 +- src/effects/Deinterlace.cpp | 2 +- 9 files changed, 22 insertions(+), 27 deletions(-) diff --git a/src/CacheDisk.cpp b/src/CacheDisk.cpp index 91153c8f..789ab25c 100644 --- a/src/CacheDisk.cpp +++ b/src/CacheDisk.cpp @@ -235,7 +235,7 @@ std::shared_ptr CacheDisk::GetFrame(int64_t frame_number) image->load(frame_path); // Set pixel formatimage-> - image = std::shared_ptr(new QImage(image->convertToFormat(QImage::Format_RGBA8888))); + image = std::shared_ptr(new QImage(image->convertToFormat(QImage::Format_ARGB32_Premultiplied))); // Create frame object std::shared_ptr frame(new Frame()); diff --git a/src/FFmpegReader.cpp b/src/FFmpegReader.cpp index 0b9ef740..d336654a 100644 --- a/src/FFmpegReader.cpp +++ b/src/FFmpegReader.cpp @@ -1363,7 +1363,7 @@ void FFmpegReader::ProcessVideoPacket(int64_t requested_frame) { scale_mode = SWS_BICUBIC; } SwsContext *img_convert_ctx = sws_getContext(info.width, info.height, AV_GET_CODEC_PIXEL_FORMAT(pStream, pCodecCtx), width, - height, PIX_FMT_RGBA, scale_mode, NULL, NULL, NULL); + height, AV_PIX_FMT_RGB32, scale_mode, NULL, NULL, NULL); // Resize / Convert to RGB sws_scale(img_convert_ctx, my_frame->data, my_frame->linesize, 0, @@ -1373,7 +1373,7 @@ void FFmpegReader::ProcessVideoPacket(int64_t requested_frame) { std::shared_ptr f = CreateFrame(current_frame); // Add Image data to frame - f->AddImage(width, height, 4, QImage::Format_RGBA8888, buffer); + f->AddImage(width, height, 4, QImage::Format_ARGB32_Premultiplied, buffer); // Update 
working cache working_cache.Add(f); diff --git a/src/Frame.cpp b/src/Frame.cpp index 483df768..80519292 100644 --- a/src/Frame.cpp +++ b/src/Frame.cpp @@ -231,7 +231,7 @@ std::shared_ptr Frame::GetWaveform(int width, int height, int Red, int G } // Create blank image - wave_image = std::shared_ptr(new QImage(total_width, total_height, QImage::Format_RGBA8888)); + wave_image = std::shared_ptr(new QImage(total_width, total_height, QImage::Format_ARGB32_Premultiplied)); wave_image->fill(QColor(0,0,0,0)); // Load QPainter with wave_image device @@ -262,7 +262,7 @@ std::shared_ptr Frame::GetWaveform(int width, int height, int Red, int G else { // No audio samples present - wave_image = std::shared_ptr(new QImage(width, height, QImage::Format_RGBA8888)); + wave_image = std::shared_ptr(new QImage(width, height, QImage::Format_ARGB32_Premultiplied)); wave_image->fill(QColor(QString::fromStdString("#000000"))); } @@ -618,7 +618,7 @@ void Frame::Thumbnail(std::string path, int new_width, int new_height, std::stri std::string background_color, bool ignore_aspect, std::string format, int quality, float rotate) { // Create blank thumbnail image & fill background color - std::shared_ptr thumbnail = std::shared_ptr(new QImage(new_width, new_height, QImage::Format_RGBA8888)); + std::shared_ptr thumbnail = std::shared_ptr(new QImage(new_width, new_height, QImage::Format_ARGB32_Premultiplied)); thumbnail->fill(QColor(QString::fromStdString(background_color))); // Create painter @@ -673,7 +673,7 @@ void Frame::Thumbnail(std::string path, int new_width, int new_height, std::stri overlay->load(QString::fromStdString(overlay_path)); // Set pixel format - overlay = std::shared_ptr(new QImage(overlay->convertToFormat(QImage::Format_RGBA8888))); + overlay = std::shared_ptr(new QImage(overlay->convertToFormat(QImage::Format_ARGB32_Premultiplied))); // Resize to fit overlay = std::shared_ptr(new QImage(overlay->scaled(new_width, new_height, Qt::IgnoreAspectRatio, Qt::SmoothTransformation))); 
@@ -691,7 +691,7 @@ void Frame::Thumbnail(std::string path, int new_width, int new_height, std::stri mask->load(QString::fromStdString(mask_path)); // Set pixel format - mask = std::shared_ptr(new QImage(mask->convertToFormat(QImage::Format_RGBA8888))); + mask = std::shared_ptr(new QImage(mask->convertToFormat(QImage::Format_ARGB32_Premultiplied))); // Resize to fit mask = std::shared_ptr(new QImage(mask->scaled(new_width, new_height, Qt::IgnoreAspectRatio, Qt::SmoothTransformation))); @@ -747,7 +747,7 @@ void Frame::AddColor(int new_width, int new_height, std::string new_color) const GenericScopedLock lock(addingImageSection); #pragma omp critical (AddImage) { - image = std::shared_ptr(new QImage(new_width, new_height, QImage::Format_RGBA8888)); + image = std::shared_ptr(new QImage(new_width, new_height, QImage::Format_ARGB32_Premultiplied)); // Fill with solid color image->fill(QColor(QString::fromStdString(color))); @@ -775,8 +775,8 @@ void Frame::AddImage(int new_width, int new_height, int bytes_per_pixel, QImage: image = std::shared_ptr(new QImage(qbuffer, new_width, new_height, new_width * bytes_per_pixel, type, (QImageCleanupFunction) &openshot::Frame::cleanUpBuffer, (void*) qbuffer)); // Always convert to RGBA8888 (if different) - if (image->format() != QImage::Format_RGBA8888) - *image = image->convertToFormat(QImage::Format_RGBA8888); + if (image->format() != QImage::Format_ARGB32_Premultiplied) + *image = image->convertToFormat(QImage::Format_ARGB32_Premultiplied); // Update height and width width = image->width(); @@ -798,9 +798,9 @@ void Frame::AddImage(std::shared_ptr new_image) { image = new_image; - // Always convert to RGBA8888 (if different) - if (image->format() != QImage::Format_RGBA8888) - *image = image->convertToFormat(QImage::Format_RGBA8888); + // Always convert to Format_ARGB32_Premultiplied (if different) + if (image->format() != QImage::Format_ARGB32_Premultiplied) + *image = image->convertToFormat(QImage::Format_ARGB32_Premultiplied); 
// Update height and width width = image->width(); @@ -830,8 +830,8 @@ void Frame::AddImage(std::shared_ptr new_image, bool only_odd_lines) if (image == new_image || image->size() != new_image->size()) { ret = true; } - else if (new_image->format() != image->format()) { - new_image = std::shared_ptr(new QImage(new_image->convertToFormat(image->format()))); + else if (new_image->format() != QImage::Format_ARGB32_Premultiplied) { + new_image = std::shared_ptr(new QImage(new_image->convertToFormat(QImage::Format_ARGB32_Premultiplied))); } } if (ret) { @@ -970,7 +970,7 @@ void Frame::AddMagickImage(std::shared_ptr new_image) MagickCore::ExportImagePixels(new_image->constImage(), 0, 0, new_image->columns(), new_image->rows(), "RGBA", Magick::CharPixel, buffer, &exception); // Create QImage of frame data - image = std::shared_ptr(new QImage(qbuffer, width, height, width * BPP, QImage::Format_RGBA8888, (QImageCleanupFunction) &cleanUpBuffer, (void*) qbuffer)); + image = std::shared_ptr(new QImage(qbuffer, width, height, width * BPP, QImage::Format_ARGB32_Premultiplied, (QImageCleanupFunction) &cleanUpBuffer, (void*) qbuffer)); // Update height and width width = image->width(); diff --git a/src/QtHtmlReader.cpp b/src/QtHtmlReader.cpp index 6b502fbd..776fb142 100644 --- a/src/QtHtmlReader.cpp +++ b/src/QtHtmlReader.cpp @@ -62,7 +62,7 @@ void QtHtmlReader::Open() if (!is_open) { // create image - image = std::shared_ptr(new QImage(width, height, QImage::Format_RGBA8888)); + image = std::shared_ptr(new QImage(width, height, QImage::Format_ARGB32_Premultiplied)); image->fill(QColor(background_color.c_str())); //start painting diff --git a/src/QtImageReader.cpp b/src/QtImageReader.cpp index 725d8818..0f3e789b 100644 --- a/src/QtImageReader.cpp +++ b/src/QtImageReader.cpp @@ -104,9 +104,6 @@ void QtImageReader::Open() throw InvalidFile("File could not be opened.", path.toStdString()); } - // Convert to proper format - image = std::shared_ptr(new 
QImage(image->convertToFormat(QImage::Format_RGBA8888))); - // Update image properties info.has_audio = false; info.has_video = true; @@ -257,8 +254,6 @@ std::shared_ptr QtImageReader::GetFrame(int64_t requested_frame) cached_image = std::shared_ptr(new QImage(image->scaled(max_width, max_height, Qt::KeepAspectRatio, Qt::SmoothTransformation))); } - cached_image = std::shared_ptr(new QImage(cached_image->convertToFormat(QImage::Format_RGBA8888))); - // Set max size (to later determine if max_size is changed) max_size.setWidth(max_width); max_size.setHeight(max_height); diff --git a/src/QtTextReader.cpp b/src/QtTextReader.cpp index d91d164e..9a048feb 100644 --- a/src/QtTextReader.cpp +++ b/src/QtTextReader.cpp @@ -67,7 +67,7 @@ void QtTextReader::Open() if (!is_open) { // create image - image = std::shared_ptr(new QImage(width, height, QImage::Format_RGBA8888)); + image = std::shared_ptr(new QImage(width, height, QImage::Format_ARGB32_Premultiplied)); image->fill(QColor(background_color.c_str())); QPainter painter; diff --git a/src/effects/Bars.cpp b/src/effects/Bars.cpp index 3f9aac34..14064a71 100644 --- a/src/effects/Bars.cpp +++ b/src/effects/Bars.cpp @@ -68,7 +68,7 @@ std::shared_ptr Bars::GetFrame(std::shared_ptr frame, int64_t fram std::shared_ptr frame_image = frame->GetImage(); // Get bar color (and create small color image) - std::shared_ptr tempColor = std::shared_ptr(new QImage(frame_image->width(), 1, QImage::Format_RGBA8888)); + std::shared_ptr tempColor = std::shared_ptr(new QImage(frame_image->width(), 1, QImage::Format_ARGB32_Premultiplied)); tempColor->fill(QColor(QString::fromStdString(color.GetColorHex(frame_number)))); // Get current keyframe values diff --git a/src/effects/Crop.cpp b/src/effects/Crop.cpp index b1c3d38d..5ef7f7e6 100644 --- a/src/effects/Crop.cpp +++ b/src/effects/Crop.cpp @@ -68,7 +68,7 @@ std::shared_ptr Crop::GetFrame(std::shared_ptr frame, int64_t fram std::shared_ptr frame_image = frame->GetImage(); // Get transparent color 
(and create small transparent image) - std::shared_ptr tempColor = std::shared_ptr(new QImage(frame_image->width(), 1, QImage::Format_RGBA8888)); + std::shared_ptr tempColor = std::shared_ptr(new QImage(frame_image->width(), 1, QImage::Format_ARGB32_Premultiplied)); tempColor->fill(QColor(QString::fromStdString("transparent"))); // Get current keyframe values diff --git a/src/effects/Deinterlace.cpp b/src/effects/Deinterlace.cpp index 39b3316a..c9b8e17e 100644 --- a/src/effects/Deinterlace.cpp +++ b/src/effects/Deinterlace.cpp @@ -73,7 +73,7 @@ std::shared_ptr Deinterlace::GetFrame(std::shared_ptr frame, int64 const unsigned char* pixels = image->bits(); // Create a smaller, new image - QImage deinterlaced_image(image->width(), image->height() / 2, QImage::Format_RGBA8888); + QImage deinterlaced_image(image->width(), image->height() / 2, QImage::Format_ARGB32_Premultiplied); const unsigned char* deinterlaced_pixels = deinterlaced_image.bits(); // Loop through the scanlines of the image (even or odd) From 6bd7fb72353a7f113739790228c4fed50e0cb22e Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Wed, 14 Oct 2020 03:06:30 -0500 Subject: [PATCH 11/14] Replacing ARGB32_Premultiplied with Format_RGBA8888_Premultiplied, which still seems to benefit from performance, but keeps the byte order the same as before. 
win win --- src/CacheDisk.cpp | 2 +- src/FFmpegReader.cpp | 4 ++-- src/Frame.cpp | 28 ++++++++++++++-------------- src/QtHtmlReader.cpp | 2 +- src/QtImageReader.cpp | 4 ++-- src/QtTextReader.cpp | 2 +- src/effects/Bars.cpp | 2 +- src/effects/Crop.cpp | 2 +- src/effects/Deinterlace.cpp | 2 +- 9 files changed, 24 insertions(+), 24 deletions(-) diff --git a/src/CacheDisk.cpp b/src/CacheDisk.cpp index 789ab25c..e4ef4c9d 100644 --- a/src/CacheDisk.cpp +++ b/src/CacheDisk.cpp @@ -235,7 +235,7 @@ std::shared_ptr CacheDisk::GetFrame(int64_t frame_number) image->load(frame_path); // Set pixel formatimage-> - image = std::shared_ptr(new QImage(image->convertToFormat(QImage::Format_ARGB32_Premultiplied))); + image = std::shared_ptr(new QImage(image->convertToFormat(QImage::Format_RGBA8888_Premultiplied))); // Create frame object std::shared_ptr frame(new Frame()); diff --git a/src/FFmpegReader.cpp b/src/FFmpegReader.cpp index d336654a..19bca924 100644 --- a/src/FFmpegReader.cpp +++ b/src/FFmpegReader.cpp @@ -1363,7 +1363,7 @@ void FFmpegReader::ProcessVideoPacket(int64_t requested_frame) { scale_mode = SWS_BICUBIC; } SwsContext *img_convert_ctx = sws_getContext(info.width, info.height, AV_GET_CODEC_PIXEL_FORMAT(pStream, pCodecCtx), width, - height, AV_PIX_FMT_RGB32, scale_mode, NULL, NULL, NULL); + height, PIX_FMT_RGBA, scale_mode, NULL, NULL, NULL); // Resize / Convert to RGB sws_scale(img_convert_ctx, my_frame->data, my_frame->linesize, 0, @@ -1373,7 +1373,7 @@ void FFmpegReader::ProcessVideoPacket(int64_t requested_frame) { std::shared_ptr f = CreateFrame(current_frame); // Add Image data to frame - f->AddImage(width, height, 4, QImage::Format_ARGB32_Premultiplied, buffer); + f->AddImage(width, height, 4, QImage::Format_RGBA8888_Premultiplied, buffer); // Update working cache working_cache.Add(f); diff --git a/src/Frame.cpp b/src/Frame.cpp index 80519292..dcd26dd8 100644 --- a/src/Frame.cpp +++ b/src/Frame.cpp @@ -231,7 +231,7 @@ std::shared_ptr Frame::GetWaveform(int 
width, int height, int Red, int G } // Create blank image - wave_image = std::shared_ptr(new QImage(total_width, total_height, QImage::Format_ARGB32_Premultiplied)); + wave_image = std::shared_ptr(new QImage(total_width, total_height, QImage::Format_RGBA8888_Premultiplied)); wave_image->fill(QColor(0,0,0,0)); // Load QPainter with wave_image device @@ -262,7 +262,7 @@ std::shared_ptr Frame::GetWaveform(int width, int height, int Red, int G else { // No audio samples present - wave_image = std::shared_ptr(new QImage(width, height, QImage::Format_ARGB32_Premultiplied)); + wave_image = std::shared_ptr(new QImage(width, height, QImage::Format_RGBA8888_Premultiplied)); wave_image->fill(QColor(QString::fromStdString("#000000"))); } @@ -618,7 +618,7 @@ void Frame::Thumbnail(std::string path, int new_width, int new_height, std::stri std::string background_color, bool ignore_aspect, std::string format, int quality, float rotate) { // Create blank thumbnail image & fill background color - std::shared_ptr thumbnail = std::shared_ptr(new QImage(new_width, new_height, QImage::Format_ARGB32_Premultiplied)); + std::shared_ptr thumbnail = std::shared_ptr(new QImage(new_width, new_height, QImage::Format_RGBA8888_Premultiplied)); thumbnail->fill(QColor(QString::fromStdString(background_color))); // Create painter @@ -673,7 +673,7 @@ void Frame::Thumbnail(std::string path, int new_width, int new_height, std::stri overlay->load(QString::fromStdString(overlay_path)); // Set pixel format - overlay = std::shared_ptr(new QImage(overlay->convertToFormat(QImage::Format_ARGB32_Premultiplied))); + overlay = std::shared_ptr(new QImage(overlay->convertToFormat(QImage::Format_RGBA8888_Premultiplied))); // Resize to fit overlay = std::shared_ptr(new QImage(overlay->scaled(new_width, new_height, Qt::IgnoreAspectRatio, Qt::SmoothTransformation))); @@ -691,7 +691,7 @@ void Frame::Thumbnail(std::string path, int new_width, int new_height, std::stri mask->load(QString::fromStdString(mask_path)); // 
Set pixel format - mask = std::shared_ptr(new QImage(mask->convertToFormat(QImage::Format_ARGB32_Premultiplied))); + mask = std::shared_ptr(new QImage(mask->convertToFormat(QImage::Format_RGBA8888_Premultiplied))); // Resize to fit mask = std::shared_ptr(new QImage(mask->scaled(new_width, new_height, Qt::IgnoreAspectRatio, Qt::SmoothTransformation))); @@ -747,7 +747,7 @@ void Frame::AddColor(int new_width, int new_height, std::string new_color) const GenericScopedLock lock(addingImageSection); #pragma omp critical (AddImage) { - image = std::shared_ptr(new QImage(new_width, new_height, QImage::Format_ARGB32_Premultiplied)); + image = std::shared_ptr(new QImage(new_width, new_height, QImage::Format_RGBA8888_Premultiplied)); // Fill with solid color image->fill(QColor(QString::fromStdString(color))); @@ -775,8 +775,8 @@ void Frame::AddImage(int new_width, int new_height, int bytes_per_pixel, QImage: image = std::shared_ptr(new QImage(qbuffer, new_width, new_height, new_width * bytes_per_pixel, type, (QImageCleanupFunction) &openshot::Frame::cleanUpBuffer, (void*) qbuffer)); // Always convert to RGBA8888 (if different) - if (image->format() != QImage::Format_ARGB32_Premultiplied) - *image = image->convertToFormat(QImage::Format_ARGB32_Premultiplied); + if (image->format() != QImage::Format_RGBA8888_Premultiplied) + *image = image->convertToFormat(QImage::Format_RGBA8888_Premultiplied); // Update height and width width = image->width(); @@ -798,9 +798,9 @@ void Frame::AddImage(std::shared_ptr new_image) { image = new_image; - // Always convert to Format_ARGB32_Premultiplied (if different) - if (image->format() != QImage::Format_ARGB32_Premultiplied) - *image = image->convertToFormat(QImage::Format_ARGB32_Premultiplied); + // Always convert to Format_RGBA8888_Premultiplied (if different) + if (image->format() != QImage::Format_RGBA8888_Premultiplied) + *image = image->convertToFormat(QImage::Format_RGBA8888_Premultiplied); // Update height and width width = 
image->width(); @@ -830,8 +830,8 @@ void Frame::AddImage(std::shared_ptr new_image, bool only_odd_lines) if (image == new_image || image->size() != new_image->size()) { ret = true; } - else if (new_image->format() != QImage::Format_ARGB32_Premultiplied) { - new_image = std::shared_ptr(new QImage(new_image->convertToFormat(QImage::Format_ARGB32_Premultiplied))); + else if (new_image->format() != QImage::Format_RGBA8888_Premultiplied) { + new_image = std::shared_ptr(new QImage(new_image->convertToFormat(QImage::Format_RGBA8888_Premultiplied))); } } if (ret) { @@ -970,7 +970,7 @@ void Frame::AddMagickImage(std::shared_ptr new_image) MagickCore::ExportImagePixels(new_image->constImage(), 0, 0, new_image->columns(), new_image->rows(), "RGBA", Magick::CharPixel, buffer, &exception); // Create QImage of frame data - image = std::shared_ptr(new QImage(qbuffer, width, height, width * BPP, QImage::Format_ARGB32_Premultiplied, (QImageCleanupFunction) &cleanUpBuffer, (void*) qbuffer)); + image = std::shared_ptr(new QImage(qbuffer, width, height, width * BPP, QImage::Format_RGBA8888_Premultiplied, (QImageCleanupFunction) &cleanUpBuffer, (void*) qbuffer)); // Update height and width width = image->width(); diff --git a/src/QtHtmlReader.cpp b/src/QtHtmlReader.cpp index 776fb142..e27f1c0f 100644 --- a/src/QtHtmlReader.cpp +++ b/src/QtHtmlReader.cpp @@ -62,7 +62,7 @@ void QtHtmlReader::Open() if (!is_open) { // create image - image = std::shared_ptr(new QImage(width, height, QImage::Format_ARGB32_Premultiplied)); + image = std::shared_ptr(new QImage(width, height, QImage::Format_RGBA8888_Premultiplied)); image->fill(QColor(background_color.c_str())); //start painting diff --git a/src/QtImageReader.cpp b/src/QtImageReader.cpp index 0f3e789b..0f4fdc38 100644 --- a/src/QtImageReader.cpp +++ b/src/QtImageReader.cpp @@ -82,7 +82,7 @@ void QtImageReader::Open() ResvgRenderer renderer(path); if (renderer.isValid()) { - image = std::shared_ptr(new QImage(renderer.defaultSize(), 
QImage::Format_ARGB32_Premultiplied)); + image = std::shared_ptr(new QImage(renderer.defaultSize(), QImage::Format_RGBA8888_Premultiplied)); image->fill(Qt::transparent); QPainter p(image.get()); @@ -236,7 +236,7 @@ std::shared_ptr QtImageReader::GetFrame(int64_t requested_frame) svg_size.scale(max_width, max_height, Qt::KeepAspectRatio); // Create empty QImage - cached_image = std::shared_ptr(new QImage(QSize(svg_size.width(), svg_size.height()), QImage::Format_ARGB32_Premultiplied)); + cached_image = std::shared_ptr(new QImage(QSize(svg_size.width(), svg_size.height()), QImage::Format_RGBA8888_Premultiplied)); cached_image->fill(Qt::transparent); // Render SVG into QImage diff --git a/src/QtTextReader.cpp b/src/QtTextReader.cpp index 9a048feb..4764cd0d 100644 --- a/src/QtTextReader.cpp +++ b/src/QtTextReader.cpp @@ -67,7 +67,7 @@ void QtTextReader::Open() if (!is_open) { // create image - image = std::shared_ptr(new QImage(width, height, QImage::Format_ARGB32_Premultiplied)); + image = std::shared_ptr(new QImage(width, height, QImage::Format_RGBA8888_Premultiplied)); image->fill(QColor(background_color.c_str())); QPainter painter; diff --git a/src/effects/Bars.cpp b/src/effects/Bars.cpp index 14064a71..7180bdcd 100644 --- a/src/effects/Bars.cpp +++ b/src/effects/Bars.cpp @@ -68,7 +68,7 @@ std::shared_ptr Bars::GetFrame(std::shared_ptr frame, int64_t fram std::shared_ptr frame_image = frame->GetImage(); // Get bar color (and create small color image) - std::shared_ptr tempColor = std::shared_ptr(new QImage(frame_image->width(), 1, QImage::Format_ARGB32_Premultiplied)); + std::shared_ptr tempColor = std::shared_ptr(new QImage(frame_image->width(), 1, QImage::Format_RGBA8888_Premultiplied)); tempColor->fill(QColor(QString::fromStdString(color.GetColorHex(frame_number)))); // Get current keyframe values diff --git a/src/effects/Crop.cpp b/src/effects/Crop.cpp index 5ef7f7e6..170f6fa5 100644 --- a/src/effects/Crop.cpp +++ b/src/effects/Crop.cpp @@ -68,7 +68,7 @@ 
std::shared_ptr Crop::GetFrame(std::shared_ptr frame, int64_t fram std::shared_ptr frame_image = frame->GetImage(); // Get transparent color (and create small transparent image) - std::shared_ptr tempColor = std::shared_ptr(new QImage(frame_image->width(), 1, QImage::Format_ARGB32_Premultiplied)); + std::shared_ptr tempColor = std::shared_ptr(new QImage(frame_image->width(), 1, QImage::Format_RGBA8888_Premultiplied)); tempColor->fill(QColor(QString::fromStdString("transparent"))); // Get current keyframe values diff --git a/src/effects/Deinterlace.cpp b/src/effects/Deinterlace.cpp index c9b8e17e..24402288 100644 --- a/src/effects/Deinterlace.cpp +++ b/src/effects/Deinterlace.cpp @@ -73,7 +73,7 @@ std::shared_ptr Deinterlace::GetFrame(std::shared_ptr frame, int64 const unsigned char* pixels = image->bits(); // Create a smaller, new image - QImage deinterlaced_image(image->width(), image->height() / 2, QImage::Format_ARGB32_Premultiplied); + QImage deinterlaced_image(image->width(), image->height() / 2, QImage::Format_RGBA8888_Premultiplied); const unsigned char* deinterlaced_pixels = deinterlaced_image.bits(); // Loop through the scanlines of the image (even or odd) From 1eecda3d4ef3d49285acca580e571f7c5813be33 Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Wed, 14 Oct 2020 14:19:26 -0500 Subject: [PATCH 12/14] Fix alpha and mask effects, so they correctly multiply the alpha to all colors (since we have switched to a premulitplied alpha format) --- src/Clip.cpp | 6 +++++- src/effects/Mask.cpp | 16 ++++++++++++---- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/src/Clip.cpp b/src/Clip.cpp index d81b5c61..166f716c 100644 --- a/src/Clip.cpp +++ b/src/Clip.cpp @@ -1155,7 +1155,11 @@ void Clip::apply_keyframes(std::shared_ptr frame, int width, int height) // Loop through pixels for (int pixel = 0, byte_index=0; pixel < source_image->width() * source_image->height(); pixel++, byte_index+=4) { - // Apply alpha to pixel + // Apply alpha to pixel values 
(since we use a premultiplied value, we must + // multiply the alpha with all colors). + pixels[byte_index + 0] *= alpha_value; + pixels[byte_index + 1] *= alpha_value; + pixels[byte_index + 2] *= alpha_value; pixels[byte_index + 3] *= alpha_value; } diff --git a/src/effects/Mask.cpp b/src/effects/Mask.cpp index 11c37f05..c5ed16c1 100644 --- a/src/effects/Mask.cpp +++ b/src/effects/Mask.cpp @@ -117,6 +117,7 @@ std::shared_ptr Mask::GetFrame(std::shared_ptr frame, int64_t fram R = mask_pixels[byte_index]; G = mask_pixels[byte_index + 1]; B = mask_pixels[byte_index + 2]; + A = mask_pixels[byte_index + 3]; // Get the average luminosity gray_value = qGray(R, G, B); @@ -131,16 +132,23 @@ std::shared_ptr Mask::GetFrame(std::shared_ptr frame, int64_t fram // Constrain the value from 0 to 255 gray_value = constrain(gray_value); + // Calculate the % change in alpha + float alpha_percent = float(constrain(A - gray_value)) / 255.0; + // Set the alpha channel to the gray value if (replace_image) { - // Replace frame pixels with gray value + // Replace frame pixels with gray value (including alpha channel) pixels[byte_index + 0] = gray_value; pixels[byte_index + 1] = gray_value; pixels[byte_index + 2] = gray_value; + pixels[byte_index + 3] = gray_value; } else { - // Set alpha channel - A = pixels[byte_index + 3]; - pixels[byte_index + 3] = constrain(A - gray_value); + // Mulitply new alpha value with all the colors (since we are using a premultiplied + // alpha format) + pixels[byte_index + 0] *= alpha_percent; + pixels[byte_index + 1] *= alpha_percent; + pixels[byte_index + 2] *= alpha_percent; + pixels[byte_index + 3] *= alpha_percent; } } From f4d0d9d7bb4bf97184bd8692e0ee619562ba5c66 Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Fri, 16 Oct 2020 18:04:10 -0500 Subject: [PATCH 13/14] Fixing some cpp_test complaints --- src/Clip.cpp | 3 ++- tests/FrameMapper_Tests.cpp | 5 ----- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/src/Clip.cpp b/src/Clip.cpp 
index 1970884d..0035d766 100644 --- a/src/Clip.cpp +++ b/src/Clip.cpp @@ -1150,7 +1150,7 @@ void Clip::apply_keyframes(std::shared_ptr frame, int width, int height) float alpha_value = alpha.GetValue(frame->number); // Get source image's pixels - unsigned char *pixels = (unsigned char *) source_image->bits(); + unsigned char *pixels = source_image->bits(); // Loop through pixels for (int pixel = 0, byte_index=0; pixel < source_image->width() * source_image->height(); pixel++, byte_index+=4) @@ -1162,6 +1162,7 @@ void Clip::apply_keyframes(std::shared_ptr frame, int width, int height) pixels[byte_index + 2] *= alpha_value; pixels[byte_index + 3] *= alpha_value; } + pixels = NULL; // Debug output ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Set Alpha & Opacity)", "alpha_value", alpha_value, "frame->number", frame->number); diff --git a/tests/FrameMapper_Tests.cpp b/tests/FrameMapper_Tests.cpp index 54e76e64..9caa2a18 100644 --- a/tests/FrameMapper_Tests.cpp +++ b/tests/FrameMapper_Tests.cpp @@ -287,13 +287,8 @@ TEST(FrameMapper_AudioSample_Distribution) c2.End(10.0); // Add clips - t1.AddClip(&c1); t1.AddClip(&c2); - - std::string json_val = t1.Json(); - - //std::cout << json_val << std::endl; //t1.SetJson(t1.Json()); t1.Open(); From 29107bc427d467ed4a0018e496fa2318ecbe77cd Mon Sep 17 00:00:00 2001 From: Jonathan Thomas Date: Fri, 16 Oct 2020 18:22:42 -0500 Subject: [PATCH 14/14] Fixing some additional cpp_test complaints --- src/Clip.cpp | 1 - src/Timeline.cpp | 2 +- src/effects/Mask.cpp | 18 ++++++------------ 3 files changed, 7 insertions(+), 14 deletions(-) diff --git a/src/Clip.cpp b/src/Clip.cpp index 0035d766..3c2c0519 100644 --- a/src/Clip.cpp +++ b/src/Clip.cpp @@ -1162,7 +1162,6 @@ void Clip::apply_keyframes(std::shared_ptr frame, int width, int height) pixels[byte_index + 2] *= alpha_value; pixels[byte_index + 3] *= alpha_value; } - pixels = NULL; // Debug output ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Set Alpha 
& Opacity)", "alpha_value", alpha_value, "frame->number", frame->number); diff --git a/src/Timeline.cpp b/src/Timeline.cpp index f7f018e7..4d12b137 100644 --- a/src/Timeline.cpp +++ b/src/Timeline.cpp @@ -492,7 +492,7 @@ void Timeline::add_layer(std::shared_ptr new_frame, Clip* source_clip, in /* Apply effects to the source frame (if any). If multiple clips are overlapping, only process the * effects on the top clip. */ - if (is_top_clip && source_frame) { + if (is_top_clip) { #pragma omp critical (T_addLayer) source_frame = apply_effects(source_frame, timeline_frame_number, source_clip->Layer()); } diff --git a/src/effects/Mask.cpp b/src/effects/Mask.cpp index d0e86cd1..a631775f 100644 --- a/src/effects/Mask.cpp +++ b/src/effects/Mask.cpp @@ -102,12 +102,6 @@ std::shared_ptr Mask::GetFrame(std::shared_ptr frame, int64_t fram unsigned char *pixels = (unsigned char *) frame_image->bits(); unsigned char *mask_pixels = (unsigned char *) original_mask->bits(); - int R = 0; - int G = 0; - int B = 0; - int A = 0; - int gray_value = 0; - float factor = 0.0; double contrast_value = (contrast.GetValue(frame_number)); double brightness_value = (brightness.GetValue(frame_number)); @@ -115,16 +109,16 @@ std::shared_ptr Mask::GetFrame(std::shared_ptr frame, int64_t fram for (int pixel = 0, byte_index=0; pixel < original_mask->width() * original_mask->height(); pixel++, byte_index+=4) { // Get the RGB values from the pixel - R = mask_pixels[byte_index]; - G = mask_pixels[byte_index + 1]; - B = mask_pixels[byte_index + 2]; - A = mask_pixels[byte_index + 3]; + int R = mask_pixels[byte_index]; + int G = mask_pixels[byte_index + 1]; + int B = mask_pixels[byte_index + 2]; + int A = mask_pixels[byte_index + 3]; // Get the average luminosity - gray_value = qGray(R, G, B); + int gray_value = qGray(R, G, B); // Adjust the contrast - factor = (259 * (contrast_value + 255)) / (255 * (259 - contrast_value)); + float factor = (259 * (contrast_value + 255)) / (255 * (259 - contrast_value)); 
gray_value = constrain((factor * (gray_value - 128)) + 128); // Adjust the brightness