diff --git a/src/Clip.cpp b/src/Clip.cpp
index 506d5081..b876741a 100644
--- a/src/Clip.cpp
+++ b/src/Clip.cpp
@@ -694,7 +694,6 @@ string Clip::PropertiesJSON(int64_t requested_frame) {
root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
root["gravity"] = add_property_json("Gravity", gravity, "int", "", NULL, 0, 8, false, requested_frame);
root["scale"] = add_property_json("Scale", scale, "int", "", NULL, 0, 3, false, requested_frame);
- root["anchor"] = add_property_json("Anchor", anchor, "int", "", NULL, 0, 1, false, requested_frame);
root["display"] = add_property_json("Frame Number", display, "int", "", NULL, 0, 3, false, requested_frame);
root["waveform"] = add_property_json("Waveform", waveform, "int", "", NULL, 0, 1, false, requested_frame);
@@ -715,10 +714,6 @@ string Clip::PropertiesJSON(int64_t requested_frame) {
root["scale"]["choices"].append(add_property_choice_json("Stretch", SCALE_STRETCH, scale));
root["scale"]["choices"].append(add_property_choice_json("None", SCALE_NONE, scale));
- // Add anchor choices (dropdown style)
- root["anchor"]["choices"].append(add_property_choice_json("Canvas", ANCHOR_CANVAS, anchor));
- root["anchor"]["choices"].append(add_property_choice_json("Viewport", ANCHOR_VIEWPORT, anchor));
-
// Add frame number display choices (dropdown style)
root["display"]["choices"].append(add_property_choice_json("None", FRAME_DISPLAY_NONE, display));
root["display"]["choices"].append(add_property_choice_json("Clip", FRAME_DISPLAY_CLIP, display));
diff --git a/src/FFmpegReader.cpp b/src/FFmpegReader.cpp
index 57dffc23..f4e61958 100644
--- a/src/FFmpegReader.cpp
+++ b/src/FFmpegReader.cpp
@@ -44,7 +44,7 @@ FFmpegReader::FFmpegReader(string path)
avcodec_register_all();
// Init cache
- working_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 30, info.width, info.height, info.sample_rate, info.channels);
+ working_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * info.fps.ToDouble() * 2, info.width, info.height, info.sample_rate, info.channels);
missing_frames.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
final_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
@@ -65,7 +65,7 @@ FFmpegReader::FFmpegReader(string path, bool inspect_reader)
avcodec_register_all();
// Init cache
- working_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 30, info.width, info.height, info.sample_rate, info.channels);
+ working_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * info.fps.ToDouble() * 2, info.width, info.height, info.sample_rate, info.channels);
missing_frames.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
final_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
@@ -207,7 +207,7 @@ void FFmpegReader::Open()
previous_packet_location.sample_start = 0;
// Adjust cache size based on size of frame and audio
- working_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 30, info.width, info.height, info.sample_rate, info.channels);
+ working_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * info.fps.ToDouble() * 2, info.width, info.height, info.sample_rate, info.channels);
missing_frames.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
final_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
@@ -399,7 +399,7 @@ void FFmpegReader::UpdateVideoInfo()
}
// Override an invalid framerate
- if (info.fps.ToFloat() > 120.0f || (info.fps.num == 0 || info.fps.den == 0))
+ if (info.fps.ToFloat() > 240.0f || (info.fps.num == 0 || info.fps.den == 0))
{
// Set a few important default video settings (so audio can be divided into frames)
info.fps.num = 24;
@@ -568,16 +568,16 @@ std::shared_ptr FFmpegReader::ReadStream(int64_t requested_frame)
num_packets_since_video_frame = 0;
// Check the status of a seek (if any)
- if (is_seeking)
+ if (is_seeking)
#pragma omp critical (openshot_seek)
- check_seek = CheckSeek(true);
- else
- check_seek = false;
+ check_seek = CheckSeek(true);
+ else
+ check_seek = false;
- if (check_seek) {
- // Jump to the next iteration of this loop
- continue;
- }
+ if (check_seek) {
+ // Jump to the next iteration of this loop
+ continue;
+ }
// Get the AVFrame from the current packet
frame_finished = GetAVFrame();
@@ -600,16 +600,16 @@ std::shared_ptr FFmpegReader::ReadStream(int64_t requested_frame)
num_packets_since_video_frame++;
// Check the status of a seek (if any)
- if (is_seeking)
+ if (is_seeking)
#pragma omp critical (openshot_seek)
- check_seek = CheckSeek(false);
- else
- check_seek = false;
+ check_seek = CheckSeek(false);
+ else
+ check_seek = false;
- if (check_seek) {
- // Jump to the next iteration of this loop
- continue;
- }
+ if (check_seek) {
+ // Jump to the next iteration of this loop
+ continue;
+ }
// Update PTS / Frame Offset (if any)
UpdatePTSOffset(false);
@@ -794,7 +794,7 @@ bool FFmpegReader::CheckSeek(bool is_video)
ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckSeek (Too far, seek again)", "is_video_seek", is_video_seek, "max_seeked_frame", max_seeked_frame, "seeking_frame", seeking_frame, "seeking_pts", seeking_pts, "seek_video_frame_found", seek_video_frame_found, "seek_audio_frame_found", seek_audio_frame_found);
// Seek again... to the nearest Keyframe
- Seek(seeking_frame - (20 * seek_count * seek_count));
+ Seek(seeking_frame - (10 * seek_count * seek_count));
}
else
{
@@ -1258,7 +1258,7 @@ void FFmpegReader::Seek(int64_t requested_frame)
}
// Debug output
- ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::Seek", "requested_frame", requested_frame, "seek_count", seek_count, "last_frame", last_frame, "processing_video_frames_size", processing_video_frames_size, "processing_audio_frames_size", processing_audio_frames_size, "", -1);
+ ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::Seek", "requested_frame", requested_frame, "seek_count", seek_count, "last_frame", last_frame, "processing_video_frames_size", processing_video_frames_size, "processing_audio_frames_size", processing_audio_frames_size, "video_pts_offset", video_pts_offset);
// Wait for any processing frames to complete
while (processing_video_frames_size + processing_audio_frames_size > 0) {
@@ -1300,7 +1300,7 @@ void FFmpegReader::Seek(int64_t requested_frame)
seek_count++;
// If seeking near frame 1, we need to close and re-open the file (this is more reliable than seeking)
- int buffer_amount = 6;
+ int buffer_amount = 8;
if (requested_frame - buffer_amount < 20)
{
// Close and re-open file (basically seeking to frame 1)
@@ -1730,6 +1730,11 @@ void FFmpegReader::CheckWorkingFrames(bool end_of_stream, int64_t requested_fram
// No frames found
break;
+ // Remove frames which are too old
+ if (f && f->number < requested_frame) {
+ working_cache.Remove(f->number);
+ }
+
// Check if this frame is 'missing'
CheckMissingFrame(f->number);
diff --git a/src/FFmpegWriter.cpp b/src/FFmpegWriter.cpp
index 5f50b85c..8330dee0 100644
--- a/src/FFmpegWriter.cpp
+++ b/src/FFmpegWriter.cpp
@@ -378,10 +378,6 @@ void FFmpegWriter::WriteFrame(std::shared_ptr frame)
else
{
- // YES, WRITING... so wait until it finishes, before writing again
- while (is_writing)
- usleep(250000); // sleep for 250 milliseconds
-
// Write frames to video file
write_queued_frames();
}
diff --git a/src/Frame.cpp b/src/Frame.cpp
index c0abb39e..0c3b7974 100644
--- a/src/Frame.cpp
+++ b/src/Frame.cpp
@@ -848,6 +848,11 @@ void Frame::AddAudio(bool replaceSamples, int destChannel, int destStartSample,
audio->clear(destChannel, destStartSample, numSamples);
// Get max volume of the current audio data
+ // TODO: This always appears to be 0, which is probably not expected since that means gainFactor is always multiplied by 1.0 below.
+ // "sum_volumes = current_max_volume + new_max_volume" is then alwasy "sum_volumes = 0 + new_max_volume",
+ // which makes "gainFactor *= ((current_max_volume + new_max_volume) - (current_max_volume * new_max_volume)) / sum_volumes;"
+ // which simplifies to "gainFactor *= new_max_volume / new_max_volume;" aka "gainFactor *= 1.0"
+ // - Rich Alloway
float current_max_volume = audio->getMagnitude(destChannel, destStartSample, numSamples);
// Determine max volume of new audio data (before we add them together)
@@ -862,8 +867,9 @@ void Frame::AddAudio(bool replaceSamples, int destChannel, int destStartSample,
float gainFactor = gainToApplyToSource;
if (sum_volumes > 0.0) {
// Reduce both sources by this amount (existing samples and new samples)
- gainFactor = ((current_max_volume + new_max_volume) - (current_max_volume * new_max_volume)) / sum_volumes;
+ gainFactor *= ((current_max_volume + new_max_volume) - (current_max_volume * new_max_volume)) / sum_volumes;
audio->applyGain(gainFactor);
+ ZmqLogger::Instance()->AppendDebugMethod("Frame::AddAudio", "gainToApplyToSource", gainToApplyToSource, "gainFactor", gainFactor, "sum_volumes", sum_volumes, "current_max_volume", current_max_volume, "new_max_volume", new_max_volume, "((current_max_volume + new_max_volume) - (current_max_volume * new_max_volume)) / sum_volumes", ((current_max_volume + new_max_volume) - (current_max_volume * new_max_volume)) / sum_volumes);
}
// Add samples to frame's audio buffer
diff --git a/src/Qt/AudioPlaybackThread.cpp b/src/Qt/AudioPlaybackThread.cpp
index 60090bf9..fac2e3fc 100644
--- a/src/Qt/AudioPlaybackThread.cpp
+++ b/src/Qt/AudioPlaybackThread.cpp
@@ -165,8 +165,7 @@ namespace openshot
transport.start();
while (!threadShouldExit() && transport.isPlaying() && is_playing)
- sleep(100);
-
+ usleep(2500);
// Stop audio and shutdown transport
Stop();
diff --git a/src/Qt/PlayerPrivate.cpp b/src/Qt/PlayerPrivate.cpp
index a3835a23..63d20e99 100644
--- a/src/Qt/PlayerPrivate.cpp
+++ b/src/Qt/PlayerPrivate.cpp
@@ -129,7 +129,7 @@ namespace openshot
}
// Sleep (leaving the video frame on the screen for the correct amount of time)
- if (sleep_time > 0) sleep(sleep_time);
+ if (sleep_time > 0) usleep(sleep_time * 1000);
}
}
diff --git a/src/Qt/VideoCacheThread.cpp b/src/Qt/VideoCacheThread.cpp
index 5653e84b..ed224de5 100644
--- a/src/Qt/VideoCacheThread.cpp
+++ b/src/Qt/VideoCacheThread.cpp
@@ -107,7 +107,7 @@ namespace openshot
}
// Sleep for 1 frame length
- sleep(frame_time);
+ usleep(frame_time * 1000);
}
return;
diff --git a/src/ZmqLogger.cpp b/src/ZmqLogger.cpp
index 0c466246..5a608050 100644
--- a/src/ZmqLogger.cpp
+++ b/src/ZmqLogger.cpp
@@ -221,4 +221,4 @@ void ZmqLogger::AppendDebugMethod(string method_name, string arg1_name, float ar
// Send message through ZMQ
Log(message.str());
}
-}
+}
\ No newline at end of file
diff --git a/src/effects/Blur.cpp b/src/effects/Blur.cpp
index d7910230..b54929c7 100644
--- a/src/effects/Blur.cpp
+++ b/src/effects/Blur.cpp
@@ -122,6 +122,12 @@ std::shared_ptr Blur::GetFrame(std::shared_ptr frame, int64_t fram
// Remove boxes
delete[] bxs;
+
+ // Copy the blur_* results back into the channel arrays for the vertical blur pass (or the next iteration)
+ for (int i = 0; i < (frame_image->width() * frame_image->height()); i++) red[i] = blur_red[i];
+ for (int i = 0; i < (frame_image->width() * frame_image->height()); i++) green[i] = blur_green[i];
+ for (int i = 0; i < (frame_image->width() * frame_image->height()); i++) blue[i] = blur_blue[i];
+ for (int i = 0; i < (frame_image->width() * frame_image->height()); i++) alpha[i] = blur_alpha[i];
}
// VERTICAL BLUR (if any)
@@ -137,6 +143,12 @@ std::shared_ptr Blur::GetFrame(std::shared_ptr frame, int64_t fram
// Remove boxes
delete[] bxs;
+
+ // Copy the blur_* results back into the channel arrays for the next iteration
+ for (int i = 0; i < (frame_image->width() * frame_image->height()); i++) red[i] = blur_red[i];
+ for (int i = 0; i < (frame_image->width() * frame_image->height()); i++) green[i] = blur_green[i];
+ for (int i = 0; i < (frame_image->width() * frame_image->height()); i++) blue[i] = blur_blue[i];
+ for (int i = 0; i < (frame_image->width() * frame_image->height()); i++) alpha[i] = blur_alpha[i];
}
}
diff --git a/tests/FFmpegReader_Tests.cpp b/tests/FFmpegReader_Tests.cpp
index 82e85735..b71465bb 100644
--- a/tests/FFmpegReader_Tests.cpp
+++ b/tests/FFmpegReader_Tests.cpp
@@ -72,8 +72,8 @@ TEST(FFmpegReader_Check_Audio_File)
CHECK_CLOSE(0.0f, samples[50], 0.00001);
CHECK_CLOSE(0.0f, samples[100], 0.00001);
CHECK_CLOSE(0.0f, samples[200], 0.00001);
- CHECK_CLOSE(0.164062f, samples[230], 0.00001);
- CHECK_CLOSE(-0.0625f, samples[300], 0.00001);
+ CHECK_CLOSE(0.157566f, samples[230], 0.00001);
+ CHECK_CLOSE(-0.060025f, samples[300], 0.00001);
// Close reader
r.Close();
diff --git a/tests/Timeline_Tests.cpp b/tests/Timeline_Tests.cpp
index 911518c1..8c81579c 100644
--- a/tests/Timeline_Tests.cpp
+++ b/tests/Timeline_Tests.cpp
@@ -146,9 +146,9 @@ TEST(Timeline_Check_Two_Track_Video)
f = t.GetFrame(24);
// Check image properties
- CHECK_EQUAL(176, (int)f->GetPixels(pixel_row)[pixel_index]);
- CHECK_EQUAL(0, (int)f->GetPixels(pixel_row)[pixel_index + 1]);
- CHECK_EQUAL(186, (int)f->GetPixels(pixel_row)[pixel_index + 2]);
+ CHECK_EQUAL(186, (int)f->GetPixels(pixel_row)[pixel_index]);
+ CHECK_EQUAL(106, (int)f->GetPixels(pixel_row)[pixel_index + 1]);
+ CHECK_EQUAL(0, (int)f->GetPixels(pixel_row)[pixel_index + 2]);
CHECK_EQUAL(255, (int)f->GetPixels(pixel_row)[pixel_index + 3]);
// Get frame