Many improvements for smooth audio playback when 'time' keyframes are used for time remapping.

- Added new Frame::ReverseAudio() helper method - for time keyframe support playing backwards audio
- Fixed math rounding error on time keyframe Resampler, to use the rounded source samples value
- Removed some unused args and Frame audio methods
- Reset FrameMapper resample context when non-adjacent frames are requested
- Correctly reverse Frame audio if a time keyframe is present, and reversing audio
- Rewrite Keyframe::IsIncreasing() function, to give accurate direction of time keyframes at any index (special logic to support first and last indexes)
- Fixed Keyframe unit tests - since logic has changed a bit
- Improved time mapping unit tests
- Replace many c-style casts with static_cast
- Added new sine.wav test file
This commit is contained in:
Jonathan Thomas
2023-03-10 01:15:14 -06:00
parent d104664da3
commit 09ce05bbf2
15 changed files with 144 additions and 181 deletions

BIN
examples/sine.wav Normal file

Binary file not shown.

View File

@@ -18,6 +18,7 @@ using namespace openshot;
// Default constructor, max frames to cache is 20 // resample_source(NULL), buffer_source(NULL), num_of_samples(0), new_num_of_samples(0), dest_ratio(0), source_ratio(0), resampled_buffer(NULL), isPrepared(false)
AudioResampler::AudioResampler(int numChannels)
{
buffer = NULL;
resample_source = NULL;
buffer_source = NULL;
num_of_samples = 0;

View File

@@ -238,7 +238,7 @@ Clip::~Clip()
void Clip::AttachToObject(std::string object_id)
{
// Search for the tracked object on the timeline
Timeline* parentTimeline = (Timeline *) ParentTimeline();
Timeline* parentTimeline = static_cast<Timeline *>(ParentTimeline());
if (parentTimeline) {
// Create a smart pointer to the tracked object from the timeline
@@ -277,7 +277,7 @@ void Clip::Reader(ReaderBase* new_reader)
if (new_reader && allocated_reader) {
if (new_reader->Name() == "FrameMapper") {
// Determine if FrameMapper is pointing at the same allocated reader
FrameMapper* clip_mapped_reader = (FrameMapper*) new_reader;
FrameMapper* clip_mapped_reader = static_cast<FrameMapper*>(new_reader);
if (allocated_reader == clip_mapped_reader->Reader()) {
is_same_reader = true;
}
@@ -447,7 +447,7 @@ std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> backgroun
if (timeline != NULL && options != NULL) {
if (options->is_top_clip) {
// Apply global timeline effects (only to top clip... if overlapping, pass in timeline frame number)
Timeline* timeline_instance = (Timeline*) timeline;
Timeline* timeline_instance = static_cast<Timeline*>(timeline);
frame = timeline_instance->apply_effects(frame, background_frame->number, Layer());
}
}
@@ -580,6 +580,10 @@ void Clip::apply_timemapping(std::shared_ptr<Frame> frame)
std::shared_ptr<Frame> source_frame = GetOrCreateFrame(location.frame, false);
int frame_sample_count = source_frame->GetAudioSamplesCount() - location.sample_start;
// Reverse audio (if needed)
if (!is_increasing)
frame->ReverseAudio();
if (frame_sample_count == 0) {
// No samples found in source frame (fill with silence)
if (is_increasing) {
@@ -620,9 +624,10 @@ void Clip::apply_timemapping(std::shared_ptr<Frame> frame)
// We are about to clobber this with actual audio data (possibly resampled)
frame->AddAudioSilence(target_sample_count);
if (source_samples->getNumSamples() != target_sample_count) {
if (source_sample_count != target_sample_count) {
// Resample audio (if needed)
resampler->SetBuffer(source_samples, fabs(delta));
double resample_ratio = double(source_sample_count) / double(target_sample_count);
resampler->SetBuffer(source_samples, resample_ratio);
// Resample the data
juce::AudioBuffer<float> *resampled_buffer = resampler->GetResampledBuffer();
@@ -630,7 +635,7 @@ void Clip::apply_timemapping(std::shared_ptr<Frame> frame)
// Fill the frame with resampled data
for (int channel = 0; channel < Reader()->info.channels; channel++) {
// Add new (slower) samples, to the frame object
frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, 0), target_sample_count, 1.0f);
frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, 0), std::min(resampled_buffer->getNumSamples(), target_sample_count), 1.0f);
}
} else {
// Fill the frame
@@ -1153,7 +1158,7 @@ void Clip::AddEffect(EffectBase* effect)
sort_effects();
// Get the parent timeline of this clip
Timeline* parentTimeline = (Timeline *) ParentTimeline();
Timeline* parentTimeline = static_cast<Timeline *>(ParentTimeline());
if (parentTimeline)
effect->ParentTimeline(parentTimeline);
@@ -1247,7 +1252,7 @@ void Clip::apply_keyframes(std::shared_ptr<Frame> frame, std::shared_ptr<QImage>
painter.drawImage(0, 0, *source_image);
if (timeline) {
Timeline *t = (Timeline *) timeline;
Timeline *t = static_cast<Timeline *>(timeline);
// Draw frame #'s on top of image (if needed)
if (display != FRAME_DISPLAY_NONE) {
@@ -1576,7 +1581,7 @@ int64_t Clip::adjust_timeline_framenumber(int64_t clip_frame_number) {
// Get clip position from parent clip (if any)
float position = 0.0;
float start = 0.0;
Clip *parent = (Clip *) ParentClip();
Clip *parent = static_cast<Clip *>(ParentClip());
if (parent) {
position = parent->Position();
start = parent->Start();

View File

@@ -113,7 +113,7 @@ void EffectBase::SetJsonValue(const Json::Value root) {
if (ParentTimeline()){
// Get parent timeline
Timeline* parentTimeline = (Timeline *) ParentTimeline();
Timeline* parentTimeline = static_cast<Timeline *>(ParentTimeline());
// Get the list of effects on the timeline
std::list<EffectBase*> effects = parentTimeline->ClipEffects();
@@ -129,14 +129,14 @@ void EffectBase::SetJsonValue(const Json::Value root) {
}
// Set this effect properties with the parent effect properties (except the id and parent_effect_id)
Json::Value my_root;
Json::Value my_root;
if (parentEffect){
my_root = parentEffect->JsonValue();
my_root["id"] = this->Id();
my_root["parent_effect_id"] = this->info.parent_effect_id;
} else {
my_root = root;
}
my_root = root;
}
// Set parent data
ClipBase::SetJsonValue(my_root);
@@ -183,7 +183,7 @@ void EffectBase::ParentClip(openshot::ClipBase* new_clip) {
void EffectBase::SetParentEffect(std::string parentEffect_id) {
// Get parent Timeline
Timeline* parentTimeline = (Timeline *) ParentTimeline();
Timeline* parentTimeline = static_cast<Timeline *>(ParentTimeline());
if (parentTimeline){

View File

@@ -729,7 +729,7 @@ void FFmpegReader::UpdateAudioInfo() {
info.height = 480;
// Use timeline to set correct width & height (if any)
Clip *parent = (Clip *) ParentClip();
Clip *parent = static_cast<Clip *>(ParentClip());
if (parent) {
if (parent->ParentTimeline()) {
// Set max width/height based on parent clip's timeline (if attached to a timeline)
@@ -1403,7 +1403,7 @@ void FFmpegReader::ProcessVideoPacket(int64_t requested_frame) {
int max_width = info.width;
int max_height = info.height;
Clip *parent = (Clip *) ParentClip();
Clip *parent = static_cast<Clip *>(ParentClip());
if (parent) {
if (parent->ParentTimeline()) {
// Set max width/height based on parent clip's timeline (if attached to a timeline)

View File

@@ -1628,7 +1628,7 @@ void FFmpegWriter::write_audio_packets(bool is_final) {
// Get audio sample array
float *frame_samples_float = NULL;
// Get samples interleaved together (c1 c2 c1 c2 c1 c2)
frame_samples_float = frame->GetInterleavedAudioSamples(sample_rate_in_frame, NULL, &samples_in_frame);
frame_samples_float = frame->GetInterleavedAudioSamples(&samples_in_frame);
// Calculate total samples
total_frame_samples = samples_in_frame * channels_in_frame;

View File

@@ -311,92 +311,25 @@ float Frame::GetAudioSample(int channel, int sample, int magnitude_range)
}
// Get an array of sample data (and optionally reverse the sample values)
float* Frame::GetAudioSamples(int channel, bool reverse) {
if (reverse) {
// Copy audio buffer, and reverse channel
juce::AudioBuffer<float> *buffer(audio.get());
buffer->reverse(channel, 0, buffer->getNumSamples());
float* Frame::GetAudioSamples(int channel) {
// return JUCE audio data for this channel
return buffer->getWritePointer(channel);
} else {
// return JUCE audio data for this channel
return audio->getWritePointer(channel);
}
}
// Get a planar array of sample data, using any sample rate
float* Frame::GetPlanarAudioSamples(int new_sample_rate, AudioResampler* resampler, int* sample_count)
{
float *output = NULL;
// Copy audio data
juce::AudioBuffer<float> *buffer(audio.get());
int num_of_channels = audio->getNumChannels();
int num_of_samples = GetAudioSamplesCount();
// Resample to new sample rate (if needed)
if (new_sample_rate != sample_rate)
{
// YES, RESAMPLE AUDIO
resampler->SetBuffer(audio.get(), sample_rate, new_sample_rate);
// Resample data, and return new buffer pointer
buffer = resampler->GetResampledBuffer();
// Update num_of_samples
num_of_samples = buffer->getNumSamples();
}
// INTERLEAVE all samples together (channel 1 + channel 2 + channel 1 + channel 2, etc...)
output = new float[num_of_channels * num_of_samples];
int position = 0;
// Loop through samples in each channel (combining them)
for (int channel = 0; channel < num_of_channels; channel++)
{
for (int sample = 0; sample < num_of_samples; sample++)
{
// Add sample to output array
output[position] = buffer->getReadPointer(channel)[sample];
// increment position
position++;
}
}
// Update sample count (since it might have changed due to resampling)
*sample_count = num_of_samples;
// return combined array
return output;
// return JUCE audio data for this channel
return buffer->getWritePointer(channel);
}
// Get an array of sample data (all channels interleaved together), using any sample rate
float* Frame::GetInterleavedAudioSamples(int new_sample_rate, AudioResampler* resampler, int* sample_count, bool reverse)
float* Frame::GetInterleavedAudioSamples(int* sample_count)
{
float *output = NULL;
// Copy audio data
juce::AudioBuffer<float> *buffer(audio.get());
float *output = NULL;
int num_of_channels = audio->getNumChannels();
int num_of_samples = GetAudioSamplesCount();
if (reverse) {
// Reverse audio samples (if needed)
buffer->reverse(0, buffer->getNumSamples());
}
// Resample to new sample rate (if needed)
if (new_sample_rate != sample_rate && resampler)
{
// YES, RESAMPLE AUDIO
resampler->SetBuffer(audio.get(), sample_rate, new_sample_rate);
// Resample data, and return new buffer pointer
buffer = resampler->GetResampledBuffer();
// Update num_of_samples
num_of_samples = buffer->getNumSamples();
}
// INTERLEAVE all samples together (channel 1 + channel 2 + channel 1 + channel 2, etc...)
output = new float[num_of_channels * num_of_samples];
int position = 0;
@@ -700,8 +633,8 @@ void Frame::Thumbnail(std::string path, int new_width, int new_height, std::stri
mask->invertPixels();
// Get pixels
unsigned char *pixels = (unsigned char *) thumbnail->bits();
const unsigned char *mask_pixels = (const unsigned char *) mask->constBits();
unsigned char *pixels = static_cast<unsigned char *>(thumbnail->bits());
const unsigned char *mask_pixels = static_cast<const unsigned char *>(mask->constBits());
// Convert the mask image to grayscale
// Loop through pixels
@@ -865,6 +798,16 @@ void Frame::ResizeAudio(int channels, int length, int rate, ChannelLayout layout
max_audio_sample = length;
}
// Reverse the audio buffer of this frame (will only reverse a single time, regardless of how many times
// you invoke this method)
void Frame::ReverseAudio() {
// Guard: only reverse when an audio buffer exists and it has not already
// been reversed — the audio_reversed flag makes repeated calls idempotent.
// (The flag is reset back to false by AddAudio/AddAudioSilence, so newly
// written samples can be reversed again on a later call.)
if (audio && !audio_reversed) {
// Reverse audio buffer (all channels, full sample range)
audio->reverse(0, audio->getNumSamples());
audio_reversed = true;
}
}
// Add audio samples to a specific channel
void Frame::AddAudio(bool replaceSamples, int destChannel, int destStartSample, const float* source, int numSamples, float gainToApplyToSource = 1.0f) {
const std::lock_guard<std::recursive_mutex> lock(addingAudioMutex);
@@ -891,6 +834,9 @@ void Frame::AddAudio(bool replaceSamples, int destChannel, int destStartSample,
// Calculate max audio sample added
if (new_length > max_audio_sample)
max_audio_sample = new_length;
// Reset audio reverse flag
audio_reversed = false;
}
// Apply gain ramp (i.e. fading volume)
@@ -1045,4 +991,7 @@ void Frame::AddAudioSilence(int numSamples)
// Calculate max audio sample added
max_audio_sample = numSamples;
// Reset audio reverse flag
audio_reversed = false;
}

View File

@@ -104,6 +104,7 @@ namespace openshot
int sample_rate;
std::string color;
int64_t max_audio_sample; ///< The max audio sample count added to this frame
bool audio_reversed; ///< Keep track of audio reversal (i.e. time keyframe)
#ifdef USE_OPENCV
cv::Mat imagecv; ///< OpenCV image. It will always be in BGR format
@@ -187,13 +188,10 @@ namespace openshot
float GetAudioSample(int channel, int sample, int magnitude_range);
/// Get an array of sample data (and optional reverse the sample values)
float* GetAudioSamples(int channel, bool reverse=false);
float* GetAudioSamples(int channel);
/// Get an array of sample data (all channels interleaved together), using any sample rate
float* GetInterleavedAudioSamples(int new_sample_rate, openshot::AudioResampler* resampler, int* sample_count, bool reverse=false);
// Get a planar array of sample data, using any sample rate
float* GetPlanarAudioSamples(int new_sample_rate, openshot::AudioResampler* resampler, int* sample_count);
float* GetInterleavedAudioSamples(int* sample_count);
/// Get number of audio channels
int GetAudioChannelsCount();
@@ -248,6 +246,10 @@ namespace openshot
/// Set the original sample rate of this frame's audio data
void SampleRate(int orig_sample_rate) { sample_rate = orig_sample_rate; };
/// Reverse the audio buffer of this frame (will only reverse a single time, regardless of how many times
/// you invoke this method)
void ReverseAudio();
/// Save the frame image to the specified path. The image format can be BMP, JPG, JPEG, PNG, PPM, XBM, XPM
void Save(std::string path, float scale, std::string format="PNG", int quality=100);

View File

@@ -117,7 +117,7 @@ void FrameMapper::Init()
Clear();
// Find parent position (if any)
Clip *parent = (Clip *) ParentClip();
Clip *parent = static_cast<Clip *>(ParentClip());
if (parent) {
parent_position = parent->Position();
parent_start = parent->Start();
@@ -430,7 +430,8 @@ std::shared_ptr<Frame> FrameMapper::GetFrame(int64_t requested_frame)
const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);
// Find parent properties (if any)
Clip *parent = (Clip *) ParentClip();
Clip *parent = static_cast<Clip *>(ParentClip());
bool is_increasing = true;
if (parent) {
float position = parent->Position();
float start = parent->Start();
@@ -439,6 +440,10 @@ std::shared_ptr<Frame> FrameMapper::GetFrame(int64_t requested_frame)
// since this heavily affects frame #s and audio mappings
is_dirty = true;
}
// Determine direction of parent clip at this frame (forward or reverse direction)
// This is important for reversing audio in our resampler, for smooth reversed audio.
is_increasing = parent->time.IsIncreasing(requested_frame);
}
// Check if mappings are dirty (and need to be recalculated)
@@ -462,7 +467,6 @@ std::shared_ptr<Frame> FrameMapper::GetFrame(int64_t requested_frame)
// Loop through all requested frames
for (int64_t frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
{
// Debug output
ZmqLogger::Instance()->AppendDebugMethod(
"FrameMapper::GetFrame (inside omp for loop)",
@@ -487,7 +491,7 @@ std::shared_ptr<Frame> FrameMapper::GetFrame(int64_t requested_frame)
if (info.sample_rate == mapped_frame->SampleRate() &&
info.channels == mapped_frame->GetAudioChannelsCount() &&
info.channel_layout == mapped_frame->ChannelsLayout() &&
mapped.Samples.total == mapped_frame->GetAudioSamplesCount() == samples_in_frame &&
mapped.Samples.total == mapped_frame->GetAudioSamplesCount() == samples_in_frame && is_increasing &&
mapped.Samples.frame_start == mapped.Odd.Frame &&
mapped.Samples.sample_start == 0 &&
mapped_frame->number == frame_number &&// in some conditions (e.g. end of stream)
@@ -535,6 +539,16 @@ std::shared_ptr<Frame> FrameMapper::GetFrame(int64_t requested_frame)
if (need_resampling)
{
// Check for non-adjacent frame requests - so the resampler can be reset
if (abs(frame->number - previous_frame) > 1) {
if (avr) {
// Delete resampler (if exists)
SWR_CLOSE(avr);
SWR_FREE(&avr);
avr = NULL;
}
}
// Resampling needed, modify copy of SampleRange object that
// includes some additional input samples on first iteration,
// and continues the offset to ensure that the sample rate
@@ -629,6 +643,10 @@ std::shared_ptr<Frame> FrameMapper::GetFrame(int64_t requested_frame)
starting_frame++;
}
// Reverse audio (if needed)
if (!is_increasing)
frame->ReverseAudio();
// Resample audio on frame (if needed)
if (need_resampling)
// Resample audio and correct # of channels if needed
@@ -831,14 +849,6 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr<Frame> frame, int64_t orig
// Recalculate mappings
Init();
// Determine direction of parent clip at this frame (forward or reverse direction)
// This is important for reversing audio in our resampler, for smooth reversed audio.
Clip *parent = (Clip *) ParentClip();
bool is_increasing = true;
if (parent) {
is_increasing = parent->time.IsIncreasing(original_frame_number);
}
// Init audio buffers / variables
int total_frame_samples = 0;
int channels_in_frame = frame->GetAudioChannelsCount();
@@ -857,7 +867,7 @@ void FrameMapper::ResampleMappedAudio(std::shared_ptr<Frame> frame, int64_t orig
// Get audio sample array
float* frame_samples_float = NULL;
// Get samples interleaved together (c1 c2 c1 c2 c1 c2)
frame_samples_float = frame->GetInterleavedAudioSamples(sample_rate_in_frame, NULL, &samples_in_frame, !is_increasing);
frame_samples_float = frame->GetInterleavedAudioSamples(&samples_in_frame);
// Calculate total samples
total_frame_samples = samples_in_frame * channels_in_frame;
@@ -1026,7 +1036,7 @@ int64_t FrameMapper::AdjustFrameNumber(int64_t clip_frame_number) {
// Get clip position from parent clip (if any)
float position = 0.0;
float start = 0.0;
Clip *parent = (Clip *) ParentClip();
Clip *parent = static_cast<Clip *>(ParentClip());
if (parent) {
position = parent->Position();
start = parent->Start();

View File

@@ -291,27 +291,41 @@ int64_t Keyframe::GetLong(int64_t index) const {
// Get the direction of the curve at a specific index (increasing or decreasing)
bool Keyframe::IsIncreasing(int index) const
{
if (index < 1 || (index + 1) >= GetLength()) {
return true;
if (index <= 1) {
// Determine direction of frame 1 (and assume previous frames have same direction)
index = 1;
} else if (index >= GetLength()) {
// Determine direction of last valid frame # (and assume next frames have same direction)
index = GetLength() - 1;
}
std::vector<Point>::const_iterator candidate =
std::lower_bound(begin(Points), end(Points), static_cast<double>(index), IsPointBeforeX);
if (candidate == end(Points)) {
return false; // After the last point, thus constant.
}
if ((candidate->co.X == index) || (candidate == begin(Points))) {
++candidate;
}
int64_t const value = GetLong(index);
do {
if (value < round(candidate->co.Y)) {
return true;
} else if (value > round(candidate->co.Y)) {
return false;
// Get current index value
const double current_value = GetValue(index);
// Iterate from current index to next significant value change
int attempts = 1;
while (attempts < 600 && index + attempts <= GetLength()) {
// Get next value
const double next_value = GetValue(index + attempts);
// Is value significantly different
const double diff = next_value - current_value;
if (fabs(diff) > 0.0001) {
if (diff < 0.0) {
// Decreasing value found next
return false;
} else {
// Increasing value found next
return true;
}
}
++candidate;
} while (candidate != end(Points));
return false;
// increment attempt
attempts++;
}
// If no next value found, assume increasing values
return true;
}
// Generate JSON string of this object
@@ -396,7 +410,7 @@ Point const & Keyframe::GetPoint(int64_t index) const {
int64_t Keyframe::GetLength() const {
if (Points.empty()) return 0;
if (Points.size() == 1) return 1;
return round(Points.back().co.X) + 1;
return round(Points.back().co.X);
}
// Get the number of points (i.e. # of points)
@@ -475,7 +489,7 @@ void Keyframe::PrintValues(std::ostream* out) const {
<< "┼─────────"
<< "┼────────────┤\n";
for (int64_t i = 1; i < GetLength(); ++i) {
for (int64_t i = 1; i <= GetLength(); ++i) {
*out << ""
<< std::setw(w[0]-2) << std::defaultfloat << i
<< (Contains(Point(i, 1)) ? " *" : " ") << ""

View File

@@ -1668,7 +1668,7 @@ void Timeline::ClearAllCache(bool deep) {
// Clear nested Reader (if deep clear requested)
if (deep && clip->Reader()->Name() == "FrameMapper") {
FrameMapper *nested_reader = (FrameMapper *) clip->Reader();
FrameMapper *nested_reader = static_cast<FrameMapper *>(clip->Reader());
if (nested_reader->Reader() && nested_reader->Reader()->GetCache())
nested_reader->Reader()->GetCache()->Clear();
}

View File

@@ -154,7 +154,7 @@ std::shared_ptr<Frame> ObjectDetection::GetFrame(std::shared_ptr<Frame> frame, i
// Get the Detected Object's child clip
if (trackedObject->ChildClipId() != ""){
// Cast the parent timeline of this effect
Timeline* parentTimeline = (Timeline *) ParentTimeline();
Timeline* parentTimeline = static_cast<Timeline *>(ParentTimeline());
if (parentTimeline){
// Get the Tracked Object's child clip
Clip* childClip = parentTimeline->GetClip(trackedObject->ChildClipId());

View File

@@ -126,7 +126,7 @@ std::shared_ptr<Frame> Tracker::GetFrame(std::shared_ptr<Frame> frame, int64_t f
// Get the image of the Tracked Object' child clip
if (trackedData->ChildClipId() != ""){
// Cast the parent timeline of this effect
Timeline* parentTimeline = (Timeline *) ParentTimeline();
Timeline* parentTimeline = static_cast<Timeline *>(ParentTimeline());
if (parentTimeline){
// Get the Tracked Object's child clip
Clip* childClip = parentTimeline->GetClip(trackedData->ChildClipId());

View File

@@ -379,16 +379,16 @@ TEST_CASE( "time remapping", "[libopenshot][clip]" )
// Load clip with video
std::stringstream path;
//path << TEST_MEDIA_PATH << "piano.wav";
path << "/home/jonathan/Music/01 - Universal Fanfare (From _Pitch Perfect 2_ Soundtrack).mp3";
path << TEST_MEDIA_PATH << "piano.wav";
Clip clip(path.str());
int original_video_length = clip.Reader()->info.video_length;
clip.Position(0.0);
clip.Start(0.0);
// Set time keyframe (4X speed REVERSE)
clip.time.AddPoint(1, original_video_length, openshot::LINEAR);
clip.time.AddPoint(original_video_length, 1.0, openshot::LINEAR);
//clip.time.AddPoint(1, original_video_length, openshot::LINEAR);
//clip.time.AddPoint(original_video_length, 1.0, openshot::LINEAR);
// Set time keyframe (4X speed FORWARD)
//clip.time.AddPoint(1, 1.0, openshot::LINEAR);
@@ -427,22 +427,6 @@ TEST_CASE( "time remapping", "[libopenshot][clip]" )
if (expected_sample_count != f->GetAudioSamplesCount()) {
CHECK(expected_sample_count == f->GetAudioSamplesCount());
}
// Check for X sequential empty audio samples
// int empty_count = 0;
// float previous_value = -1.0;
// for (int sample = 0; sample < f->GetAudioSamplesCount(); sample++) {
// float value = f->GetAudioSample(0, sample, 1.0);
// if (value == 0.0) {
// empty_count++;
// } else {
// empty_count = 0;
// }
//
// if (empty_count >= 5) {
// bool bad = true;
// }
// }
}
// Clear cache
@@ -465,32 +449,32 @@ TEST_CASE( "time remapping", "[libopenshot][clip]" )
}
TEST_CASE( "resample_audio_48000_to_41000_reverse", "[libopenshot][clip]" )
TEST_CASE( "resample_audio_8000_to_48000_reverse", "[libopenshot][clip]" )
{
// Create a reader
std::stringstream path;
path << TEST_MEDIA_PATH << "sintel_trailer-720p.mp4";
path << TEST_MEDIA_PATH << "sine.wav";
openshot::FFmpegReader reader(path.str(), true);
// Map to 24 fps, 2 channels stereo, 44100 sample rate
FrameMapper map(&reader, Fraction(24,1), PULLDOWN_NONE, 44100, 2, LAYOUT_STEREO);
FrameMapper map(&reader, Fraction(24,1), PULLDOWN_NONE, 48000, 2, LAYOUT_STEREO);
map.Open();
Clip clip;
clip.Reader(&map);
clip.Open();
int original_video_length = clip.Reader()->info.video_length + 1;
int original_video_length = clip.Reader()->info.video_length;
clip.Position(0.0);
clip.Start(0.0);
// Set time keyframe (REVERSE direction)
// Set time keyframe (REVERSE direction using bezier curve)
clip.time.AddPoint(1, original_video_length, openshot::LINEAR);
clip.time.AddPoint(original_video_length, 1.0, openshot::LINEAR);
clip.time.AddPoint(original_video_length, 1.0, openshot::BEZIER);
// Loop again through frames
// Time-remapping should start over (detect a gap)
for (int64_t frame = 1; frame < 100; frame++) {
for (int64_t frame = 1; frame <= original_video_length; frame++) {
int expected_sample_count = Frame::GetSamplesPerFrame(frame, map.info.fps,
map.info.sample_rate,
map.info.channels);
@@ -506,15 +490,13 @@ TEST_CASE( "resample_audio_48000_to_41000_reverse", "[libopenshot][clip]" )
// Loop again through frames
// Time-remapping should start over (detect a gap)
for (int64_t frame = 1; frame < 100; frame++) {
for (int64_t frame = 1; frame < original_video_length; frame++) {
int expected_sample_count = Frame::GetSamplesPerFrame(frame, map.info.fps,
map.info.sample_rate,
map.info.channels);
std::shared_ptr<Frame> f = clip.GetFrame(frame);
if (expected_sample_count != f->GetAudioSamplesCount()) {
CHECK(expected_sample_count == f->GetAudioSamplesCount());
}
CHECK(expected_sample_count == f->GetAudioSamplesCount());
}
// Close mapper

View File

@@ -93,7 +93,7 @@ TEST_CASE( "GetValue (Bezier curve, 2 Points)", "[libopenshot][keyframe]" )
CHECK(kf.GetValue(40) == Approx(3.79733f).margin(0.0001));
CHECK(kf.GetValue(50) == Approx(4.0f).margin(0.0001));
// Check the expected number of values
CHECK(kf.GetLength() == 51);
CHECK(kf.GetLength() == 50);
}
TEST_CASE( "GetValue (Bezier, 5 Points, 40% handle)", "[libopenshot][keyframe]" )
@@ -116,7 +116,7 @@ TEST_CASE( "GetValue (Bezier, 5 Points, 40% handle)", "[libopenshot][keyframe]"
CHECK(kf.GetValue(177) == Approx(1.73860f).margin(0.0001));
CHECK(kf.GetValue(200) == Approx(3.0f).margin(0.0001));
// Check the expected number of values
CHECK(kf.GetLength() == 201);
CHECK(kf.GetLength() == 200);
}
TEST_CASE( "GetValue (Bezier, 5 Points, 25% Handle)", "[libopenshot][keyframe]" )
@@ -139,7 +139,7 @@ TEST_CASE( "GetValue (Bezier, 5 Points, 25% Handle)", "[libopenshot][keyframe]"
CHECK(kf.GetValue(177) == Approx(1.73860f).margin(0.0001));
CHECK(kf.GetValue(200) == Approx(3.0f).margin(0.0001));
// Check the expected number of values
CHECK(kf.GetLength() == 201);
CHECK(kf.GetLength() == 200);
}
TEST_CASE( "GetValue (Linear, 3 Points)", "[libopenshot][keyframe]" )
@@ -159,7 +159,7 @@ TEST_CASE( "GetValue (Linear, 3 Points)", "[libopenshot][keyframe]" )
CHECK(kf.GetValue(40) == Approx(4.4f).margin(0.0001));
CHECK(kf.GetValue(50) == Approx(2.0f).margin(0.0001));
// Check the expected number of values
CHECK(kf.GetLength() == 51);
CHECK(kf.GetLength() == 50);
}
TEST_CASE( "GetValue (Constant, 3 Points)", "[libopenshot][keyframe]" )
@@ -180,7 +180,7 @@ TEST_CASE( "GetValue (Constant, 3 Points)", "[libopenshot][keyframe]" )
CHECK(kf.GetValue(49) == Approx(8.0f).margin(0.0001));
CHECK(kf.GetValue(50) == Approx(2.0f).margin(0.0001));
// Check the expected number of values
CHECK(kf.GetLength() == 51);
CHECK(kf.GetLength() == 50);
}
TEST_CASE( "GetDelta", "[libopenshot][keyframe]" )
@@ -201,11 +201,11 @@ TEST_CASE( "GetDelta", "[libopenshot][keyframe]" )
CHECK(kf.GetDelta(24) == Approx(-0.1622f).margin(0.0001));
CHECK(kf.GetLong(390) == 100);
CHECK(kf.IsIncreasing(390) == true);
CHECK(kf.IsIncreasing(390) == false);
CHECK(kf.GetDelta(390) == Approx(-0.0732f).margin(0.0001));
CHECK(kf.GetLong(391) == 100);
CHECK(kf.IsIncreasing(391) == true);
CHECK(kf.IsIncreasing(391) == false);
CHECK(kf.GetDelta(388) == Approx(-0.0886f).margin(0.0001));
}
@@ -382,7 +382,7 @@ TEST_CASE( "large number values", "[libopenshot][keyframe]" )
kf.AddPoint(large_value, 100.0); // 90 minutes long
// Spot check values from the curve
CHECK(kf.GetLength() == large_value + 1);
CHECK(kf.GetLength() == large_value);
CHECK(kf.GetPoint(0).co.Y == Approx(1.0).margin(0.01));
CHECK(kf.GetPoint(1).co.Y == Approx(100.0).margin(0.01));
}
@@ -434,7 +434,7 @@ TEST_CASE( "IsIncreasing", "[libopenshot][keyframe]" )
CHECK(kf.IsIncreasing(0) == true);
CHECK(kf.IsIncreasing(15) == true);
// all next equal
CHECK_FALSE(kf.IsIncreasing(12));
CHECK(kf.IsIncreasing(12) == true);
// first non-eq is larger
CHECK(kf.IsIncreasing(8) == true);
// first non-eq is smaller
@@ -451,15 +451,15 @@ TEST_CASE( "GetLength", "[libopenshot][keyframe]" )
f.AddPoint(1, 1);
CHECK(f.GetLength() == 1);
f.AddPoint(2, 1);
CHECK(f.GetLength() == 3);
CHECK(f.GetLength() == 2);
f.AddPoint(200, 1);
CHECK(f.GetLength() == 201);
CHECK(f.GetLength() == 200);
Keyframe g;
g.AddPoint(200, 1);
CHECK(g.GetLength() == 1);
g.AddPoint(1,1);
CHECK(g.GetLength() == 201);
CHECK(g.GetLength() == 200);
}
TEST_CASE( "use segment end point interpolation", "[libopenshot][keyframe]" )
@@ -485,7 +485,7 @@ TEST_CASE( "std::vector<Point> constructor", "[libopenshot][keyframe]" )
std::vector<Point> points{Point(1, 10), Point(5, 20), Point(10, 30)};
Keyframe k1(points);
CHECK(k1.GetLength() == 11);
CHECK(k1.GetLength() == 10);
CHECK(k1.GetValue(10) == Approx(30.0f).margin(0.0001));
}