Merge pull request #719 from OpenShot/audio-devices

New audio device [type] switching + Audio sync improvements
This commit is contained in:
Jonathan Thomas
2021-12-07 16:36:51 -06:00
committed by GitHub
18 changed files with 255 additions and 155 deletions

View File

@@ -41,10 +41,20 @@
%shared_ptr(juce::AudioBuffer<float>)
%shared_ptr(openshot::Frame)
/* Instantiate the required template specializations */
%template() std::map<std::string, int>;
%template() std::pair<int, int>;
%template() std::vector<int>;
%template() std::pair<double, double>;
%template() std::pair<float, float>;
%template() std::pair<std::string, std::string>;
%template() std::vector<std::pair<std::string, std::string>>;
%{
#include "OpenShotVersion.h"
#include "ReaderBase.h"
#include "WriterBase.h"
#include "AudioDevices.h"
#include "CacheBase.h"
#include "CacheDisk.h"
#include "CacheMemory.h"
@@ -79,7 +89,6 @@
#include "TimelineBase.h"
#include "Timeline.h"
#include "ZmqLogger.h"
#include "AudioDeviceInfo.h"
%}
@@ -116,12 +125,7 @@
}
}
/* Instantiate the required template specializations */
%template() std::map<std::string, int>;
%template() std::pair<int, int>;
%template() std::vector<int>;
%template() std::pair<double, double>;
%template() std::pair<float, float>;
/* Wrap std templates (list, vector, etc...) */
%template(ClipList) std::list<openshot::Clip *>;
@@ -131,6 +135,8 @@
%template(FieldVector) std::vector<openshot::Field>;
%template(MappedFrameVector) std::vector<openshot::MappedFrame>;
%template(MetadataMap) std::map<std::string, std::string>;
/* Deprecated */
%template(AudioDeviceInfoVector) std::vector<openshot::AudioDeviceInfo>;
/* Make openshot.Fraction more Pythonic */
@@ -256,6 +262,7 @@
%include "OpenShotVersion.h"
%include "ReaderBase.h"
%include "WriterBase.h"
%include "AudioDevices.h"
%include "CacheBase.h"
%include "CacheDisk.h"
%include "CacheMemory.h"
@@ -290,7 +297,6 @@
%include "TimelineBase.h"
%include "Timeline.h"
%include "ZmqLogger.h"
%include "AudioDeviceInfo.h"
#ifdef USE_OPENCV
%include "ClipProcessingJobs.h"

View File

@@ -41,12 +41,14 @@
%shared_ptr(juce::AudioBuffer<float>)
%shared_ptr(openshot::Frame)
/* Template specializations */
/* Instantiate the required template specializations */
%template() std::map<std::string, int>;
%template() std::pair<int, int>;
%template() std::vector<int>;
%template() std::pair<double, double>;
%template() std::pair<float, float>;
%template() std::pair<std::string, std::string>;
%template() std::vector<std::pair<std::string, std::string>>;
%{
/* Ruby and FFmpeg define competing RSHIFT macros,
@@ -60,6 +62,7 @@
#include "OpenShotVersion.h"
#include "ReaderBase.h"
#include "WriterBase.h"
#include "AudioDevices.h"
#include "CacheBase.h"
#include "CacheDisk.h"
#include "CacheMemory.h"
@@ -94,7 +97,6 @@
#include "TimelineBase.h"
#include "Timeline.h"
#include "ZmqLogger.h"
#include "AudioDeviceInfo.h"
/* Move FFmpeg's RSHIFT to FF_RSHIFT, if present */
#ifdef RSHIFT
@@ -115,9 +117,22 @@
%}
#endif
/* Wrap std templates (list, vector, etc...) */
%template(ClipList) std::list<openshot::Clip *>;
%template(EffectBaseList) std::list<openshot::EffectBase *>;
%template(CoordinateVector) std::vector<openshot::Coordinate>;
%template(PointsVector) std::vector<openshot::Point>;
%template(FieldVector) std::vector<openshot::Field>;
%template(MappedFrameVector) std::vector<openshot::MappedFrame>;
%template(MetadataMap) std::map<std::string, std::string>;
/* Deprecated */
%template(AudioDeviceInfoVector) std::vector<openshot::AudioDeviceInfo>;
%include "OpenShotVersion.h"
%include "ReaderBase.h"
%include "WriterBase.h"
%include "AudioDevices.h"
%include "CacheBase.h"
%include "CacheDisk.h"
%include "CacheMemory.h"
@@ -173,7 +188,6 @@
%include "TimelineBase.h"
%include "Timeline.h"
%include "ZmqLogger.h"
%include "AudioDeviceInfo.h"
#ifdef USE_IMAGEMAGICK
%include "ImageReader.h"
@@ -181,7 +195,6 @@
%include "TextReader.h"
#endif
/* Effects */
%include "effects/Bars.h"
%include "effects/Blur.h"
@@ -200,12 +213,3 @@
%include "effects/Wave.h"
/* Wrap std templates (list, vector, etc...) */
%template(ClipList) std::list<openshot::Clip *>;
%template(EffectBaseList) std::list<openshot::EffectBase *>;
%template(CoordinateVector) std::vector<openshot::Coordinate>;
%template(PointsVector) std::vector<openshot::Point>;
%template(FieldVector) std::vector<openshot::Field>;
%template(MappedFrameVector) std::vector<openshot::MappedFrame>;
%template(MetadataMap) std::map<std::string, std::string>;
%template(AudioDeviceInfoVector) std::vector<openshot::AudioDeviceInfo>;

View File

@@ -1,29 +0,0 @@
/**
* @file
* @brief Header file for Audio Device Info struct
* @author Jonathan Thomas <jonathan@openshot.org>
*
* @ref License
*/
// Copyright (c) 2008-2019 OpenShot Studios, LLC
//
// SPDX-License-Identifier: LGPL-3.0-or-later
#ifndef OPENSHOT_AUDIODEVICEINFO_H
#define OPENSHOT_AUDIODEVICEINFO_H
/**
* @brief This struct holds information about Audio Devices
*
* The type and name of the audio device.
*/
namespace openshot {
struct AudioDeviceInfo
{
std::string name;
std::string type;
};
}
#endif

41
src/AudioDevices.cpp Normal file
View File

@@ -0,0 +1,41 @@
/**
* @file
* @brief Utility methods for identifying audio devices
* @author Jonathan Thomas <jonathan@openshot.org>
* @author FeRD (Frank Dana) <ferdnyc@gmail.com>
*
* @ref License
*/
// Copyright (c) 2008-2019 OpenShot Studios, LLC
//
// SPDX-License-Identifier: LGPL-3.0-or-later
#include "AudioDevices.h"
#include <OpenShotAudio.h>
using namespace openshot;
using AudioDeviceList = std::vector<std::pair<std::string, std::string>>;
// Build a list of devices found, and return
/// Scan the system for audio devices and return a list of
/// (device name, device type) pairs. Results are also cached
/// in m_devices (previous contents are discarded on each call).
AudioDeviceList AudioDevices::getNames() {
    // A temporary, stack-allocated device manager used only to scan
    // device names. Its initialise() is never called, and no devices
    // are opened, so construction/destruction is cheap and side-effect free.
    juce::AudioDeviceManager manager;

    m_devices.clear();

    // Each "type" is an audio backend (e.g. ALSA, CoreAudio, WASAPI);
    // scanForDevices() must be called before getDeviceNames() is valid.
    for (auto* type : manager.getAvailableDeviceTypes()) {
        type->scanForDevices();
        const auto names = type->getDeviceNames();
        for (const auto& name : names) {
            m_devices.emplace_back(
                name.toStdString(), type->getTypeName().toStdString());
        }
    }
    return m_devices;
}

47
src/AudioDevices.h Normal file
View File

@@ -0,0 +1,47 @@
/**
 * @file
 * @brief Header file for the AudioDevices utility class and
 *        the (deprecated) AudioDeviceInfo struct
 * @author Jonathan Thomas <jonathan@openshot.org>
 *
 * @ref License
 */

// Copyright (c) 2008-2019 OpenShot Studios, LLC
//
// SPDX-License-Identifier: LGPL-3.0-or-later

#ifndef OPENSHOT_AUDIODEVICEINFO_H
#define OPENSHOT_AUDIODEVICEINFO_H

#include <string>
#include <utility>  // for std::pair (used by AudioDeviceList)
#include <vector>

namespace openshot {
    /**
     * @brief This struct holds information about an audio device
     *
     * The type and name of the audio device.
     */
    struct
    AudioDeviceInfo {
        std::string name;   ///< Device name (as reported by the backend)
        std::string type;   ///< Backend type (e.g. ALSA, CoreAudio)
    };

    /// A list of (device name, device type) string pairs
    using AudioDeviceList = std::vector<std::pair<std::string, std::string>>;

    /// A class which probes the available audio devices
    class AudioDevices
    {
    public:
        AudioDevices() = default;

        /// Return a vector of std::pair<> objects holding the
        /// device name and type for each audio device detected
        AudioDeviceList getNames();
    private:
        /// Cached results from the most recent getNames() scan
        AudioDeviceList m_devices;
    };
}
#endif

View File

@@ -19,19 +19,14 @@ using namespace std;
using namespace openshot;
// Constructor that reads samples from a reader
AudioReaderSource::AudioReaderSource(
ReaderBase *audio_reader, int64_t starting_frame_number, int buffer_size
) :
position(0),
size(buffer_size),
buffer(new juce::AudioBuffer<float>(audio_reader->info.channels, buffer_size)),
speed(1),
reader(audio_reader),
frame_number(starting_frame_number),
frame_position(0),
estimated_frame(0)
{
// Zero the buffer contents
AudioReaderSource::AudioReaderSource(ReaderBase *audio_reader, int64_t starting_frame_number, int buffer_size)
: reader(audio_reader), frame_number(starting_frame_number),
size(buffer_size), position(0), frame_position(0), estimated_frame(0), speed(1) {
// Initialize an audio buffer (based on reader)
buffer = new juce::AudioBuffer<float>(reader->info.channels, size);
// initialize the audio samples to zero (silence)
buffer->clear();
}
@@ -61,7 +56,7 @@ void AudioReaderSource::GetMoreSamplesFromReader()
estimated_frame = frame_number;
// Init new buffer
juce::AudioBuffer<float> *new_buffer = new juce::AudioSampleBuffer(reader->info.channels, size);
auto *new_buffer = new juce::AudioBuffer<float>(reader->info.channels, size);
new_buffer->clear();
// Move the remaining samples into new buffer (if any)
@@ -130,7 +125,7 @@ void AudioReaderSource::GetMoreSamplesFromReader()
}
// Reverse an audio buffer
juce::AudioBuffer<float>* AudioReaderSource::reverse_buffer(juce::AudioSampleBuffer* buffer)
juce::AudioBuffer<float>* AudioReaderSource::reverse_buffer(juce::AudioBuffer<float>* buffer)
{
int number_of_samples = buffer->getNumSamples();
int channels = buffer->getNumChannels();
@@ -139,7 +134,7 @@ juce::AudioBuffer<float>* AudioReaderSource::reverse_buffer(juce::AudioSampleBuf
ZmqLogger::Instance()->AppendDebugMethod("AudioReaderSource::reverse_buffer", "number_of_samples", number_of_samples, "channels", channels);
// Reverse array (create new buffer to hold the reversed version)
juce::AudioBuffer<float> *reversed = new juce::AudioSampleBuffer(channels, number_of_samples);
auto *reversed = new juce::AudioBuffer<float>(channels, number_of_samples);
reversed->clear();
for (int channel = 0; channel < channels; channel++)
@@ -229,7 +224,7 @@ void AudioReaderSource::getNextAudioBlock(const juce::AudioSourceChannelInfo& in
}
// Prepare to play this audio source
void AudioReaderSource::prepareToPlay(int, double) { }
void AudioReaderSource::prepareToPlay(int, double) {}
// Release all resources
void AudioReaderSource::releaseResources() { }

View File

@@ -47,7 +47,7 @@ namespace openshot
void GetMoreSamplesFromReader();
/// Reverse an audio buffer (for backwards audio)
juce::AudioBuffer<float>* reverse_buffer(juce::AudioSampleBuffer* buffer);
juce::AudioBuffer<float>* reverse_buffer(juce::AudioBuffer<float>* buffer);
public:

View File

@@ -45,6 +45,7 @@ add_feature_info("IWYU (include-what-you-use)" ENABLE_IWYU "Scan all source file
# Main library sources
set(OPENSHOT_SOURCES
AudioBufferSource.cpp
AudioDevices.cpp
AudioReaderSource.cpp
AudioResampler.cpp
CacheBase.cpp

View File

@@ -461,7 +461,7 @@ void Clip::reverse_buffer(juce::AudioBuffer<float>* buffer)
int channels = buffer->getNumChannels();
// Reverse array (create new buffer to hold the reversed version)
juce::AudioBuffer<float> *reversed = new juce::AudioSampleBuffer(channels, number_of_samples);
auto *reversed = new juce::AudioBuffer<float>(channels, number_of_samples);
reversed->clear();
for (int channel = 0; channel < channels; channel++)
@@ -479,7 +479,7 @@ void Clip::reverse_buffer(juce::AudioBuffer<float>* buffer)
buffer->addFrom(channel, 0, reversed->getReadPointer(channel), number_of_samples, 1.0f);
delete reversed;
reversed = NULL;
reversed = nullptr;
}
// Adjust the audio and image of a time mapped frame
@@ -496,7 +496,7 @@ void Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, int64_t frame_num
const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);
// create buffer and resampler
juce::AudioBuffer<float> *samples = NULL;
juce::AudioBuffer<float> *samples = nullptr;
if (!resampler)
resampler = new AudioResampler();
@@ -516,7 +516,7 @@ void Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, int64_t frame_num
if (time.GetRepeatFraction(frame_number).den > 1) {
// SLOWING DOWN AUDIO
// Resample data, and return new buffer pointer
juce::AudioBuffer<float> *resampled_buffer = NULL;
juce::AudioBuffer<float> *resampled_buffer = nullptr;
// SLOW DOWN audio (split audio)
samples = new juce::AudioBuffer<float>(channels, number_of_samples);
@@ -548,7 +548,7 @@ void Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, int64_t frame_num
number_of_samples, 1.0f);
// Clean up
resampled_buffer = NULL;
resampled_buffer = nullptr;
}
else if (abs(delta) > 1 && abs(delta) < 100) {
@@ -571,8 +571,8 @@ void Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, int64_t frame_num
delta_frame <= new_frame_number; delta_frame++) {
// buffer to hold delta samples
int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
juce::AudioBuffer<float> *delta_samples = new juce::AudioSampleBuffer(channels,
number_of_delta_samples);
auto *delta_samples = new juce::AudioBuffer<float>(channels,
number_of_delta_samples);
delta_samples->clear();
for (int channel = 0; channel < channels; channel++)
@@ -591,7 +591,7 @@ void Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, int64_t frame_num
// Clean up
delete delta_samples;
delta_samples = NULL;
delta_samples = nullptr;
// Increment start position
start += number_of_delta_samples;
@@ -615,8 +615,8 @@ void Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, int64_t frame_num
delta_frame >= new_frame_number; delta_frame--) {
// buffer to hold delta samples
int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
juce::AudioBuffer<float> *delta_samples = new juce::AudioSampleBuffer(channels,
number_of_delta_samples);
auto *delta_samples = new juce::AudioBuffer<float>(channels,
number_of_delta_samples);
delta_samples->clear();
for (int channel = 0; channel < channels; channel++)
@@ -654,7 +654,7 @@ void Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, int64_t frame_num
frame->AddAudio(true, channel, 0, buffer->getReadPointer(channel), number_of_samples, 1.0f);
// Clean up
buffer = NULL;
buffer = nullptr;
}
else {
// Use the samples on this frame (but maybe reverse them if needed)
@@ -678,7 +678,7 @@ void Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, int64_t frame_num
}
delete samples;
samples = NULL;
samples = nullptr;
}
}
}

View File

@@ -14,6 +14,12 @@
#include "AudioPlaybackThread.h"
#include "Settings.h"
#include "../ReaderBase.h"
#include "../RendererBase.h"
#include "../AudioReaderSource.h"
#include "../AudioDevices.h"
#include "../Settings.h"
#include <thread> // for std::this_thread::sleep_for
#include <chrono> // for std::chrono::milliseconds
@@ -31,39 +37,46 @@ namespace openshot
if (!m_pInstance) {
// Create the actual instance of device manager only once
m_pInstance = new AudioDeviceManagerSingleton;
auto* mgr = &m_pInstance->audioDeviceManager;
// Get preferred audio device name (if any)
juce::String preferred_audio_device = juce::String(Settings::Instance()->PLAYBACK_AUDIO_DEVICE_NAME.c_str());
// Get preferred audio device name and type (if any)
auto selected_device = juce::String(
Settings::Instance()->PLAYBACK_AUDIO_DEVICE_NAME);
auto selected_type = juce::String(
Settings::Instance()->PLAYBACK_AUDIO_DEVICE_TYPE);
if (selected_type.isEmpty() && !selected_device.isEmpty()) {
// Look up type for the selected device
for (const auto t : mgr->getAvailableDeviceTypes()) {
for (const auto n : t->getDeviceNames()) {
if (selected_device.trim().equalsIgnoreCase(n.trim())) {
selected_type = t->getTypeName();
break;
}
}
if(!selected_type.isEmpty())
break;
}
}
if (!selected_type.isEmpty())
m_pInstance->audioDeviceManager.setCurrentAudioDeviceType(selected_type, true);
// Initialize audio device only 1 time
juce::String audio_error = m_pInstance->audioDeviceManager.initialise (
0, /* number of input channels */
2, /* number of output channels */
0, /* no XML settings.. */
true, /* select default device on failure */
preferred_audio_device /* preferredDefaultDeviceName */);
0, // number of input channels
2, // number of output channels
nullptr, // no XML settings..
true, // select default device on failure
selected_device // preferredDefaultDeviceName
);
// Persist any errors detected
if (audio_error.isNotEmpty()) {
m_pInstance->initialise_error = audio_error.toRawUTF8();
m_pInstance->initialise_error = audio_error.toStdString();
} else {
m_pInstance->initialise_error = "";
}
// Get all audio device names
for (int i = 0; i < m_pInstance->audioDeviceManager.getAvailableDeviceTypes().size(); ++i)
{
const AudioIODeviceType* t = m_pInstance->audioDeviceManager.getAvailableDeviceTypes()[i];
const juce::StringArray deviceNames = t->getDeviceNames ();
for (int j = 0; j < deviceNames.size (); ++j )
{
juce::String deviceName = deviceNames[j];
juce::String typeName = t->getTypeName();
openshot::AudioDeviceInfo deviceInfo = {deviceName.toRawUTF8(), typeName.toRawUTF8()};
m_pInstance->audio_device_names.push_back(deviceInfo);
}
}
}
return m_pInstance;

View File

@@ -17,8 +17,10 @@
#include "ReaderBase.h"
#include "RendererBase.h"
#include "AudioReaderSource.h"
#include "AudioDeviceInfo.h"
#include "AudioDevices.h"
#include "AudioReaderSource.h"
#include <OpenShotAudio.h>
#include <AppConfig.h>
#include <juce_audio_basics/juce_audio_basics.h>
#include <juce_audio_devices/juce_audio_devices.h>
@@ -26,24 +28,27 @@
namespace openshot
{
/**
* @brief Singleton wrapper for AudioDeviceManager (to prevent multiple instances).
*/
class AudioDeviceManagerSingleton {
private:
// Forward decls
class ReaderBase;
class Frame;
class PlayerPrivate;
class QtPlayer;
/**
* @brief Singleton wrapper for AudioDeviceManager (to prevent multiple instances).
*/
class AudioDeviceManagerSingleton {
private:
/// Default constructor (Don't allow user to create an instance of this singleton)
AudioDeviceManagerSingleton(){ initialise_error=""; };
/// Private variable to keep track of singleton instance
static AudioDeviceManagerSingleton * m_pInstance;
public:
public:
/// Error found during JUCE initialise method
std::string initialise_error;
/// List of valid audio device names
std::vector<openshot::AudioDeviceInfo> audio_device_names;
/// Override with no channels and no preferred audio device
static AudioDeviceManagerSingleton * Instance();
@@ -102,22 +107,23 @@ namespace openshot
/// Get Speed (The speed and direction to playback a reader (1=normal, 2=fast, 3=faster, -1=rewind, etc...)
int getSpeed() const { if (source) return source->getSpeed(); else return 1; }
/// Get Audio Error (if any)
std::string getError()
{
return AudioDeviceManagerSingleton::Instance()->initialise_error;
}
/// Get Audio Error (if any)
std::string getError()
{
return AudioDeviceManagerSingleton::Instance()->initialise_error;
}
/// Get Audio Device Names (if any)
std::vector<openshot::AudioDeviceInfo> getAudioDeviceNames()
{
return AudioDeviceManagerSingleton::Instance()->audio_device_names;
};
/// Get Audio Device Names (if any)
std::vector<std::pair<std::string, std::string>> getAudioDeviceNames()
{
AudioDevices devs;
return devs.getNames();
};
friend class PlayerPrivate;
friend class QtPlayer;
};
};
}
} // namespace openshot
#endif // OPENSHOT_AUDIO_PLAYBACK_THREAD_H

View File

@@ -26,7 +26,7 @@ namespace openshot
, audioPlayback(new openshot::AudioPlaybackThread())
, videoPlayback(new openshot::VideoPlaybackThread(rb))
, videoCache(new openshot::VideoCacheThread())
, speed(1), reader(NULL), last_video_position(1), max_sleep_ms(3000)
, speed(1), reader(NULL), last_video_position(1), max_sleep_ms(125000)
{ }
// Destructor
@@ -56,13 +56,13 @@ namespace openshot
using std::chrono::duration_cast;
// Types for storing time durations in whole and fractional milliseconds
using ms = std::chrono::milliseconds;
using double_ms = std::chrono::duration<double, ms::period>;
// Calculate on-screen time for a single frame in milliseconds
const auto frame_duration = double_ms(1000.0 / reader->info.fps.ToDouble());
using micro_sec = std::chrono::microseconds;
using double_micro_sec = std::chrono::duration<double, micro_sec::period>;
while (!threadShouldExit()) {
// Calculate on-screen time for a single frame in microseconds
const auto frame_duration = double_micro_sec(1000000.0 / reader->info.fps.ToDouble());
// Get the start time (to track how long a frame takes to render)
const auto time1 = std::chrono::high_resolution_clock::now();
@@ -101,16 +101,16 @@ namespace openshot
const auto time2 = std::chrono::high_resolution_clock::now();
// Determine how many microseconds it took to render the frame
const auto render_time = double_ms(time2 - time1);
const auto render_time = double_micro_sec(time2 - time1);
// Calculate the amount of time to sleep (by subtracting the render time)
auto sleep_time = duration_cast<ms>(frame_duration - render_time);
auto sleep_time = duration_cast<micro_sec>(frame_duration - render_time);
// Debug
ZmqLogger::Instance()->AppendDebugMethod("PlayerPrivate::run (determine sleep)", "video_frame_diff", video_frame_diff, "video_position", video_position, "audio_position", audio_position, "speed", speed, "render_time(ms)", render_time.count(), "sleep_time(ms)", sleep_time.count());
// Adjust drift (if more than a few frames off between audio and video)
if (video_frame_diff > 0 && reader->info.has_audio && reader->info.has_video) {
if (video_frame_diff > 6 && reader->info.has_audio && reader->info.has_video) {
// Since the audio and video threads are running independently,
// they will quickly get out of sync. To fix this, we calculate
// how far ahead or behind the video frame is, and adjust the amount
@@ -118,12 +118,20 @@ namespace openshot
// If a frame is ahead of the audio, we sleep for longer.
// If a frame is behind the audio, we sleep less (or not at all),
// in order for the video to catch up.
sleep_time += duration_cast<ms>(video_frame_diff * frame_duration);
sleep_time += duration_cast<micro_sec>((video_frame_diff / 2) * frame_duration);
}
else if (video_frame_diff < -10 && reader->info.has_audio && reader->info.has_video) {
// Skip frame(s) to catch up to the audio (if more than 10 frames behind)
video_position += std::fabs(video_frame_diff) / 2; // Seek forward 1/2 the difference
else if (video_frame_diff < -3 && reader->info.has_audio && reader->info.has_video) {
// Video frames are a bit behind, sleep less, we need to display frames more quickly
sleep_time = duration_cast<micro_sec>(sleep_time * 0.75); // Sleep a little less
}
else if (video_frame_diff < -9 && reader->info.has_audio && reader->info.has_video) {
// Video frames are very behind, no sleep, we need to display frames more quickly
sleep_time = sleep_time.zero(); // Don't sleep now... immediately go to next position
}
else if (video_frame_diff < -12 && reader->info.has_audio && reader->info.has_video) {
// Video frames are very behind, jump forward the entire distance (catch up with the audio position)
// Skip frame(s) to catch up to the audio
video_position += std::fabs(video_frame_diff);
sleep_time = sleep_time.zero(); // Don't sleep now... immediately go to next position
}

View File

@@ -77,10 +77,9 @@ namespace openshot
using ms = std::chrono::milliseconds;
using double_ms = std::chrono::duration<double, ms::period>;
// Calculate on-screen time for a single frame in milliseconds
const auto frame_duration = double_ms(1000.0 / reader->info.fps.ToDouble());
while (!threadShouldExit() && is_playing) {
// Calculate on-screen time for a single frame in milliseconds
const auto frame_duration = double_ms(1000.0 / reader->info.fps.ToDouble());
// Cache frames before the other threads need them
// Cache frames up to the max frames. Reset to current position

View File

@@ -11,15 +11,20 @@
//
// SPDX-License-Identifier: LGPL-3.0-or-later
#include "QtPlayer.h"
#include "AudioDevices.h"
#include "Clip.h"
#include "FFmpegReader.h"
#include "Timeline.h"
#include "QtPlayer.h"
#include "Qt/PlayerPrivate.h"
#include "Qt/VideoRenderer.h"
namespace openshot
{
using AudioDeviceList = std::vector<std::pair<std::string, std::string>>;
// Delegating constructor
QtPlayer::QtPlayer()
: QtPlayer::QtPlayer(new VideoRenderer())
@@ -59,12 +64,9 @@ namespace openshot
}
/// Get Audio Devices from JUCE
std::vector<openshot::AudioDeviceInfo> QtPlayer::GetAudioDeviceNames() {
if (reader && threads_started) {
return p->audioPlayback->getAudioDeviceNames();
} else {
return std::vector<openshot::AudioDeviceInfo>();
}
AudioDeviceList QtPlayer::GetAudioDeviceNames() {
AudioDevices devs;
return devs.getNames();
}
void QtPlayer::SetSource(const std::string &source)
@@ -219,4 +221,4 @@ namespace openshot
void QtPlayer::Volume(float new_volume) {
volume = new_volume;
}
}
}

View File

@@ -16,12 +16,15 @@
#include <iostream>
#include <vector>
#include "PlayerBase.h"
#include "Qt/PlayerPrivate.h"
#include "RendererBase.h"
namespace openshot
{
using AudioDeviceList = std::vector<std::pair<std::string, std::string>>;
/**
* @brief This class is used to playback a video from a reader.
*
@@ -46,7 +49,7 @@ namespace openshot
std::string GetError();
/// Get Audio Devices from JUCE
std::vector<openshot::AudioDeviceInfo> GetAudioDeviceNames();
AudioDeviceList GetAudioDeviceNames();
/// Play the video
void Play();

View File

@@ -34,6 +34,7 @@ Settings *Settings::Instance()
m_pInstance->HW_DE_DEVICE_SET = 0;
m_pInstance->HW_EN_DEVICE_SET = 0;
m_pInstance->PLAYBACK_AUDIO_DEVICE_NAME = "";
m_pInstance->PLAYBACK_AUDIO_DEVICE_TYPE = "";
m_pInstance->DEBUG_TO_STDERR = false;
auto env_debug = std::getenv("LIBOPENSHOT_DEBUG");
if (env_debug != nullptr)

View File

@@ -85,6 +85,9 @@ namespace openshot {
/// The audio device name to use during playback
std::string PLAYBACK_AUDIO_DEVICE_NAME = "";
/// The device type for the playback audio devices
std::string PLAYBACK_AUDIO_DEVICE_TYPE = "";
/// The current install path of OpenShot (needs to be set when using Timeline(path), since certain
/// paths depend on the location of OpenShot transitions and files)
std::string PATH_OPENSHOT_INSTALL = "";

View File

@@ -29,7 +29,7 @@ void STFT::process(juce::AudioBuffer<float> &block)
current_output_buffer_write_position = output_buffer_write_position;
current_output_buffer_read_position = output_buffer_read_position;
current_samples_since_last_FFT = samples_since_last_FFT;
for (int sample = 0; sample < num_samples; ++sample) {
const float input_sample = channel_data[sample];
@@ -82,7 +82,7 @@ void STFT::updateFftSize(const int new_fft_size)
frequency_domain_buffer.realloc(fft_size);
frequency_domain_buffer.clear(fft_size);
input_buffer_write_position = 0;
output_buffer_write_position = 0;
output_buffer_read_position = 0;
@@ -130,7 +130,7 @@ void STFT::updateWindow(const int new_window_type)
break;
}
}
float window_sum = 0.0f;
for (int sample = 0; sample < fft_size; ++sample)
window_sum += fft_window[sample];
@@ -189,4 +189,4 @@ void STFT::synthesis(const int channel)
current_output_buffer_write_position += hop_size;
if (current_output_buffer_write_position >= output_buffer_length)
current_output_buffer_write_position = 0;
}
}