Update documentation and examples

This commit is contained in:
Jonathan Thomas
2015-02-05 00:11:55 -06:00
parent 7e8ab6f5bf
commit e5b272294f
8 changed files with 160 additions and 40 deletions

View File

@@ -61,19 +61,24 @@ namespace openshot
* computing environment, without needing to share the entire video file. They also allow a
* chunk to be frame accurate, since seeking inaccuracies are removed.
*
* \code
* @code
* // This example demonstrates how to feed a reader into a ChunkWriter
* FFmpegReader *r = new FFmpegReader("MyAwesomeVideo.mp4"); // Get a reader
* r->Open(); // Open the reader
*
* // Create a ChunkWriter (and a folder location on your computer)
* ChunkWriter w("/folder_path_to_hold_chunks/", r);
*
* // Open the writer
* w.Open();
*
* // Write a block of frames to the ChunkWriter (from frame 1 to the end)
* w.WriteFrame(r, 1, r->info.video_length);
*
* // Close the ChunkWriter
* // Close the reader & writer
* w.Close();
* \endcode
* r->Close();
* @endcode
*/
class ChunkWriter : public WriterBase
{
@@ -82,6 +87,7 @@ namespace openshot
int chunk_count;
int chunk_size;
int frame_count;
bool is_open;
bool is_writing;
ReaderBase *local_reader;
FFmpegWriter *writer_thumb;
@@ -118,24 +124,30 @@ namespace openshot
/// Get the chunk size (number of frames to write in each chunk)
int GetChunkSize() { return chunk_size; };
/// Determine if writer is open or closed
bool IsOpen() { return is_open; };
/// Open writer
void Open() throw(InvalidFile, InvalidCodec);
/// @brief Set the chunk size (number of frames to write in each chunk)
/// @param new_size The number of frames to write in this chunk file
void SetChunkSize(int new_size) { chunk_size = new_size; };
/// @brief Add a frame to the stack waiting to be encoded.
/// @param frame The openshot::Frame object that needs to be written to this chunk file.
void WriteFrame(tr1::shared_ptr<Frame> frame);
void WriteFrame(tr1::shared_ptr<Frame> frame) throw(WriterClosed);
/// @brief Write a block of frames from a reader
/// @param start The starting frame number to write (of the reader passed into the constructor)
/// @param length The number of frames to write (of the reader passed into the constructor)
void WriteFrame(int start, int length);
void WriteFrame(int start, int length) throw(WriterClosed);
/// @brief Write a block of frames from a reader
/// @param reader The reader containing the frames you need
/// @param start The starting frame number to write
/// @param length The number of frames to write
void WriteFrame(ReaderBase* reader, int start, int length);
void WriteFrame(ReaderBase* reader, int start, int length) throw(WriterClosed);
};

View File

@@ -143,7 +143,7 @@ namespace openshot {
void Open() throw(InvalidFile, ReaderClosed);
/// @brief Set the current reader
/// @param reader The reader to be used by this clip
/// @param new_reader The reader to be used by this clip
void Reader(ReaderBase* new_reader);
/// Get the current reader

View File

@@ -70,21 +70,21 @@ namespace openshot {
Coordinate(float x, float y);
/// @brief Set the repeating Fraction (used internally on the timeline, to track changes to coordinates)
/// @param repeated The fraction representing how many times this coordinate Y value repeats (only used on the timeline)
/// @param is_repeated The fraction representing how many times this coordinate Y value repeats (only used on the timeline)
void Repeat(Fraction is_repeated) { repeated=is_repeated; }
/// Get the repeating Fraction (used internally on the timeline, to track changes to coordinates)
Fraction Repeat() { return repeated; }
/// @brief Set the increasing flag (used internally on the timeline, to track changes to coordinates)
/// @param increasing Indicates if this coordinate Y value is increasing (when compared to the previous coordinate)
/// @param is_increasing Indicates if this coordinate Y value is increasing (when compared to the previous coordinate)
void IsIncreasing(bool is_increasing) { increasing = is_increasing; }
/// Get the increasing flag (used internally on the timeline, to track changes to coordinates)
bool IsIncreasing() { return increasing; }
/// @brief Set the delta / difference between previous coordinate value (used internally on the timeline, to track changes to coordinates)
/// @param delta Indicates how much this Y value differs from the previous Y value
/// @param new_delta Indicates how much this Y value differs from the previous Y value
void Delta(float new_delta) { delta=new_delta; }
/// Get the delta / difference between previous coordinate value (used internally on the timeline, to track changes to coordinates)

View File

@@ -260,6 +260,15 @@ namespace openshot {
virtual ~TooManySeeks() throw () {}
};
/// Exception when a writer is closed, and a frame is written (i.e. WriteFrame() is called before Open() or after Close())
class WriterClosed : public BaseException
{
public:
// Path of the output file (or chunk folder) the closed writer was targeting
string file_path;
/// @brief Constructor
/// @param message A human-readable message describing the error
/// @param file_path The path of the file the closed writer was writing to
WriterClosed(string message, string file_path)
: BaseException(message), file_path(file_path) { }
virtual ~WriterClosed() throw () {}
};
}
#endif

View File

@@ -44,6 +44,7 @@
#include <unistd.h>
#include "Magick++.h"
#include "JuceLibraryCode/JuceHeader.h"
#include "ChannelLayouts.h"
#include "AudioBufferSource.h"
#include "AudioResampler.h"
#include "Fraction.h"
@@ -115,8 +116,10 @@ namespace openshot
tr1::shared_ptr<juce::AudioSampleBuffer> audio;
Fraction pixel_ratio;
int channels;
ChannelLayout channel_layout;
int width;
int height;
int sample_rate;
public:
int number; ///< This is the frame number (starting at 1)
@@ -175,6 +178,13 @@ namespace openshot
/// Experimental method to add the frame number on top of the image
void AddOverlayNumber(int overlay_number);
/// Channel Layout of audio samples. A frame needs to keep track of this, since Writers do not always
/// know the original channel layout of a frame's audio samples (i.e. mono, stereo, 5 point surround, etc...)
ChannelLayout ChannelsLayout();
/// Set the channel layout of audio samples (i.e. mono, stereo, 5 point surround, etc...)
void ChannelsLayout(ChannelLayout new_channel_layout) { channel_layout = new_channel_layout; };
/// Clear the waveform image (and deallocate its memory)
void ClearWaveform();
@@ -191,7 +201,10 @@ namespace openshot
float* GetAudioSamples(int channel);
/// Get an array of sample data (all channels interleaved together), using any sample rate
float* GetInterleavedAudioSamples(int original_sample_rate, int new_sample_rate, AudioResampler* resampler, int* sample_count);
float* GetInterleavedAudioSamples(int new_sample_rate, AudioResampler* resampler, int* sample_count);
/// Get a planar array of sample data, using any sample rate
float* GetPlanarAudioSamples(int new_sample_rate, AudioResampler* resampler, int* sample_count);
/// Get number of audio channels
int GetAudioChannelsCount();
@@ -207,6 +220,9 @@ namespace openshot
/// Get pointer to Magick++ image object
tr1::shared_ptr<Magick::Image> GetImage();
/// Get the Pixel Aspect Ratio
Fraction GetPixelRatio() { return pixel_ratio; };
/// Get pixel data (as packets)
const Magick::PixelPacket* GetPixels();
@@ -234,6 +250,12 @@ namespace openshot
/// Rotate the image
void Rotate(float degrees);
/// Get the original sample rate of this frame's audio data
int SampleRate();
/// Set the original sample rate of this frame's audio data
void SampleRate(int orig_sample_rate) { sample_rate = orig_sample_rate; };
/// Save the frame image to the specified path. The image format is determined from the extension (i.e. image.PNG, image.JPEG)
void Save(string path, float scale);
@@ -252,7 +274,7 @@ namespace openshot
void TransparentColors(string color, double fuzz);
/// Play audio samples for this frame
void Play(int sample_rate);
void Play();
};
}

View File

@@ -31,7 +31,7 @@ using namespace openshot;
ChunkWriter::ChunkWriter(string path, ReaderBase *reader) throw (InvalidFile, InvalidFormat, InvalidCodec, InvalidOptions, OutOfMemory) :
local_reader(reader), path(path), chunk_size(24*3), chunk_count(1), frame_count(1), is_writing(false),
default_extension(".webm"), default_vcodec("libvpx"), default_acodec("libvorbis"), last_frame_needed(false)
default_extension(".webm"), default_vcodec("libvpx"), default_acodec("libvorbis"), last_frame_needed(false), is_open(false)
{
// Change codecs to default
info.vcodec = default_vcodec;
@@ -74,8 +74,12 @@ string ChunkWriter::get_chunk_path(int chunk_number, string folder, string exten
}
// Add a frame to the queue waiting to be encoded.
void ChunkWriter::WriteFrame(tr1::shared_ptr<Frame> frame)
void ChunkWriter::WriteFrame(tr1::shared_ptr<Frame> frame) throw(WriterClosed)
{
// Check for open writer (or throw exception)
if (!is_open)
throw WriterClosed("The ChunkWriter is closed. Call Open() before calling this method.", path);
// Check if currently writing chunks?
if (!is_writing)
{
@@ -85,19 +89,19 @@ void ChunkWriter::WriteFrame(tr1::shared_ptr<Frame> frame)
// Create FFmpegWriter (FINAL quality)
create_folder(get_chunk_path(chunk_count, "final", ""));
writer_final = new FFmpegWriter(get_chunk_path(chunk_count, "final", default_extension));
writer_final->SetAudioOptions(true, default_acodec, info.sample_rate, info.channels, 128000);
writer_final->SetAudioOptions(true, default_acodec, info.sample_rate, info.channels, info.channel_layout, 128000);
writer_final->SetVideoOptions(true, default_vcodec, info.fps, info.width, info.height, info.pixel_ratio, false, false, info.video_bit_rate);
// Create FFmpegWriter (PREVIEW quality)
create_folder(get_chunk_path(chunk_count, "preview", ""));
writer_preview = new FFmpegWriter(get_chunk_path(chunk_count, "preview", default_extension));
writer_preview->SetAudioOptions(true, default_acodec, info.sample_rate, info.channels, 128000);
writer_preview->SetAudioOptions(true, default_acodec, info.sample_rate, info.channels, info.channel_layout, 128000);
writer_preview->SetVideoOptions(true, default_vcodec, info.fps, info.width * 0.5, info.height * 0.5, info.pixel_ratio, false, false, info.video_bit_rate * 0.5);
// Create FFmpegWriter (LOW quality)
create_folder(get_chunk_path(chunk_count, "thumb", ""));
writer_thumb = new FFmpegWriter(get_chunk_path(chunk_count, "thumb", default_extension));
writer_thumb->SetAudioOptions(true, default_acodec, info.sample_rate, info.channels, 128000);
writer_thumb->SetAudioOptions(true, default_acodec, info.sample_rate, info.channels, info.channel_layout, 128000);
writer_thumb->SetVideoOptions(true, default_vcodec, info.fps, info.width * 0.25, info.height * 0.25, info.pixel_ratio, false, false, info.video_bit_rate * 0.25);
// Prepare Streams
@@ -189,7 +193,7 @@ void ChunkWriter::WriteFrame(tr1::shared_ptr<Frame> frame)
// Write a block of frames from a reader
void ChunkWriter::WriteFrame(ReaderBase* reader, int start, int length)
void ChunkWriter::WriteFrame(ReaderBase* reader, int start, int length) throw(WriterClosed)
{
// Loop through each frame (and encode it)
for (int number = start; number <= length; number++)
@@ -203,7 +207,7 @@ void ChunkWriter::WriteFrame(ReaderBase* reader, int start, int length)
}
// Write a block of frames from the local cached reader
void ChunkWriter::WriteFrame(int start, int length)
void ChunkWriter::WriteFrame(int start, int length) throw(WriterClosed)
{
// Loop through each frame (and encode it)
for (int number = start; number <= length; number++)
@@ -252,6 +256,9 @@ void ChunkWriter::Close()
is_writing = false;
}
// close writer
is_open = false;
// Reset frame counters
chunk_count = 0;
frame_count = 0;
@@ -288,4 +295,10 @@ bool ChunkWriter::is_chunk_valid()
return true;
}
// Open the writer. No resources are acquired here: the underlying
// FFmpegWriter instances are created lazily, per-chunk, inside
// WriteFrame(), so Open() only raises the flag that WriteFrame()
// checks before accepting frames (WriterClosed is thrown otherwise).
void ChunkWriter::Open() throw(InvalidFile, InvalidCodec)
{
// Mark the writer as open, allowing WriteFrame() to proceed
is_open = true;
}

View File

@@ -265,7 +265,7 @@ void Clip::reverse_buffer(juce::AudioSampleBuffer* buffer)
{
int n=0;
for (int s = number_of_samples - 1; s >= 0; s--, n++)
reversed->getSampleData(channel)[n] = buffer->getSampleData(channel)[s];
reversed->getWritePointer(channel)[n] = buffer->getWritePointer(channel)[s];
}
// Copy the samples back to the original array
@@ -273,7 +273,7 @@ void Clip::reverse_buffer(juce::AudioSampleBuffer* buffer)
// Loop through channels, and get audio samples
for (int channel = 0; channel < channels; channel++)
// Get the audio samples for this channel
buffer->addFrom(channel, 0, reversed->getSampleData(channel), number_of_samples, 1.0f);
buffer->addFrom(channel, 0, reversed->getReadPointer(channel), number_of_samples, 1.0f);
delete reversed;
reversed = NULL;
@@ -349,7 +349,7 @@ tr1::shared_ptr<Frame> Clip::get_time_mapped_frame(tr1::shared_ptr<Frame> frame,
audio_cache->clear();
for (int channel = 0; channel < channels; channel++)
// Get the audio samples for this channel
audio_cache->addFrom(channel, 0, buffer->getSampleData(channel), buffer->getNumSamples(), 1.0f);
audio_cache->addFrom(channel, 0, buffer->getReadPointer(channel), buffer->getNumSamples(), 1.0f);
}
// Get the length of the resampled buffer
@@ -361,7 +361,7 @@ tr1::shared_ptr<Frame> Clip::get_time_mapped_frame(tr1::shared_ptr<Frame> frame,
start -= 1;
for (int channel = 0; channel < channels; channel++)
// Add new (slower) samples, to the frame object
new_frame->AddAudio(true, channel, 0, audio_cache->getSampleData(channel, start), number_of_samples, 1.0f);
new_frame->AddAudio(true, channel, 0, audio_cache->getReadPointer(channel, start), number_of_samples, 1.0f);
// Clean up if the final section
if (time.GetRepeatFraction(frame_number).num == time.GetRepeatFraction(frame_number).den)
@@ -409,7 +409,7 @@ tr1::shared_ptr<Frame> Clip::get_time_mapped_frame(tr1::shared_ptr<Frame> frame,
// Copy the samples to
for (int channel = 0; channel < channels; channel++)
// Get the audio samples for this channel
samples->addFrom(channel, start, delta_samples->getSampleData(channel), number_of_delta_samples, 1.0f);
samples->addFrom(channel, start, delta_samples->getReadPointer(channel), number_of_delta_samples, 1.0f);
// Clean up
delete delta_samples;
@@ -439,7 +439,7 @@ tr1::shared_ptr<Frame> Clip::get_time_mapped_frame(tr1::shared_ptr<Frame> frame,
// Copy the samples to
for (int channel = 0; channel < channels; channel++)
// Get the audio samples for this channel
samples->addFrom(channel, start, delta_samples->getSampleData(channel), number_of_delta_samples, 1.0f);
samples->addFrom(channel, start, delta_samples->getReadPointer(channel), number_of_delta_samples, 1.0f);
// Clean up
delete delta_samples;
@@ -460,7 +460,7 @@ tr1::shared_ptr<Frame> Clip::get_time_mapped_frame(tr1::shared_ptr<Frame> frame,
// Add the newly resized audio samples to the current frame
for (int channel = 0; channel < channels; channel++)
// Add new (slower) samples, to the frame object
new_frame->AddAudio(true, channel, 0, buffer->getSampleData(channel), number_of_samples, 1.0f);
new_frame->AddAudio(true, channel, 0, buffer->getReadPointer(channel), number_of_samples, 1.0f);
// Clean up
buffer = NULL;
@@ -482,7 +482,7 @@ tr1::shared_ptr<Frame> Clip::get_time_mapped_frame(tr1::shared_ptr<Frame> frame,
// Add reversed samples to the frame object
for (int channel = 0; channel < channels; channel++)
new_frame->AddAudio(true, channel, 0, samples->getSampleData(channel), number_of_samples, 1.0f);
new_frame->AddAudio(true, channel, 0, samples->getReadPointer(channel), number_of_samples, 1.0f);
}

View File

@@ -31,7 +31,8 @@ using namespace std;
using namespace openshot;
// Constructor - blank frame (300x200 blank image, 48kHz audio silence)
Frame::Frame() : number(1), pixel_ratio(1,1), channels(2), width(1), height(1)
Frame::Frame() : number(1), pixel_ratio(1,1), channels(2), width(1), height(1),
channel_layout(LAYOUT_STEREO), sample_rate(44100)
{
// Init the image magic and audio buffer
image = tr1::shared_ptr<Magick::Image>(new Magick::Image(Magick::Geometry(1,1), Magick::Color("red")));
@@ -43,7 +44,8 @@ Frame::Frame() : number(1), pixel_ratio(1,1), channels(2), width(1), height(1)
// Constructor - image only (48kHz audio silence)
Frame::Frame(int number, int width, int height, string color)
: number(number), pixel_ratio(1,1), channels(2), width(width), height(height)
: number(number), pixel_ratio(1,1), channels(2), width(width), height(height),
channel_layout(LAYOUT_STEREO), sample_rate(44100)
{
// Init the image magic and audio buffer
image = tr1::shared_ptr<Magick::Image>(new Magick::Image(Magick::Geometry(1, 1), Magick::Color(color)));
@@ -55,7 +57,8 @@ Frame::Frame(int number, int width, int height, string color)
// Constructor - image only from pixel array (48kHz audio silence)
Frame::Frame(int number, int width, int height, const string map, const Magick::StorageType type, const void *pixels)
: number(number), pixel_ratio(1,1), channels(2), width(width), height(height)
: number(number), pixel_ratio(1,1), channels(2), width(width), height(height),
channel_layout(LAYOUT_STEREO), sample_rate(44100)
{
// Init the image magic and audio buffer
image = tr1::shared_ptr<Magick::Image>(new Magick::Image(width, height, map, type, pixels));
@@ -67,7 +70,8 @@ Frame::Frame(int number, int width, int height, const string map, const Magick::
// Constructor - audio only (300x200 blank image)
Frame::Frame(int number, int samples, int channels) :
number(number), pixel_ratio(1,1), channels(channels), width(1), height(1)
number(number), pixel_ratio(1,1), channels(channels), width(1), height(1),
channel_layout(LAYOUT_STEREO), sample_rate(44100)
{
// Init the image magic and audio buffer
image = tr1::shared_ptr<Magick::Image>(new Magick::Image(Magick::Geometry(1, 1), Magick::Color("white")));
@@ -79,7 +83,8 @@ Frame::Frame(int number, int samples, int channels) :
// Constructor - image & audio
Frame::Frame(int number, int width, int height, string color, int samples, int channels)
: number(number), pixel_ratio(1,1), channels(channels), width(width), height(height)
: number(number), pixel_ratio(1,1), channels(channels), width(width), height(height),
channel_layout(LAYOUT_STEREO), sample_rate(44100)
{
// Init the image magic and audio buffer
image = tr1::shared_ptr<Magick::Image>(new Magick::Image(Magick::Geometry(1, 1), Magick::Color(color)));
@@ -117,6 +122,7 @@ void Frame::DeepCopy(const Frame& other)
audio = tr1::shared_ptr<juce::AudioSampleBuffer>(new juce::AudioSampleBuffer(*(other.audio)));
pixel_ratio = Fraction(other.pixel_ratio.num, other.pixel_ratio.den);
channels = other.channels;
channel_layout = other.channel_layout;
if (other.wave_image)
wave_image = tr1::shared_ptr<Magick::Image>(new Magick::Image(*(other.wave_image)));
@@ -197,7 +203,7 @@ tr1::shared_ptr<Magick::Image> Frame::GetWaveform(int width, int height, int Red
lines.push_back(Magick::DrawableStrokeWidth(1));
// Get audio for this channel
float *samples = audio->getSampleData(channel);
const float *samples = audio->getReadPointer(channel);
for (int sample = 0; sample < audio->getNumSamples(); sample+=step, X++)
{
@@ -296,11 +302,11 @@ void Frame::DisplayWaveform()
float* Frame::GetAudioSamples(int channel)
{
// return JUCE audio data for this channel
return audio->getSampleData(channel);
return audio->getWritePointer(channel);
}
// Get an array of sample data (all channels interleaved together), using any sample rate
float* Frame::GetInterleavedAudioSamples(int original_sample_rate, int new_sample_rate, AudioResampler* resampler, int* sample_count)
// Get a planar array of sample data, using any sample rate
float* Frame::GetPlanarAudioSamples(int new_sample_rate, AudioResampler* resampler, int* sample_count)
{
float *output = NULL;
AudioSampleBuffer *buffer(audio.get());
@@ -308,10 +314,56 @@ float* Frame::GetInterleavedAudioSamples(int original_sample_rate, int new_sampl
int num_of_samples = audio->getNumSamples();
// Resample to new sample rate (if needed)
if (new_sample_rate != original_sample_rate)
if (new_sample_rate != sample_rate)
{
// YES, RESAMPLE AUDIO
resampler->SetBuffer(audio.get(), original_sample_rate, new_sample_rate);
resampler->SetBuffer(audio.get(), sample_rate, new_sample_rate);
// Resample data, and return new buffer pointer
buffer = resampler->GetResampledBuffer();
// Update num_of_samples
num_of_samples = buffer->getNumSamples();
}
// INTERLEAVE all samples together (channel 1 + channel 2 + channel 1 + channel 2, etc...)
output = new float[num_of_channels * num_of_samples];
int position = 0;
// Loop through samples in each channel (combining them)
for (int channel = 0; channel < num_of_channels; channel++)
{
for (int sample = 0; sample < num_of_samples; sample++)
{
// Add sample to output array
output[position] = buffer->getReadPointer(channel)[sample];
// increment position
position++;
}
}
// Update sample count (since it might have changed due to resampling)
*sample_count = num_of_samples;
// return combined array
return output;
}
// Get an array of sample data (all channels interleaved together), using any sample rate
float* Frame::GetInterleavedAudioSamples(int new_sample_rate, AudioResampler* resampler, int* sample_count)
{
float *output = NULL;
AudioSampleBuffer *buffer(audio.get());
int num_of_channels = audio->getNumChannels();
int num_of_samples = audio->getNumSamples();
// Resample to new sample rate (if needed)
if (new_sample_rate != sample_rate)
{
// YES, RESAMPLE AUDIO
resampler->SetBuffer(audio.get(), sample_rate, new_sample_rate);
// Resample data, and return new buffer pointer
buffer = resampler->GetResampledBuffer();
@@ -330,7 +382,7 @@ float* Frame::GetInterleavedAudioSamples(int original_sample_rate, int new_sampl
for (int channel = 0; channel < num_of_channels; channel++)
{
// Add sample to output array
output[position] = buffer->getSampleData(channel)[sample];
output[position] = buffer->getReadPointer(channel)[sample];
// increment position
position++;
@@ -436,6 +488,18 @@ int Frame::GetWidth()
//return image->columns();
}
// Get the original sample rate (Hz) of this frame's audio data.
// A frame keeps track of this since writers do not always know the
// rate the samples were captured at; it can be changed via SampleRate(int).
int Frame::SampleRate()
{
return sample_rate;
}
// Get the channel layout of this frame's audio samples (i.e. mono, stereo, 5 point surround, etc...)
ChannelLayout Frame::ChannelsLayout()
{
return channel_layout;
}
// Make colors in a specific range transparent
void Frame::TransparentColors(string color, double fuzz)
{
@@ -819,7 +883,7 @@ tr1::shared_ptr<Magick::Image> Frame::GetImage()
}
// Play audio samples for this frame
void Frame::Play(int sample_rate)
void Frame::Play()
{
// Check if samples are present
if (!audio->getNumSamples())