diff --git a/include/FFmpegWriter.h b/include/FFmpegWriter.h
index be72d97b..201d7477 100644
--- a/include/FFmpegWriter.h
+++ b/include/FFmpegWriter.h
@@ -73,7 +73,8 @@ namespace openshot
* to generate openshot::Frame objects needed for writing. Be sure to use valid bit rates, frame
* rates, and sample rates (each format / codec has a limited # of valid options).
*
- * @code
+ * @code SIMPLE EXAMPLE
+ *
* // Create a reader for a video
* FFmpegReader r("MyAwesomeVideo.webm");
* r.Open(); // Open the reader
@@ -85,16 +86,54 @@ namespace openshot
* w.SetAudioOptions(true, "libvorbis", 44100, 2, 128000); // Sample Rate: 44100, Channels: 2, Bitrate: 128000
* w.SetVideoOptions(true, "libvpx", openshot::Fraction(24,1), 720, 480, openshot::Fraction(1,1), false, false, 300000); // FPS: 24, Size: 720x480, Pixel Ratio: 1/1, Bitrate: 300000
*
- * // Prepare Streams
- * w.PrepareStreams();
- *
- * // Write header
- * w.WriteHeader();
+ * // Open the writer
+ * w.Open();
*
* // Write all frames from the reader
* w.WriteFrame(&r, 1, r.info.video_length);
*
- * // Write Footer
+ * // Close the reader & writer
+ * w.Close();
+ * r.Close();
+ * @endcode
+ *
+ * Here is a more advanced example, which sets some additional (and optional) encoding
+ * options.
+ *
+ * @code ADVANCED WRITER EXAMPLE
+ *
+ * // Create a reader for a video
+ * FFmpegReader r("MyAwesomeVideo.webm");
+ * r.Open(); // Open the reader
+ *
+ * // Create a writer (which will create a WebM video)
+ * FFmpegWriter w("/home/jonathan/NewVideo.webm");
+ *
+ * // Set options
+ * w.SetAudioOptions(true, "libvorbis", 44100, 2, 128000); // Sample Rate: 44100, Channels: 2, Bitrate: 128000
+ * w.SetVideoOptions(true, "libvpx", openshot::Fraction(24,1), 720, 480, openshot::Fraction(1,1), false, false, 300000); // FPS: 24, Size: 720x480, Pixel Ratio: 1/1, Bitrate: 300000
+ *
+ * // Prepare Streams (Optional method, but it must be called before any SetOption calls)
+ * w.PrepareStreams();
+ *
+ * // Set some specific encoding options (Optional methods)
+ * w.SetOption(VIDEO_STREAM, "qmin", "2" );
+ * w.SetOption(VIDEO_STREAM, "qmax", "30" );
+ * w.SetOption(VIDEO_STREAM, "crf", "10" );
+ * w.SetOption(VIDEO_STREAM, "rc_min_rate", "2000000" );
+ * w.SetOption(VIDEO_STREAM, "rc_max_rate", "4000000" );
+ * w.SetOption(VIDEO_STREAM, "max_b_frames", "10" );
+ *
+ * // Write the header of the video file
+ * w.WriteHeader();
+ *
+ * // Open the writer
+ * w.Open();
+ *
+ * // Write all frames from the reader
+ * w.WriteFrame(&r, 1, r.info.video_length);
+ *
+ * // Write the trailer of the video file
* w.WriteTrailer();
*
* // Close the reader & writer
@@ -108,9 +147,14 @@ namespace openshot
string path;
int cache_size;
bool is_writing;
+ bool is_open;
int64 write_video_count;
int64 write_audio_count;
+ bool prepare_streams;
+ bool write_header;
+ bool write_trailer;
+
AVOutputFormat *fmt;
AVFormatContext *oc;
AVStream *audio_st, *video_st;
@@ -130,6 +174,8 @@ namespace openshot
int initial_audio_input_frame_size;
int audio_input_position;
AudioResampler *resampler;
+ AVAudioResampleContext *avr;
+ AVAudioResampleContext *avr_planar;
/* Resample options */
int original_sample_rate;
@@ -174,6 +220,11 @@ namespace openshot
/// initialize streams
void initialize_streams();
+ /// @brief Init a collection of software rescalers (thread safe)
+ /// @param source_width The source width of the image scalers (used to cache a bunch of scalers)
+ /// @param source_height The source height of the image scalers (used to cache a bunch of scalers)
+ void InitScalers(int source_width, int source_height);
+
/// open audio codec
void open_audio(AVFormatContext *oc, AVStream *st);
@@ -194,7 +245,8 @@ namespace openshot
public:
- /// Constructor for FFmpegWriter. Throws one of the following exceptions.
+ /// @brief Constructor for FFmpegWriter. Throws one of the following exceptions.
+ /// @param path The file path of the video file you want to open and write
FFmpegWriter(string path) throw(InvalidFile, InvalidFormat, InvalidCodec, InvalidOptions, OutOfMemory);
/// Close the writer
@@ -203,44 +255,76 @@ namespace openshot
/// Get the cache size (number of frames to queue before writing)
int GetCacheSize() { return cache_size; };
- /// Init a collection of software rescalers (thread safe)
- void InitScalers(int source_width, int source_height);
+ /// Determine if writer is open or closed
+ bool IsOpen() { return is_open; };
+
+ /// Open writer
+ void Open() throw(InvalidFile, InvalidCodec);
/// Output the ffmpeg info about this format, streams, and codecs (i.e. dump format)
void OutputStreamInfo();
- /// Prepare & initialize streams and open codecs
+ /// @brief Prepare & initialize streams and open codecs. This method is called automatically
+ /// by the Open() method if this method has not yet been called.
void PrepareStreams();
/// Remove & deallocate all software scalers
void RemoveScalers();
- /// Set audio resample options
+ /// @brief Set audio resample options
+ /// @param sample_rate The number of samples per second of the audio
+ /// @param channels The number of audio channels
void ResampleAudio(int sample_rate, int channels);
- /// Set audio export options
- void SetAudioOptions(bool has_audio, string codec, int sample_rate, int channels, int bit_rate);
+ /// @brief Set audio export options
+ /// @param has_audio Does this file need an audio stream?
+ /// @param codec The codec used to encode the audio for this file
+ /// @param sample_rate The number of audio samples per second (i.e. sample rate) needed in this file
+ /// @param channels The number of audio channels needed in this file
+ /// @param channel_layout The 'layout' of audio channels (i.e. mono, stereo, surround, etc...)
+ /// @param bit_rate The audio bit rate used during encoding
+ void SetAudioOptions(bool has_audio, string codec, int sample_rate, int channels, ChannelLayout channel_layout, int bit_rate);
- /// Set the cache size (number of frames to queue before writing)
+ /// @brief Set the cache size
+ /// @param new_size The number of frames to queue before writing to the file
void SetCacheSize(int new_size) { cache_size = new_size; };
- /// Set video export options
+ /// @brief Set video export options
+ /// @param has_video Does this file need a video stream?
+ /// @param codec The codec used to encode the images in this video
+ /// @param fps The number of frames per second
+ /// @param width The width in pixels of this video
+ /// @param height The height in pixels of this video
+ /// @param pixel_ratio The shape of the pixels represented as a openshot::Fraction (1x1 is most common / square pixels)
+ /// @param interlaced Does this video need to be interlaced?
+ /// @param top_field_first Should the top field be rendered first (only applies to interlaced video)?
+ /// @param bit_rate The video bit rate used during encoding
void SetVideoOptions(bool has_video, string codec, Fraction fps, int width, int height,
Fraction pixel_ratio, bool interlaced, bool top_field_first, int bit_rate);
- /// Set custom options (some codecs accept additional params)
- void SetOption(StreamType stream, string name, string value);
+ /// @brief Set custom options (some codecs accept additional params). This must be called after the
+ /// PrepareStreams() method, otherwise the streams have not been initialized yet.
+ /// @param stream The stream (openshot::StreamType) this option should apply to
+ /// @param name The name of the option you want to set (i.e. qmin, qmax, etc...)
+ /// @param value The new value of this option
+ void SetOption(StreamType stream, string name, string value) throw(NoStreamsFound, InvalidOptions);
- /// Write the file header (after the options are set)
+ /// @brief Write the file header (after the options are set). This method is called automatically
+ /// by the Open() method if this method has not yet been called.
void WriteHeader();
- /// Add a frame to the stack waiting to be encoded.
- void WriteFrame(tr1::shared_ptr frame);
+ /// @brief Add a frame to the stack waiting to be encoded.
+ /// @param frame The openshot::Frame object to write to this video file
+ void WriteFrame(tr1::shared_ptr frame) throw(WriterClosed);
- /// Write a block of frames from a reader
- void WriteFrame(ReaderBase* reader, int start, int length);
+ /// @brief Write a block of frames from a reader
+ /// @param reader A openshot::ReaderBase object which will provide frames to be written
+ /// @param start The starting frame number of the reader
+ /// @param length The number of frames to write
+ void WriteFrame(ReaderBase* reader, int start, int length) throw(WriterClosed);
- /// Write the file trailer (after all frames are written)
+ /// @brief Write the file trailer (after all frames are written). This is called automatically
+ /// by the Close() method if this method has not yet been called.
void WriteTrailer();
};
diff --git a/src/FFmpegWriter.cpp b/src/FFmpegWriter.cpp
index c6ae4669..4665ce73 100644
--- a/src/FFmpegWriter.cpp
+++ b/src/FFmpegWriter.cpp
@@ -37,8 +37,10 @@ FFmpegWriter::FFmpegWriter(string path) throw (InvalidFile, InvalidFormat, Inval
audio_outbuf(NULL), audio_outbuf_size(0), audio_input_frame_size(0), audio_input_position(0),
initial_audio_input_frame_size(0), resampler(NULL), img_convert_ctx(NULL), cache_size(8), num_of_rescalers(32),
rescaler_position(0), video_codec(NULL), audio_codec(NULL), is_writing(false), write_video_count(0), write_audio_count(0),
- original_sample_rate(0), original_channels(0)
+ original_sample_rate(0), original_channels(0), avr(NULL), avr_planar(NULL), is_open(false), prepare_streams(false),
+ write_header(false), write_trailer(false)
{
+
// Disable audio & video (so they can be independently enabled)
info.has_audio = false;
info.has_video = false;
@@ -50,6 +52,21 @@ FFmpegWriter::FFmpegWriter(string path) throw (InvalidFile, InvalidFormat, Inval
auto_detect_format();
}
+// Open the writer
+void FFmpegWriter::Open() throw(InvalidFile, InvalidCodec)
+{
+ // Open the writer
+ is_open = true;
+
+ // Prepare streams (if needed)
+ if (!prepare_streams)
+ PrepareStreams();
+
+ // Write header (if needed)
+ if (!write_header)
+ WriteHeader();
+}
+
// auto detect format (from path)
void FFmpegWriter::auto_detect_format()
{
@@ -79,6 +96,9 @@ void FFmpegWriter::auto_detect_format()
// initialize streams
void FFmpegWriter::initialize_streams()
{
+ #pragma omp critical (debug_output)
+ AppendDebugMethod("FFmpegWriter::initialize_streams", "fmt->video_codec", fmt->video_codec, "fmt->audio_codec", fmt->audio_codec, "CODEC_ID_NONE", CODEC_ID_NONE, "", -1, "", -1, "", -1);
+
// Add the audio and video streams using the default format codecs and initialize the codecs
video_st = NULL;
audio_st = NULL;
@@ -144,12 +164,15 @@ void FFmpegWriter::SetVideoOptions(bool has_video, string codec, Fraction fps, i
info.display_ratio.num = size.num;
info.display_ratio.den = size.den;
+ #pragma omp critical (debug_output)
+ AppendDebugMethod("FFmpegWriter::SetVideoOptions (" + codec + ")", "width", width, "height", height, "size.num", size.num, "size.den", size.den, "fps.num", fps.num, "fps.den", fps.den);
+
// Enable / Disable video
info.has_video = has_video;
}
// Set audio export options
-void FFmpegWriter::SetAudioOptions(bool has_audio, string codec, int sample_rate, int channels, int bit_rate)
+void FFmpegWriter::SetAudioOptions(bool has_audio, string codec, int sample_rate, int channels, ChannelLayout channel_layout, int bit_rate)
{
// Set audio options
if (codec.length() > 0)
@@ -172,6 +195,7 @@ void FFmpegWriter::SetAudioOptions(bool has_audio, string codec, int sample_rate
info.channels = channels;
if (bit_rate > 999)
info.audio_bit_rate = bit_rate;
+ info.channel_layout = channel_layout;
// init resample options (if zero)
if (original_sample_rate == 0)
@@ -179,21 +203,26 @@ void FFmpegWriter::SetAudioOptions(bool has_audio, string codec, int sample_rate
if (original_channels == 0)
original_channels = info.channels;
+ #pragma omp critical (debug_output)
+ AppendDebugMethod("FFmpegWriter::SetAudioOptions (" + codec + ")", "sample_rate", sample_rate, "channels", channels, "bit_rate", bit_rate, "", -1, "", -1, "", -1);
+
// Enable / Disable audio
info.has_audio = has_audio;
}
// Set custom options (some codecs accept additional params)
-void FFmpegWriter::SetOption(StreamType stream, string name, string value)
+void FFmpegWriter::SetOption(StreamType stream, string name, string value) throw(NoStreamsFound, InvalidOptions)
{
// Declare codec context
AVCodecContext *c = NULL;
stringstream convert(value);
- if (info.has_video && stream == VIDEO_STREAM)
+ if (info.has_video && stream == VIDEO_STREAM && video_st)
c = video_st->codec;
- else if (info.has_audio && stream == AUDIO_STREAM)
+ else if (info.has_audio && stream == AUDIO_STREAM && audio_st)
c = audio_st->codec;
+ else
+ throw NoStreamsFound("The stream was not found. Be sure to call PrepareStreams() first.", path);
// Init AVOption
const AVOption *option = NULL;
@@ -259,6 +288,10 @@ void FFmpegWriter::SetOption(StreamType stream, string name, string value)
#else
av_opt_set (c->priv_data, name.c_str(), value.c_str(), 0);
#endif
+
+ #pragma omp critical (debug_output)
+ AppendDebugMethod("FFmpegWriter::SetOption (" + (string)name + ")", "stream == VIDEO_STREAM", stream == VIDEO_STREAM, "", -1, "", -1, "", -1, "", -1, "", -1);
+
}
else
throw InvalidOptions("The option is not valid for this codec.", path);
@@ -271,6 +304,9 @@ void FFmpegWriter::PrepareStreams()
if (!info.has_audio && !info.has_video)
throw InvalidOptions("No video or audio options have been set. You must set has_video or has_audio (or both).", path);
+ #pragma omp critical (debug_output)
+ AppendDebugMethod("FFmpegWriter::PrepareStreams [" + path + "]", "info.has_audio", info.has_audio, "info.has_video", info.has_video, "", -1, "", -1, "", -1, "", -1);
+
// Initialize the streams (i.e. add the streams)
initialize_streams();
@@ -279,6 +315,9 @@ void FFmpegWriter::PrepareStreams()
open_video(oc, video_st);
if (info.has_audio && audio_st)
open_audio(oc, audio_st);
+
+ // Mark as 'prepared'
+ prepare_streams = true;
}
// Write the file header (after the options are set)
@@ -296,11 +335,21 @@ void FFmpegWriter::WriteHeader()
// Write the stream header, if any
// TODO: add avoptions / parameters instead of NULL
avformat_write_header(oc, NULL);
+
+ // Mark as 'written'
+ write_header = true;
+
+ #pragma omp critical (debug_output)
+ AppendDebugMethod("FFmpegWriter::WriteHeader", "", -1, "", -1, "", -1, "", -1, "", -1, "", -1);
}
// Add a frame to the queue waiting to be encoded.
-void FFmpegWriter::WriteFrame(tr1::shared_ptr frame)
+void FFmpegWriter::WriteFrame(tr1::shared_ptr frame) throw(WriterClosed)
{
+ // Check for open writer (or throw exception)
+ if (!is_open)
+ throw WriterClosed("The FFmpegWriter is closed. Call Open() before calling this method.", path);
+
// Add frame pointer to "queue", waiting to be processed the next
// time the WriteFrames() method is called.
if (info.has_video && video_st)
@@ -309,6 +358,9 @@ void FFmpegWriter::WriteFrame(tr1::shared_ptr frame)
if (info.has_audio && audio_st)
spooled_audio_frames.push_back(frame);
+ #pragma omp critical (debug_output)
+ AppendDebugMethod("FFmpegWriter::WriteFrame", "frame->number", frame->number, "spooled_video_frames.size()", spooled_video_frames.size(), "spooled_audio_frames.size()", spooled_audio_frames.size(), "cache_size", cache_size, "is_writing", is_writing, "", -1);
+
// Write the frames once it reaches the correct cache size
if (spooled_video_frames.size() == cache_size || spooled_audio_frames.size() == cache_size)
{
@@ -335,6 +387,9 @@ void FFmpegWriter::WriteFrame(tr1::shared_ptr frame)
// Write all frames in the queue to the video file.
void FFmpegWriter::write_queued_frames()
{
+ #pragma omp critical (debug_output)
+ AppendDebugMethod("FFmpegWriter::write_queued_frames", "spooled_video_frames.size()", spooled_video_frames.size(), "spooled_audio_frames.size()", spooled_audio_frames.size(), "", -1, "", -1, "", -1, "", -1);
+
// Flip writing flag
is_writing = true;
@@ -437,8 +492,11 @@ void FFmpegWriter::write_queued_frames()
}
// Write a block of frames from a reader
-void FFmpegWriter::WriteFrame(ReaderBase* reader, int start, int length)
+void FFmpegWriter::WriteFrame(ReaderBase* reader, int start, int length) throw(WriterClosed)
{
+ #pragma omp critical (debug_output)
+ AppendDebugMethod("FFmpegWriter::WriteFrame (from Reader)", "start", start, "length", length, "", -1, "", -1, "", -1, "", -1);
+
// Loop through each frame (and encoded it)
for (int number = start; number <= length; number++)
{
@@ -468,6 +526,12 @@ void FFmpegWriter::WriteTrailer()
* header; otherwise write_trailer may try to use memory that
* was freed on av_codec_close() */
av_write_trailer(oc);
+
+ // Mark as 'written'
+ write_trailer = true;
+
+ #pragma omp critical (debug_output)
+ AppendDebugMethod("FFmpegWriter::WriteTrailer", "", -1, "", -1, "", -1, "", -1, "", -1, "", -1);
}
// Flush encoders
@@ -485,8 +549,6 @@ void FFmpegWriter::flush_encoders()
if (info.has_video)
for (;;) {
- cout << "Flushing VIDEO buffer!" << endl;
-
// Increment PTS (in frames and scaled to the codec's timebase)
write_video_count += av_rescale_q(1, (AVRational){info.fps.den, info.fps.num}, video_codec->time_base);
@@ -529,9 +591,8 @@ void FFmpegWriter::flush_encoders()
#endif
if (error_code < 0) {
- string error_description = av_err2str(error_code);
- cout << "error encoding video: " << error_code << ": " << error_description << endl;
- //throw ErrorEncodingVideo("Error while flushing video frame", -1);
+ #pragma omp critical (debug_output)
+ AppendDebugMethod("FFmpegWriter::flush_encoders ERROR [" + (string)av_err2str(error_code) + "]", "error_code", error_code, "", -1, "", -1, "", -1, "", -1, "", -1);
}
if (!got_packet) {
stop_encoding = 1;
@@ -553,9 +614,8 @@ void FFmpegWriter::flush_encoders()
// Write packet
error_code = av_interleaved_write_frame(oc, &pkt);
if (error_code != 0) {
- string error_description = av_err2str(error_code);
- cout << "error writing video: " << error_code << ": " << error_description << endl;
- //throw ErrorEncodingVideo("Error while writing video packet to flush encoder", -1);
+ #pragma omp critical (debug_output)
+ AppendDebugMethod("FFmpegWriter::flush_encoders ERROR [" + (string)av_err2str(error_code) + "]", "error_code", error_code, "", -1, "", -1, "", -1, "", -1, "", -1);
}
// Deallocate memory (if needed)
@@ -567,8 +627,6 @@ void FFmpegWriter::flush_encoders()
if (info.has_audio)
for (;;) {
- cout << "Flushing AUDIO buffer!" << endl;
-
// Increment PTS (in samples and scaled to the codec's timebase)
#if LIBAVFORMAT_VERSION_MAJOR >= 54
// for some reason, it requires me to multiply channels X 2
@@ -587,9 +645,8 @@ void FFmpegWriter::flush_encoders()
int got_packet = 0;
error_code = avcodec_encode_audio2(audio_codec, &pkt, NULL, &got_packet);
if (error_code < 0) {
- string error_description = av_err2str(error_code);
- cout << "error encoding audio (flush): " << error_code << ": " << error_description << endl;
- //throw ErrorEncodingAudio("Error while flushing audio frame", -1);
+ #pragma omp critical (debug_output)
+ AppendDebugMethod("FFmpegWriter::flush_encoders ERROR [" + (string)av_err2str(error_code) + "]", "error_code", error_code, "", -1, "", -1, "", -1, "", -1, "", -1);
}
if (!got_packet) {
stop_encoding = 1;
@@ -615,9 +672,8 @@ void FFmpegWriter::flush_encoders()
// Write packet
error_code = av_interleaved_write_frame(oc, &pkt);
if (error_code != 0) {
- string error_description = av_err2str(error_code);
- cout << "error writing audio: " << error_code << ": " << error_description << endl;
- //throw ErrorEncodingAudio("Error while writing audio packet to flush encoder", -1);
+ #pragma omp critical (debug_output)
+ AppendDebugMethod("FFmpegWriter::flush_encoders ERROR [" + (string)av_err2str(error_code) + "]", "error_code", error_code, "", -1, "", -1, "", -1, "", -1, "", -1);
}
}
@@ -641,11 +697,20 @@ void FFmpegWriter::close_audio(AVFormatContext *oc, AVStream *st)
delete[] audio_outbuf;
delete resampler;
+
+ // Deallocate resample buffer
+ avresample_close(avr);
+ avresample_free(&avr);
+ avr = NULL;
}
// Close the writer
void FFmpegWriter::Close()
{
+ // Write trailer (if needed)
+ if (!write_trailer)
+ WriteTrailer();
+
// Close each codec
if (video_st)
close_video(oc, video_st);
@@ -673,6 +738,15 @@ void FFmpegWriter::Close()
// Free the stream
av_free(oc);
+
+ // Close writer
+ is_open = false;
+ prepare_streams = false;
+ write_header = false;
+ write_trailer = false;
+
+ #pragma omp critical (debug_output)
+ AppendDebugMethod("FFmpegWriter::Close", "", -1, "", -1, "", -1, "", -1, "", -1, "", -1);
}
// Add an AVFrame to the cache
@@ -741,7 +815,7 @@ AVStream* FFmpegWriter::add_audio_stream()
// Set a valid number of channels (or throw error)
- int channel_layout = info.channels == 1 ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO;
+ int channel_layout = info.channel_layout;
if (codec->channel_layouts) {
int i;
for (i = 0; codec->channel_layouts[i] != 0; i++)
@@ -775,6 +849,9 @@ AVStream* FFmpegWriter::add_audio_stream()
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
+ #pragma omp critical (debug_output)
+ AppendDebugMethod("FFmpegWriter::add_audio_stream", "c->codec_id", c->codec_id, "c->bit_rate", c->bit_rate, "c->channels", c->channels, "c->sample_fmt", c->sample_fmt, "c->channel_layout", c->channel_layout, "c->sample_rate", c->sample_rate);
+
return st;
}
@@ -807,6 +884,8 @@ AVStream* FFmpegWriter::add_video_stream()
/* put sample parameters */
c->bit_rate = info.video_bit_rate;
+ c->rc_min_rate = info.video_bit_rate - (info.video_bit_rate / 6);
+ c->rc_max_rate = info.video_bit_rate;
/* resolution must be a multiple of two */
// TODO: require /2 height and width
c->width = info.width;
@@ -819,6 +898,7 @@ AVStream* FFmpegWriter::add_video_stream()
c->time_base.num = info.video_timebase.num;
c->time_base.den = info.video_timebase.den;
c->gop_size = 12; /* TODO: add this to "info"... emit one intra frame every twelve frames at most */
+ c->max_b_frames = 10;
if (c->codec_id == CODEC_ID_MPEG2VIDEO)
/* just for testing, we also add B frames */
c->max_b_frames = 2;
@@ -834,7 +914,6 @@ AVStream* FFmpegWriter::add_video_stream()
// Find all supported pixel formats for this codec
const PixelFormat* supported_pixel_formats = codec->pix_fmts;
while (supported_pixel_formats != NULL && *supported_pixel_formats != PIX_FMT_NONE) {
- cout << "supported pixel format: " << av_get_pix_fmt_name(*supported_pixel_formats) << endl;
// Assign the 1st valid pixel format (if one is missing)
if (c->pix_fmt == PIX_FMT_NONE)
c->pix_fmt = *supported_pixel_formats;
@@ -846,21 +925,19 @@ AVStream* FFmpegWriter::add_video_stream()
if(fmt->video_codec == CODEC_ID_RAWVIDEO) {
// Raw video should use RGB24
c->pix_fmt = PIX_FMT_RGB24;
- // Set raw picture flag (so we don't encode this video)
- oc->oformat->flags |= AVFMT_RAWPICTURE;
+
+ if (strcmp(fmt->name, "gif") != 0)
+ // If not GIF format, skip the encoding process
+ // Set raw picture flag (so we don't encode this video)
+ oc->oformat->flags |= AVFMT_RAWPICTURE;
} else {
// Set the default codec
c->pix_fmt = PIX_FMT_YUV420P;
}
}
- // Override Gif Support (TODO: Find a better way to accomplish this)
- if (c->codec_id == CODEC_ID_GIF) {
- // Force rgb24 which seems to be required for GIF
- c->pix_fmt = PIX_FMT_RGB24;
- // Set raw picture flag (so we don't encode the image)
- oc->oformat->flags |= AVFMT_RAWPICTURE;
- }
+ #pragma omp critical (debug_output)
+ AppendDebugMethod("FFmpegWriter::add_video_stream (" + (string)fmt->name + " : " + (string)av_get_pix_fmt_name(c->pix_fmt) + ")", "c->codec_id", c->codec_id, "c->bit_rate", c->bit_rate, "c->pix_fmt", c->pix_fmt, "oc->oformat->flags", oc->oformat->flags, "AVFMT_RAWPICTURE", AVFMT_RAWPICTURE, "", -1);
return st;
}
@@ -914,6 +991,9 @@ void FFmpegWriter::open_audio(AVFormatContext *oc, AVStream *st)
audio_outbuf_size = AVCODEC_MAX_AUDIO_FRAME_SIZE + FF_INPUT_BUFFER_PADDING_SIZE;
audio_outbuf = new uint8_t[audio_outbuf_size];
+ #pragma omp critical (debug_output)
+ AppendDebugMethod("FFmpegWriter::open_audio", "audio_codec->thread_count", audio_codec->thread_count, "audio_input_frame_size", audio_input_frame_size, "buffer_size", AVCODEC_MAX_AUDIO_FRAME_SIZE + FF_INPUT_BUFFER_PADDING_SIZE, "", -1, "", -1, "", -1);
+
}
// open video codec
@@ -933,6 +1013,10 @@ void FFmpegWriter::open_video(AVFormatContext *oc, AVStream *st)
/* open the codec */
if (avcodec_open2(video_codec, codec, NULL) < 0)
throw InvalidCodec("Could not open codec", path);
+
+ #pragma omp critical (debug_output)
+ AppendDebugMethod("FFmpegWriter::open_video", "video_codec->thread_count", video_codec->thread_count, "", -1, "", -1, "", -1, "", -1, "", -1);
+
}
// write all queued frames' audio to the video file
@@ -951,13 +1035,11 @@ void FFmpegWriter::write_audio_packets(bool final)
int channels_in_frame = 0;
int sample_rate_in_frame = 0;
int samples_in_frame = 0;
+ ChannelLayout channel_layout_in_frame = LAYOUT_MONO; // default channel layout
// Create a new array (to hold all S16 audio samples, for the current queued frames
int16_t* frame_samples = new int16_t[(queued_audio_frames.size() * AVCODEC_MAX_AUDIO_FRAME_SIZE) + FF_INPUT_BUFFER_PADDING_SIZE];
- // create a new array (to hold all the re-sampled audio, for the current queued frames)
- int16_t* converted_audio = new int16_t[(queued_audio_frames.size() * AVCODEC_MAX_AUDIO_FRAME_SIZE) + FF_INPUT_BUFFER_PADDING_SIZE];
-
// Loop through each queued audio frame
while (!queued_audio_frames.empty())
{
@@ -965,12 +1047,17 @@ void FFmpegWriter::write_audio_packets(bool final)
tr1::shared_ptr frame = queued_audio_frames.front();
// Get the audio details from this frame
- sample_rate_in_frame = info.sample_rate; // resampling happens when getting the interleaved audio samples below
- samples_in_frame = frame->GetAudioSamplesCount(); // this is updated if resampling happens
+ sample_rate_in_frame = frame->SampleRate();
+ samples_in_frame = frame->GetAudioSamplesCount();
channels_in_frame = frame->GetAudioChannelsCount();
+ channel_layout_in_frame = frame->ChannelsLayout();
+
// Get audio sample array
- float* frame_samples_float = frame->GetInterleavedAudioSamples(original_sample_rate, info.sample_rate, new_sampler, &samples_in_frame);
+ float* frame_samples_float = NULL;
+ // Get samples interleaved together (c1 c2 c1 c2 c1 c2)
+ frame_samples_float = frame->GetInterleavedAudioSamples(sample_rate_in_frame, new_sampler, &samples_in_frame);
+
// Calculate total samples
total_frame_samples = samples_in_frame * channels_in_frame;
@@ -980,6 +1067,7 @@ void FFmpegWriter::write_audio_packets(bool final)
// Translate sample value and copy into buffer
frame_samples[frame_position] = int(frame_samples_float[s] * (1 << 15));
+
// Deallocate float array
delete[] frame_samples_float;
@@ -994,39 +1082,98 @@ void FFmpegWriter::write_audio_packets(bool final)
int remaining_frame_samples = total_frame_samples;
int samples_position = 0;
- // Re-sample audio samples (into additional channels or changing the sample format / number format)
- // The sample rate has already been resampled using the GetInterleavedAudioSamples method.
- if (!final && (audio_codec->sample_fmt != AV_SAMPLE_FMT_S16 || info.channels != channels_in_frame)) {
- // Audio needs to be converted
- // Create an audio resample context object (used to convert audio samples)
- ReSampleContext *resampleCtx = av_audio_resample_init(
- info.channels, channels_in_frame,
- info.sample_rate, sample_rate_in_frame,
- audio_codec->sample_fmt, AV_SAMPLE_FMT_S16, 0, 0, 0, 0.0f);
+ #pragma omp critical (debug_output)
+ AppendDebugMethod("FFmpegWriter::write_audio_packets", "final", final, "total_frame_samples", total_frame_samples, "remaining_frame_samples", remaining_frame_samples, "channels_in_frame", channels_in_frame, "samples_in_frame", samples_in_frame, "", -1);
- if (!resampleCtx)
- throw ResampleError("Failed to resample & convert audio samples for encoding.", path);
- else {
- // FFmpeg audio resample & sample format conversion
- audio_resample(resampleCtx, (short *) converted_audio, (short *) frame_samples, total_frame_samples);
+ // Keep track of the original sample format
+ AVSampleFormat output_sample_fmt = audio_codec->sample_fmt;
- // Update total frames & input frame size (due to bigger or smaller data types)
- total_frame_samples *= (av_get_bytes_per_sample(audio_codec->sample_fmt) / av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)); // adjust for different byte sizes
- total_frame_samples *= (float(info.channels) / channels_in_frame); // adjust for different # of channels
- audio_input_frame_size = initial_audio_input_frame_size * (av_get_bytes_per_sample(audio_codec->sample_fmt) / av_get_bytes_per_sample(AV_SAMPLE_FMT_S16));
+ if (!final) {
+ // Create input frame (and allocate arrays)
+ AVFrame *audio_frame = avcodec_alloc_frame();
+ avcodec_get_frame_defaults(audio_frame);
+ audio_frame->nb_samples = total_frame_samples / channels_in_frame;
+ av_samples_alloc(audio_frame->data, audio_frame->linesize, channels_in_frame, total_frame_samples / channels_in_frame, AV_SAMPLE_FMT_S16, 0);
- // Set remaining samples
- remaining_frame_samples = total_frame_samples;
+ // Fill input frame with sample data
+ //memcpy(audio_frame->data[0], frame_samples, audio_frame->nb_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * channels_in_frame);
+ avcodec_fill_audio_frame(audio_frame, channels_in_frame, AV_SAMPLE_FMT_S16, (uint8_t *) frame_samples,
+ audio_frame->nb_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * channels_in_frame, 0);
- // Copy audio samples over original samples
- memcpy(frame_samples, converted_audio, total_frame_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16));
+ // Update total samples & input frame size (due to bigger or smaller data types)
+ total_frame_samples *= (av_get_bytes_per_sample(audio_codec->sample_fmt) / av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)); // adjust for different byte sizes
+ total_frame_samples *= (float(info.channels) / channels_in_frame); // adjust for different # of channels
+ audio_input_frame_size = initial_audio_input_frame_size * (av_get_bytes_per_sample(audio_codec->sample_fmt) / av_get_bytes_per_sample(AV_SAMPLE_FMT_S16));
- // Close context
- audio_resample_close(resampleCtx);
+ // Set remaining samples
+ remaining_frame_samples = total_frame_samples;
+
+ // Create output frame (and allocate arrays)
+ AVFrame *audio_converted = avcodec_alloc_frame();
+ avcodec_get_frame_defaults(audio_converted);
+ audio_converted->nb_samples = audio_frame->nb_samples;
+ av_samples_alloc(audio_converted->data, audio_converted->linesize, info.channels, audio_frame->nb_samples, audio_codec->sample_fmt, 0);
+
+ // Do not convert audio to planar format (yet). We need to keep everything interleaved at this point.
+ switch (audio_codec->sample_fmt)
+ {
+ case AV_SAMPLE_FMT_FLTP:
+ {
+ output_sample_fmt = AV_SAMPLE_FMT_FLT;
+ break;
+ }
+ case AV_SAMPLE_FMT_S32P:
+ {
+ output_sample_fmt = AV_SAMPLE_FMT_S32;
+ break;
+ }
+ case AV_SAMPLE_FMT_S16P:
+ {
+ output_sample_fmt = AV_SAMPLE_FMT_S16;
+ break;
+ }
+ case AV_SAMPLE_FMT_U8P:
+ {
+ output_sample_fmt = AV_SAMPLE_FMT_U8;
+ break;
+ }
}
+
+ // setup resample context
+ if (!avr) {
+ avr = avresample_alloc_context();
+ av_opt_set_int(avr, "in_channel_layout", channel_layout_in_frame, 0);
+ av_opt_set_int(avr, "out_channel_layout", info.channel_layout, 0);
+ av_opt_set_int(avr, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
+ av_opt_set_int(avr, "out_sample_fmt", output_sample_fmt, 0); // planar not allowed here
+ av_opt_set_int(avr, "in_sample_rate", sample_rate_in_frame, 0);
+ av_opt_set_int(avr, "out_sample_rate", info.sample_rate, 0);
+ av_opt_set_int(avr, "in_channels", channels_in_frame, 0);
+ av_opt_set_int(avr, "out_channels", info.channels, 0);
+ avresample_open(avr);
+ }
+ int nb_samples = 0;
+
+ // Convert audio samples
+ nb_samples = avresample_convert(avr, // audio resample context
+ audio_converted->data, // output data pointers
+ audio_converted->linesize[0], // output plane size, in bytes. (0 if unknown)
+ audio_converted->nb_samples, // maximum number of samples that the output buffer can hold
+ audio_frame->data, // input data pointers
+ audio_frame->linesize[0], // input plane size, in bytes (0 if unknown)
+ audio_frame->nb_samples); // number of input samples to convert
+
+ // Copy audio samples over original samples
+ memcpy(frame_samples, audio_converted->data[0], audio_converted->nb_samples * av_get_bytes_per_sample(audio_codec->sample_fmt) * info.channels);
+
+ // Free frames
+ avcodec_free_frame(&audio_frame);
+ avcodec_free_frame(&audio_converted);
}
+
+
// Loop until no more samples
while (remaining_frame_samples > 0 || final) {
// Get remaining samples needed for this packet
@@ -1054,6 +1201,65 @@ void FFmpegWriter::write_audio_packets(bool final)
// Not enough samples to encode... so wait until the next frame
break;
+
+
+
+ // Convert to planar (if needed by audio codec)
+ AVFrame *frame_final = avcodec_alloc_frame();
+ avcodec_get_frame_defaults(frame_final);
+ if (av_sample_fmt_is_planar(audio_codec->sample_fmt))
+ {
+ // setup resample context
+ if (!avr_planar) {
+ avr_planar = avresample_alloc_context();
+ av_opt_set_int(avr_planar, "in_channel_layout", info.channel_layout, 0);
+ av_opt_set_int(avr_planar, "out_channel_layout", info.channel_layout, 0);
+ av_opt_set_int(avr_planar, "in_sample_fmt", output_sample_fmt, 0);
+ av_opt_set_int(avr_planar, "out_sample_fmt", audio_codec->sample_fmt, 0); // planar output IS intended here (converting to the codec's planar layout)
+ av_opt_set_int(avr_planar, "in_sample_rate", info.sample_rate, 0);
+ av_opt_set_int(avr_planar, "out_sample_rate", info.sample_rate, 0);
+ av_opt_set_int(avr_planar, "in_channels", info.channels, 0);
+ av_opt_set_int(avr_planar, "out_channels", info.channels, 0);
+ avresample_open(avr_planar);
+ }
+
+ // Create input frame (and allocate arrays)
+ AVFrame *audio_frame = avcodec_alloc_frame();
+ avcodec_get_frame_defaults(audio_frame);
+ audio_frame->nb_samples = audio_input_position / info.channels;
+ av_samples_alloc(audio_frame->data, audio_frame->linesize, info.channels, audio_input_position / info.channels, output_sample_fmt, 0);
+
+ // Fill input frame with sample data
+ avcodec_fill_audio_frame(audio_frame, info.channels, output_sample_fmt, (uint8_t *) samples,
+ audio_frame->nb_samples * av_get_bytes_per_sample(output_sample_fmt) * info.channels, 0);
+
+ // Create output frame (and allocate arrays)
+ frame_final->nb_samples = audio_frame->nb_samples;
+ av_samples_alloc(frame_final->data, frame_final->linesize, info.channels, audio_frame->nb_samples, audio_codec->sample_fmt, 0);
+
+ // Convert audio samples
+ int nb_samples = avresample_convert(avr_planar, // audio resample context
+ frame_final->data, // output data pointers
+ frame_final->linesize[0], // output plane size, in bytes. (0 if unknown)
+ frame_final->nb_samples, // maximum number of samples that the output buffer can hold
+ audio_frame->data, // input data pointers
+ audio_frame->linesize[0], // input plane size, in bytes (0 if unknown)
+ audio_frame->nb_samples); // number of input samples to convert
+
+ // Copy audio samples over original samples
+ memcpy(samples, frame_final->data[0], frame_final->nb_samples * av_get_bytes_per_sample(audio_codec->sample_fmt) * info.channels);
+
+ // Free frames
+ avcodec_free_frame(&audio_frame);
+
+ } else {
+
+ // Fill the final_frame AVFrame with audio (non planar)
+ avcodec_fill_audio_frame(frame_final, audio_codec->channels, audio_codec->sample_fmt, (uint8_t *) samples,
+ audio_input_position * av_get_bytes_per_sample(audio_codec->sample_fmt), 1);
+ }
+
+
// Increment PTS (in samples and scaled to the codec's timebase)
#if LIBAVFORMAT_VERSION_MAJOR >= 54
// for some reason, it requires me to multiply channels X 2
@@ -1062,18 +1268,13 @@ void FFmpegWriter::write_audio_packets(bool final)
write_audio_count += av_rescale_q(audio_input_position / audio_codec->channels, (AVRational){1, info.sample_rate}, audio_codec->time_base);
#endif
- // Create AVFrame (and fill it with samples)
- AVFrame *frame_final = avcodec_alloc_frame();
+ // Set the # of samples
#if LIBAVFORMAT_VERSION_MAJOR >= 54
- // for some reason, it requires me to multiply channels X 2
frame_final->nb_samples = audio_input_position / (audio_codec->channels * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16));
#else
frame_final->nb_samples = audio_input_frame_size / audio_codec->channels;
#endif
- //frame_final->nb_samples = audio_input_frame_size / audio_codec->channels; //av_get_bytes_per_sample(audio_codec->sample_fmt);
frame_final->pts = write_audio_count; // Set the AVFrame's PTS
- avcodec_fill_audio_frame(frame_final, audio_codec->channels, audio_codec->sample_fmt, (uint8_t *) samples,
- audio_input_position * av_get_bytes_per_sample(audio_codec->sample_fmt), 1);
// Init the packet
AVPacket pkt;
@@ -1111,16 +1312,15 @@ void FFmpegWriter::write_audio_packets(bool final)
int error_code = av_interleaved_write_frame(oc, &pkt);
if (error_code != 0)
{
- string error_description = av_err2str(error_code);
- cout << "error: " << error_code << ": " << error_description << endl;
- throw ErrorEncodingAudio("Error while writing compressed audio frame", write_audio_count);
+ #pragma omp critical (debug_output)
+ AppendDebugMethod("FFmpegWriter::write_audio_packets ERROR [" + (string)av_err2str(error_code) + "]", "error_code", error_code, "", -1, "", -1, "", -1, "", -1, "", -1);
}
}
if (error_code < 0)
{
- string error_description = av_err2str(error_code);
- cout << "Error encoding audio: " << error_code << ": " << error_description << endl;
+ #pragma omp critical (debug_output)
+ AppendDebugMethod("FFmpegWriter::write_audio_packets ERROR [" + (string)av_err2str(error_code) + "]", "error_code", error_code, "", -1, "", -1, "", -1, "", -1, "", -1);
}
// deallocate AVFrame
@@ -1137,7 +1337,6 @@ void FFmpegWriter::write_audio_packets(bool final)
// Delete arrays
delete[] frame_samples;
- delete[] converted_audio;
} // end task
}
@@ -1221,6 +1420,10 @@ void FFmpegWriter::process_video_packet(tr1::shared_ptr frame)
frame_source->data[0][row+3] = MagickCore::ScaleQuantumToChar((Magick::Quantum) pixel_packets[packet].opacity);
}
+
+ #pragma omp critical (debug_output)
+ AppendDebugMethod("FFmpegWriter::process_video_packet", "frame->number", frame->number, "bytes_source", bytes_source, "bytes_final", bytes_final, "step", step, "", -1, "", -1);
+
// Resize & convert pixel format
sws_scale(scaler, frame_source->data, frame_source->linesize, 0,
source_image_height, frame_final->data, frame_final->linesize);
@@ -1240,6 +1443,9 @@ void FFmpegWriter::process_video_packet(tr1::shared_ptr frame)
// write video frame
void FFmpegWriter::write_video_packet(tr1::shared_ptr frame, AVFrame* frame_final)
{
+ #pragma omp critical (debug_output)
+ AppendDebugMethod("FFmpegWriter::write_video_packet", "frame->number", frame->number, "oc->oformat->flags & AVFMT_RAWPICTURE", oc->oformat->flags & AVFMT_RAWPICTURE, "", -1, "", -1, "", -1, "", -1);
+
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
// Raw video case.
AVPacket pkt;
@@ -1247,7 +1453,7 @@ void FFmpegWriter::write_video_packet(tr1::shared_ptr frame, AVFrame* fra
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index= video_st->index;
- pkt.data= (uint8_t*)frame_final;
+ pkt.data= (uint8_t*)frame_final->data;
pkt.size= sizeof(AVPicture);
// Increment PTS (in frames and scaled to the codec's timebase)
@@ -1258,8 +1464,8 @@ void FFmpegWriter::write_video_packet(tr1::shared_ptr frame, AVFrame* fra
int error_code = av_interleaved_write_frame(oc, &pkt);
if (error_code != 0)
{
- string error_description = av_err2str(error_code);
- cout << "error: " << error_code << ": " << error_description << endl;
+ #pragma omp critical (debug_output)
+ AppendDebugMethod("FFmpegWriter::write_video_packet ERROR [" + (string)av_err2str(error_code) + "]", "error_code", error_code, "", -1, "", -1, "", -1, "", -1, "", -1);
throw ErrorEncodingVideo("Error while writing raw video frame", frame->number);
}
@@ -1332,8 +1538,8 @@ void FFmpegWriter::write_video_packet(tr1::shared_ptr frame, AVFrame* fra
int error_code = av_interleaved_write_frame(oc, &pkt);
if (error_code != 0)
{
- string error_description = av_err2str(error_code);
- cout << "error: " << error_code << ": " << error_description << endl;
+ #pragma omp critical (debug_output)
+ AppendDebugMethod("FFmpegWriter::write_video_packet ERROR [" + (string)av_err2str(error_code) + "]", "error_code", error_code, "", -1, "", -1, "", -1, "", -1, "", -1);
throw ErrorEncodingVideo("Error while writing compressed video frame", frame->number);
}
}
diff --git a/src/Main.cpp b/src/Main.cpp
index 9fbb795f..2c1baad9 100644
--- a/src/Main.cpp
+++ b/src/Main.cpp
@@ -43,6 +43,81 @@ using namespace tr1;
int main(int argc, char* argv[])
{
+ // Reader
+ FFmpegReader r9("/home/jonathan/Videos/sintel-1024-surround.mp4");
+ r9.Open();
+ //r9.info.has_audio = false;
+ //r9.enable_seek = false;
+ //r9.debug = true;
+
+ /* WRITER ---------------- */
+ //FFmpegWriter w9("/home/jonathan/output.webm");
+ //w9.debug = true;
+ ImageWriter w9("/home/jonathan/output.gif");
+
+ // Set options
+ //w9.SetAudioOptions(true, "libvorbis", 48000, r9.info.channels, r9.info.channel_layout, 120000);
+ //w9.SetVideoOptions(true, "libvpx", r9.info.fps, r9.info.width, r9.info.height, r9.info.pixel_ratio, false, false, 1500000);
+ //w9.SetVideoOptions(true, "rawvideo", r9.info.fps, 400, 2, r9.info.pixel_ratio, false, false, 20000000);
+ w9.SetVideoOptions("GIF", r9.info.fps, r9.info.width, r9.info.height, 70, 1, true);
+
+ // Open writer
+ w9.Open();
+
+ // Prepare Streams
+ //w9.PrepareStreams();
+
+// w9.SetOption(VIDEO_STREAM, "qmin", "2" );
+// w9.SetOption(VIDEO_STREAM, "qmax", "30" );
+// w9.SetOption(VIDEO_STREAM, "crf", "10" );
+// w9.SetOption(VIDEO_STREAM, "rc_min_rate", "2000000" );
+// w9.SetOption(VIDEO_STREAM, "rc_max_rate", "4000000" );
+// w9.SetOption(VIDEO_STREAM, "max_b_frames", "10" );
+
+ // Write header
+ //w9.WriteHeader();
+
+ //r9.DisplayInfo();
+
+ // 147000 frames, 28100 frames
+ //for (int frame = 1; frame <= (r9.info.video_length - 1); frame++)
+ for (int frame = 500; frame <= 530; frame++)
+ //int frame = 1;
+ //while (true)
+ {
+ //int frame_number = (rand() % 750) + 1;
+ int frame_number = ( frame);
+
+ //cout << "queue " << frame << " (frame: " << frame_number << ") ";
+ tr1::shared_ptr f = r9.GetFrame(frame_number);
+ //cout << "(" << f->number << ", " << f << ")" << endl;
+ //f->DisplayWaveform();
+ //f->AddColor(r9.info.width, r9.info.height, "blue");
+ w9.WriteFrame(f);
+
+ //frame++;
+ }
+
+ cout << "done looping" << endl;
+
+ // Write Footer
+ //w9.WriteTrailer();
+
+ // Close writer & reader
+ w9.Close();
+
+ // Close timeline
+ r9.Close();
+ /* ---------------- */
+ cout << "happy ending" << endl;
+
+ return 0;
+
+
+
+
+
+
FFmpegReader sinelReader("/home/jonathan/Videos/sintel_trailer-720p.mp4");
//sinelReader.debug = true;
sinelReader.Open();
@@ -59,15 +134,16 @@ int main(int argc, char* argv[])
tr1::shared_ptr f = sinelReader.GetFrame(frame_number);
//f->AddOverlayNumber(frame_number);
//f->Display();
+ f->DisplayWaveform();
- //if (x == 7654)
- sinelReader.debug = true;
+ //f->DisplayWaveform();
+ // sinelReader.debug = true;
//if (x == 7655)
// break;
}
- cout << sinelReader.OutputDebugJSON() << endl;
+ //cout << sinelReader.OutputDebugJSON() << endl;
sinelReader.Close();
return 0;
@@ -175,7 +251,7 @@ int main(int argc, char* argv[])
// Set options
//w.SetAudioOptions(true, "libvorbis", 48000, 2, 188000);
- w.SetAudioOptions(true, "libmp3lame", 44100, 1, 12800);
+ w.SetAudioOptions(true, "libmp3lame", 44100, 1, LAYOUT_STEREO, 12800);
w.SetVideoOptions(true, "mpeg4", Fraction(24,1), 1280, 720, Fraction(1,1), false, false, 30000000);
//w.SetVideoOptions(true, "libmp3lame", openshot::Fraction(30,1), 720, 360, Fraction(1,1), false, false, 3000000);