2013-09-09 23:32:16 -05:00
|
|
|
/**
|
|
|
|
|
* @file
|
|
|
|
|
* @brief Source file for FFmpegReader class
|
2013-09-12 17:52:10 -05:00
|
|
|
* @author Jonathan Thomas <jonathan@openshot.org>, Fabrice Bellard
|
2013-09-09 23:32:16 -05:00
|
|
|
*
|
|
|
|
|
* @section LICENSE
|
|
|
|
|
*
|
2013-09-12 17:52:10 -05:00
|
|
|
* Copyright (c) 2008-2013 OpenShot Studios, LLC, Fabrice Bellard
|
2013-09-09 23:32:16 -05:00
|
|
|
* (http://www.openshotstudios.com). This file is part of
|
|
|
|
|
* OpenShot Library (http://www.openshot.org), an open-source project
|
|
|
|
|
* dedicated to delivering high quality video editing and animation solutions
|
|
|
|
|
* to the world.
|
|
|
|
|
*
|
2013-09-12 17:52:10 -05:00
|
|
|
* This file is originally based on the Libavformat API example, and then modified
|
|
|
|
|
* by the libopenshot project.
|
|
|
|
|
*
|
2014-03-29 18:49:22 -05:00
|
|
|
* OpenShot Library (libopenshot) is free software: you can redistribute it
|
2014-07-11 16:52:14 -05:00
|
|
|
* and/or modify it under the terms of the GNU Lesser General Public License
|
2014-03-29 18:49:22 -05:00
|
|
|
* as published by the Free Software Foundation, either version 3 of the
|
|
|
|
|
* License, or (at your option) any later version.
|
2013-09-09 23:32:16 -05:00
|
|
|
*
|
2014-03-29 18:49:22 -05:00
|
|
|
* OpenShot Library (libopenshot) is distributed in the hope that it will be
|
|
|
|
|
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
2014-07-11 16:52:14 -05:00
|
|
|
* GNU Lesser General Public License for more details.
|
2013-09-09 23:32:16 -05:00
|
|
|
*
|
2014-07-11 16:52:14 -05:00
|
|
|
* You should have received a copy of the GNU Lesser General Public License
|
2014-03-29 18:49:22 -05:00
|
|
|
* along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
|
2013-09-09 23:32:16 -05:00
|
|
|
*/
|
|
|
|
|
|
2011-10-11 08:44:27 -05:00
|
|
|
#include "../include/FFmpegReader.h"
|
|
|
|
|
|
|
|
|
|
using namespace openshot;
|
|
|
|
|
|
2012-10-10 01:07:47 -05:00
|
|
|
// Constructor: registers FFmpeg codecs/formats, sizes the caches, then does an
// Open()/Close() round-trip so the reader's info struct (height, width, fps,
// duration, etc...) is populated without keeping the file open.
FFmpegReader::FFmpegReader(string path) throw(InvalidFile, NoStreamsFound, InvalidCodec)
	: last_frame(0), is_seeking(0), seeking_pts(0), seeking_frame(0), seek_count(0),
	  audio_pts_offset(99999), video_pts_offset(99999), path(path), is_video_seek(true), check_interlace(false),
	  check_fps(false), enable_seek(true), rescaler_position(0), num_of_rescalers(OPEN_MP_NUM_PROCESSORS), is_open(false),
	  seek_audio_frame_found(0), seek_video_frame_found(0), prev_samples(0), prev_pts(0),
	  pts_total(0), pts_counter(0), is_duration_known(false), largest_frame_processed(0) {

	// Initialize FFMpeg, and register all formats and codecs
	av_register_all();
	avcodec_register_all();

	// Init cache. 'bytes' approximates one 1280x720 RGBA frame plus one second
	// of stereo float audio; final_cache holds roughly 20 such frames.
	int64 bytes = 720 * 1280 * 4 + (44100 * 2 * 4);
	working_cache = Cache(0);
	final_cache = Cache(20 * bytes); // 20 frames X 720 video, 4 colors of chars, 2 audio channels of 4 byte floats

	// Open and Close the reader, to populate its attributes (such as height, width, etc...)
	Open();
	Close();
}
|
|
|
|
|
|
2012-08-15 17:27:14 -05:00
|
|
|
// Init a collection of software rescalers (thread safe)
|
|
|
|
|
void FFmpegReader::InitScalers()
|
|
|
|
|
{
|
|
|
|
|
// Init software rescalers vector (many of them, one for each thread)
|
|
|
|
|
for (int x = 0; x < num_of_rescalers; x++)
|
|
|
|
|
{
|
|
|
|
|
SwsContext *img_convert_ctx = sws_getContext(info.width, info.height, pCodecCtx->pix_fmt, info.width,
|
2012-12-04 02:21:01 -06:00
|
|
|
info.height, PIX_FMT_RGBA, SWS_FAST_BILINEAR, NULL, NULL, NULL);
|
2012-08-15 17:27:14 -05:00
|
|
|
|
|
|
|
|
// Add rescaler to vector
|
|
|
|
|
image_rescalers.push_back(img_convert_ctx);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-09-08 23:09:54 -05:00
|
|
|
// This struct holds the associated video frame and starting sample # for an audio packet.
|
2013-09-10 12:59:06 -05:00
|
|
|
int AudioLocation::is_near(AudioLocation location, int samples_per_frame, int amount)
|
2013-09-08 23:09:54 -05:00
|
|
|
{
|
|
|
|
|
// Is frame even close to this one?
|
|
|
|
|
if (abs(location.frame - frame) >= 2)
|
|
|
|
|
// This is too far away to be considered
|
|
|
|
|
return false;
|
|
|
|
|
|
|
|
|
|
int sample_diff = abs(location.sample_start - sample_start);
|
|
|
|
|
if (location.frame == frame && sample_diff >= 0 && sample_diff <= amount)
|
|
|
|
|
// close
|
|
|
|
|
return true;
|
|
|
|
|
|
|
|
|
|
// new frame is after
|
|
|
|
|
if (location.frame > frame)
|
|
|
|
|
{
|
|
|
|
|
// remaining samples + new samples
|
|
|
|
|
int sample_diff = (samples_per_frame - sample_start) + location.sample_start;
|
|
|
|
|
if (sample_diff >= 0 && sample_diff <= amount)
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// new frame is before
|
|
|
|
|
if (location.frame < frame)
|
|
|
|
|
{
|
|
|
|
|
// remaining new samples + old samples
|
|
|
|
|
int sample_diff = (samples_per_frame - location.sample_start) + sample_start;
|
|
|
|
|
if (sample_diff >= 0 && sample_diff <= amount)
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// not close
|
|
|
|
|
return false;
|
|
|
|
|
}
|
|
|
|
|
|
2012-08-15 17:27:14 -05:00
|
|
|
// Remove & deallocate all software scalers
|
|
|
|
|
void FFmpegReader::RemoveScalers()
|
|
|
|
|
{
|
|
|
|
|
// Close all rescalers
|
|
|
|
|
for (int x = 0; x < num_of_rescalers; x++)
|
|
|
|
|
sws_freeContext(image_rescalers[x]);
|
|
|
|
|
|
|
|
|
|
// Clear vector
|
|
|
|
|
image_rescalers.clear();
|
|
|
|
|
}
|
|
|
|
|
|
2012-10-09 01:45:34 -05:00
|
|
|
// Open the media file: probe streams, open video/audio decoders, populate the
// info struct, and allocate the rescaler pool. Safe to call repeatedly (no-op
// while already open). Throws InvalidFile / NoStreamsFound / InvalidCodec.
void FFmpegReader::Open() throw(InvalidFile, NoStreamsFound, InvalidCodec)
{
	// Open reader if not already open
	if (!is_open)
	{
		// Initialize format context (avformat_open_input allocates it)
		pFormatCtx = NULL;

		// Open video file
		if (avformat_open_input(&pFormatCtx, path.c_str(), NULL, NULL) != 0)
			throw InvalidFile("File could not be opened.", path);

		// Retrieve stream information
		if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
			throw NoStreamsFound("No streams found in file.", path);

		videoStream = -1;
		audioStream = -1;
		// Loop through each stream, and identify the FIRST video and audio stream index
		for (unsigned int i = 0; i < pFormatCtx->nb_streams; i++)
		{
			// Is this a video stream?
			if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && videoStream < 0) {
				videoStream = i;
			}
			// Is this an audio stream?
			if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audioStream < 0) {
				audioStream = i;
			}
		}
		if (videoStream == -1 && audioStream == -1)
			throw NoStreamsFound("No video or audio streams found in this file.", path);

		// Is there a video stream?
		if (videoStream != -1)
		{
			// Set the stream index
			info.video_stream_index = videoStream;

			// Set the codec and codec context pointers
			pStream = pFormatCtx->streams[videoStream];
			pCodecCtx = pFormatCtx->streams[videoStream]->codec;

			// Set decoder thread count to the number of processors
			pCodecCtx->thread_count = OPEN_MP_NUM_PROCESSORS;

			// Find the decoder for the video stream
			AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
			if (pCodec == NULL) {
				throw InvalidCodec("A valid video codec could not be found for this file.", path);
			}
			// Open video codec
			if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
				throw InvalidCodec("A video codec was found, but could not be opened.", path);

			// Update the File Info struct with video details (if a video stream is found)
			UpdateVideoInfo();

			// Init rescalers (if video stream detected)
			InitScalers();
		}

		// Is there an audio stream?
		if (audioStream != -1)
		{
			// Set the stream index
			info.audio_stream_index = audioStream;

			// Get a pointer to the codec context for the audio stream
			aStream = pFormatCtx->streams[audioStream];
			aCodecCtx = pFormatCtx->streams[audioStream]->codec;

			// Set decoder thread count to the number of processors
			aCodecCtx->thread_count = OPEN_MP_NUM_PROCESSORS;

			// Find the decoder for the audio stream
			AVCodec *aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
			if (aCodec == NULL) {
				throw InvalidCodec("A valid audio codec could not be found for this file.", path);
			}
			// Open audio codec
			if (avcodec_open2(aCodecCtx, aCodec, NULL) < 0)
				throw InvalidCodec("An audio codec was found, but could not be opened.", path);

			// Update the File Info struct with audio details (if an audio stream is found)
			UpdateAudioInfo();
		}

		// Init previous audio location (frame -1 means "no previous packet yet")
		previous_packet_location.frame = -1;
		previous_packet_location.sample_start = 0;

		// Mark as "open"
		is_open = true;
	}
}
|
|
|
|
|
|
|
|
|
|
// Close the reader: free scalers, close codecs, clear caches and bookkeeping,
// and release the format context. Safe to call repeatedly (no-op while closed).
void FFmpegReader::Close()
{
	// Close all objects, if reader is 'open'
	if (is_open)
	{
		// Close the codec
		if (info.has_video)
		{
			// Clear image scalers (must happen before the codec context goes away)
			RemoveScalers();

			avcodec_flush_buffers(pCodecCtx);
			avcodec_close(pCodecCtx);
		}
		if (info.has_audio)
		{
			avcodec_flush_buffers(aCodecCtx);
			avcodec_close(aCodecCtx);
		}

		// Clear final cache
		final_cache.Clear();
		working_cache.Clear();

		// Clear processed lists
		processed_video_frames.clear();
		processed_audio_frames.clear();

		// Clear debug json
		debug_root.clear();

		// Close the video file
		avformat_close_input(&pFormatCtx);
		av_freep(&pFormatCtx);

		// Mark as "closed", and reset the stream-walking position
		is_open = false;
		last_frame = 0;
	}
}
|
|
|
|
|
|
|
|
|
|
void FFmpegReader::UpdateAudioInfo()
|
|
|
|
|
{
|
|
|
|
|
// Set values of FileInfo struct
|
|
|
|
|
info.has_audio = true;
|
2012-06-16 02:12:48 -05:00
|
|
|
info.file_size = pFormatCtx->pb ? avio_size(pFormatCtx->pb) : -1;
|
2011-10-11 08:44:27 -05:00
|
|
|
info.acodec = aCodecCtx->codec->name;
|
|
|
|
|
info.channels = aCodecCtx->channels;
|
2015-02-05 00:00:52 -06:00
|
|
|
if (aCodecCtx->channel_layout == 0)
|
|
|
|
|
aCodecCtx->channel_layout = av_get_default_channel_layout( aCodecCtx->channels );;
|
|
|
|
|
info.channel_layout = (ChannelLayout) aCodecCtx->channel_layout;
|
2011-10-11 08:44:27 -05:00
|
|
|
info.sample_rate = aCodecCtx->sample_rate;
|
|
|
|
|
info.audio_bit_rate = aCodecCtx->bit_rate;
|
|
|
|
|
|
2011-12-11 20:42:50 -06:00
|
|
|
// Set audio timebase
|
2011-10-11 08:44:27 -05:00
|
|
|
info.audio_timebase.num = aStream->time_base.num;
|
|
|
|
|
info.audio_timebase.den = aStream->time_base.den;
|
2011-10-27 09:40:03 -05:00
|
|
|
|
2012-08-12 02:14:15 -05:00
|
|
|
// Get timebase of audio stream (if valid)
|
|
|
|
|
if (aStream->duration > 0.0f)
|
|
|
|
|
info.duration = aStream->duration * info.audio_timebase.ToDouble();
|
2011-12-11 20:42:50 -06:00
|
|
|
|
|
|
|
|
// Check for an invalid video length
|
2012-08-12 02:14:15 -05:00
|
|
|
if (info.has_video && info.video_length <= 0)
|
2011-12-11 20:42:50 -06:00
|
|
|
{
|
|
|
|
|
// Calculate the video length from the audio duration
|
|
|
|
|
info.video_length = info.duration * info.fps.ToDouble();
|
|
|
|
|
}
|
|
|
|
|
|
2011-10-27 09:40:03 -05:00
|
|
|
// Set video timebase (if no video stream was found)
|
|
|
|
|
if (!info.has_video)
|
|
|
|
|
{
|
|
|
|
|
// Set a few important default video settings (so audio can be divided into frames)
|
2012-11-12 01:25:35 -06:00
|
|
|
info.fps.num = 24;
|
2011-10-27 09:40:03 -05:00
|
|
|
info.fps.den = 1;
|
2011-12-11 20:42:50 -06:00
|
|
|
info.video_timebase.num = 1;
|
2012-11-12 01:25:35 -06:00
|
|
|
info.video_timebase.den = 24;
|
2011-12-11 20:42:50 -06:00
|
|
|
info.video_length = info.duration * info.fps.ToDouble();
|
|
|
|
|
|
2011-10-27 09:40:03 -05:00
|
|
|
}
|
2011-12-11 20:42:50 -06:00
|
|
|
|
2011-10-11 08:44:27 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void FFmpegReader::UpdateVideoInfo()
|
|
|
|
|
{
|
|
|
|
|
// Set values of FileInfo struct
|
|
|
|
|
info.has_video = true;
|
2012-06-16 02:12:48 -05:00
|
|
|
info.file_size = pFormatCtx->pb ? avio_size(pFormatCtx->pb) : -1;
|
2011-10-11 08:44:27 -05:00
|
|
|
info.height = pCodecCtx->height;
|
|
|
|
|
info.width = pCodecCtx->width;
|
|
|
|
|
info.vcodec = pCodecCtx->codec->name;
|
|
|
|
|
info.video_bit_rate = pFormatCtx->bit_rate;
|
2011-12-15 16:11:48 -06:00
|
|
|
if (!check_fps)
|
|
|
|
|
{
|
2015-02-05 00:00:52 -06:00
|
|
|
// set frames per second (fps)
|
2015-02-05 16:00:18 -06:00
|
|
|
info.fps.num = pStream->avg_frame_rate.num;
|
|
|
|
|
info.fps.den = pStream->avg_frame_rate.den;
|
2011-12-15 16:11:48 -06:00
|
|
|
}
|
2011-10-11 08:44:27 -05:00
|
|
|
if (pStream->sample_aspect_ratio.num != 0)
|
2011-12-11 20:42:50 -06:00
|
|
|
{
|
2011-10-11 08:44:27 -05:00
|
|
|
info.pixel_ratio.num = pStream->sample_aspect_ratio.num;
|
2011-12-11 20:42:50 -06:00
|
|
|
info.pixel_ratio.den = pStream->sample_aspect_ratio.den;
|
|
|
|
|
}
|
|
|
|
|
else if (pCodecCtx->sample_aspect_ratio.num != 0)
|
|
|
|
|
{
|
|
|
|
|
info.pixel_ratio.num = pCodecCtx->sample_aspect_ratio.num;
|
|
|
|
|
info.pixel_ratio.den = pCodecCtx->sample_aspect_ratio.den;
|
|
|
|
|
}
|
2011-10-11 08:44:27 -05:00
|
|
|
else
|
2011-12-11 20:42:50 -06:00
|
|
|
{
|
2011-10-11 08:44:27 -05:00
|
|
|
info.pixel_ratio.num = 1;
|
2011-12-11 20:42:50 -06:00
|
|
|
info.pixel_ratio.den = 1;
|
|
|
|
|
}
|
|
|
|
|
|
2011-10-11 08:44:27 -05:00
|
|
|
info.pixel_format = pCodecCtx->pix_fmt;
|
|
|
|
|
|
|
|
|
|
// Calculate the DAR (display aspect ratio)
|
2011-12-11 20:42:50 -06:00
|
|
|
Fraction size(info.width * info.pixel_ratio.num, info.height * info.pixel_ratio.den);
|
2011-10-11 08:44:27 -05:00
|
|
|
|
|
|
|
|
// Reduce size fraction
|
|
|
|
|
size.Reduce();
|
|
|
|
|
|
|
|
|
|
// Set the ratio based on the reduced fraction
|
|
|
|
|
info.display_ratio.num = size.num;
|
|
|
|
|
info.display_ratio.den = size.den;
|
|
|
|
|
|
2011-12-11 20:42:50 -06:00
|
|
|
// Set the video timebase
|
2011-10-11 08:44:27 -05:00
|
|
|
info.video_timebase.num = pStream->time_base.num;
|
|
|
|
|
info.video_timebase.den = pStream->time_base.den;
|
2011-12-11 20:42:50 -06:00
|
|
|
|
|
|
|
|
// Set the duration in seconds, and video length (# of frames)
|
|
|
|
|
info.duration = pStream->duration * info.video_timebase.ToDouble();
|
2012-08-12 02:14:15 -05:00
|
|
|
|
2013-09-08 16:08:56 -05:00
|
|
|
// Check for valid duration (if found)
|
2012-08-12 02:14:15 -05:00
|
|
|
if (info.duration <= 0.0f && pFormatCtx->duration >= 0)
|
|
|
|
|
// Use the format's duration
|
|
|
|
|
info.duration = pFormatCtx->duration / AV_TIME_BASE;
|
|
|
|
|
|
2013-09-08 16:08:56 -05:00
|
|
|
// Calculate duration from filesize and bitrate (if any)
|
|
|
|
|
if (info.duration <= 0.0f && info.video_bit_rate > 0 && info.file_size > 0)
|
|
|
|
|
// Estimate from bitrate, total bytes, and framerate
|
|
|
|
|
info.duration = (info.file_size / info.video_bit_rate);
|
|
|
|
|
|
|
|
|
|
// No duration found in stream of file
|
|
|
|
|
if (info.duration <= 0.0f)
|
|
|
|
|
{
|
|
|
|
|
// No duration is found in the video stream
|
|
|
|
|
info.duration = -1;
|
|
|
|
|
info.video_length = -1;
|
|
|
|
|
is_duration_known = false;
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
// Yes, a duration was found
|
|
|
|
|
is_duration_known = true;
|
|
|
|
|
|
|
|
|
|
// Calculate number of frames
|
|
|
|
|
info.video_length = round(info.duration * info.fps.ToDouble());
|
|
|
|
|
}
|
2011-12-11 20:42:50 -06:00
|
|
|
|
2012-07-03 02:42:47 -05:00
|
|
|
// Override an invalid framerate
|
|
|
|
|
if (info.fps.ToFloat() > 120.0f)
|
2012-07-08 23:26:44 -05:00
|
|
|
{
|
|
|
|
|
// Set a few important default video settings (so audio can be divided into frames)
|
2012-11-21 16:57:21 -06:00
|
|
|
info.fps.num = 24;
|
2012-07-08 23:26:44 -05:00
|
|
|
info.fps.den = 1;
|
|
|
|
|
info.video_timebase.num = 1;
|
2012-11-21 16:57:21 -06:00
|
|
|
info.video_timebase.den = 24;
|
2012-07-08 23:26:44 -05:00
|
|
|
}
|
2012-07-03 02:42:47 -05:00
|
|
|
|
2011-10-11 08:44:27 -05:00
|
|
|
}
|
|
|
|
|
|
2012-10-31 01:17:12 -05:00
|
|
|
|
2014-01-28 02:41:15 -06:00
|
|
|
// Return the requested frame, serving it from the final cache when possible;
// otherwise walk the stream forward (ReadStream) or seek when the request is
// far away or behind the current position. Clamps out-of-range frame numbers.
tr1::shared_ptr<Frame> FFmpegReader::GetFrame(int requested_frame) throw(OutOfBoundsFrame, ReaderClosed, TooManySeeks)
{
	// Check for open reader (or throw exception)
	if (!is_open)
		throw ReaderClosed("The FFmpegReader is closed.  Call Open() before calling this method.", path);

	// Adjust for a requested frame that is too small or too large
	if (requested_frame < 1)
		requested_frame = 1;
	if (requested_frame > info.video_length && is_duration_known)
		requested_frame = info.video_length;
	if (info.has_video && info.video_length == 0)
		// Invalid duration of video file
		throw InvalidFile("Could not detect the duration of the video or audio stream.", path);

	// Debug output
	#pragma omp critical (debug_output)
	AppendDebugMethod("FFmpegReader::GetFrame", "requested_frame", requested_frame, "last_frame", last_frame, "", -1, "", -1, "", -1, "", -1);

	// Check the cache for this frame
	if (final_cache.Exists(requested_frame)) {
		// Debug output
		#pragma omp critical (debug_output)
		AppendDebugMethod("FFmpegReader::GetFrame", "returned cached frame", requested_frame, "", -1, "", -1, "", -1, "", -1, "", -1);

		// Return the cached frame
		return final_cache.GetFrame(requested_frame);
	}
	else
	{
		// Frame is not in cache
		// Reset seek count
		seek_count = 0;

		// Check for first frame (always need to get frame 1 before other frames, to correctly calculate offsets)
		if (last_frame == 0 && requested_frame != 1)
			// Get first frame
			ReadStream(1);

		// Are we within X frames of the requested frame?
		int diff = requested_frame - last_frame;
		if (diff >= 1 && diff <= 20)
		{
			// Continue walking the stream
			return ReadStream(requested_frame);
		}
		else
		{
			// More than 20 frames ahead, or backwards: seek to the nearest key frame
			if (enable_seek)
				// Only seek if enabled
				Seek(requested_frame);

			else if (!enable_seek && diff < 0)
			{
				// Start over, since we can't seek, and the requested frame is smaller than our position
				Close();
				Open();
			}

			// Then continue walking the stream
			return ReadStream(requested_frame);
		}
	}
}
|
|
|
|
|
|
|
|
|
|
// Read the stream until we find the requested Frame.
// Demuxes packets on an OpenMP 'single' thread, dispatching video/audio
// processing (which may spawn parallel work), and stops once the requested
// frame lands in final_cache and a minimum packet batch has been handled.
// Falls back to the largest processed frame (or a black frame) when the
// requested frame was never produced (e.g. end of stream).
tr1::shared_ptr<Frame> FFmpegReader::ReadStream(int requested_frame)
{
	// Allocate video frame
	bool end_of_stream = false;
	bool check_seek = false;
	bool frame_finished = false;
	int packet_error = -1;

	// Minimum number of packets to process (for performance reasons)
	int packets_processed = 0;
	int minimum_packets = OPEN_MP_NUM_PROCESSORS;

	// Set the number of threads in OpenMP
	omp_set_num_threads(OPEN_MP_NUM_PROCESSORS);
	// Allow nested OpenMP sections
	omp_set_nested(true);

	// Debug output
	#pragma omp critical (debug_output)
	AppendDebugMethod("FFmpegReader::ReadStream", "requested_frame", requested_frame, "OPEN_MP_NUM_PROCESSORS", OPEN_MP_NUM_PROCESSORS, "", -1, "", -1, "", -1, "", -1);

	#pragma omp parallel
	{
		#pragma omp single
		{
			// Loop through the stream until the correct frame is found
			while (true)
			{
				#pragma omp critical (packet_cache)
				packet_error = GetNextPacket();

				// Throttle: busy-wait while too many frames are in flight.
				// NOTE(review): size() is unsigned vs. signed minimum_packets —
				// fine while minimum_packets > 0; confirm it can never be <= 0.
				while (processing_video_frames.size() + processing_audio_frames.size() >= minimum_packets)
					usleep(50);

				// Get the next packet (if any)
				if (packet_error < 0)
				{
					// Break loop when no more packets found
					end_of_stream = true;
					break;
				}

				// Debug output
				#pragma omp critical (debug_output)
				AppendDebugMethod("FFmpegReader::ReadStream (GetNextPacket)", "requested_frame", requested_frame, "processing_video_frames.size()", processing_video_frames.size(), "processing_audio_frames.size()", processing_audio_frames.size(), "minimum_packets", minimum_packets, "packets_processed", packets_processed, "", -1);

				// Video packet
				if (packet->stream_index == videoStream)
				{
					// Check the status of a seek (if any)
					if (is_seeking)
						#pragma omp critical (openshot_cache)
						check_seek = CheckSeek(true);
					else
						check_seek = false;

					if (check_seek) {
						// Remove packet (since this packet is pointless)
						RemoveAVPacket(packet);

						// Jump to the next iteration of this loop
						continue;
					}

					#pragma omp critical (packet_cache)
					frame_finished = GetAVFrame();

					// Check if the AVFrame is finished and set it
					if (frame_finished)
					{
						// Update PTS / Frame Offset (if any)
						UpdatePTSOffset(true);

						// Process Video Packet
						ProcessVideoPacket(requested_frame);
					}

				}
				// Audio packet
				else if (packet->stream_index == audioStream)
				{
					// Check the status of a seek (if any)
					if (is_seeking)
						#pragma omp critical (openshot_cache)
						check_seek = CheckSeek(false);
					else
						check_seek = false;

					if (check_seek) {
						// Remove packet (since this packet is pointless)
						RemoveAVPacket(packet);

						// Jump to the next iteration of this loop
						continue;
					}

					// Update PTS / Frame Offset (if any)
					UpdatePTSOffset(false);

					// Determine related video frame and starting sample # from audio PTS
					AudioLocation location = GetAudioPTSLocation(packet->pts);

					// Process Audio Packet
					ProcessAudioPacket(requested_frame, location.frame, location.sample_start);
				}

				// Check if working frames are 'finished'
				bool is_cache_found = false;
				#pragma omp critical (openshot_cache)
				{
					if (!is_seeking)
						CheckWorkingFrames(false);

					// Check if requested 'final' frame is available
					is_cache_found = final_cache.Exists(requested_frame);

					// Increment frames processed
					packets_processed++;
				}

				// Break once the frame is found (and the minimum batch is done)
				if (is_cache_found && packets_processed >= minimum_packets)
					break;

			} // end while

		} // end omp single
	} // end omp parallel

	// Debug output
	#pragma omp critical (debug_output)
	AppendDebugMethod("FFmpegReader::ReadStream (Completed)", "packets_processed", packets_processed, "end_of_stream", end_of_stream, "largest_frame_processed", largest_frame_processed, "Working Cache Count", working_cache.Count(), "", -1, "", -1);

	// End of stream?
	if (end_of_stream) {
		// Mark the any other working frames as 'finished'
		CheckWorkingFrames(end_of_stream);

		// Update readers video length (to a largest processed frame number)
		info.video_length = largest_frame_processed; // just a guess, but this frame is certainly out of bounds
		// NOTE(review): int-to-bool conversion — true whenever any frame was
		// processed; confirm that is the intended meaning here.
		is_duration_known = largest_frame_processed;
	}

	// Return requested frame (if found)
	if (final_cache.Exists(requested_frame))
		// Return prepared frame
		return final_cache.GetFrame(requested_frame);
	else {

		// Check if largest frame is still cached
		if (final_cache.Exists(largest_frame_processed)) {
			// return the largest processed frame (assuming it was the last in the video file)
			return final_cache.GetFrame(largest_frame_processed);
		}
		else {
			// The largest processed frame is no longer in cache, return a blank frame
			tr1::shared_ptr<Frame> f = CreateFrame(largest_frame_processed);
			f->AddColor(info.width, info.height, "#000");
			return f;
		}
	}
}
|
|
|
|
|
|
|
|
|
|
// Get the next packet (if any)
|
|
|
|
|
int FFmpegReader::GetNextPacket()
|
|
|
|
|
{
|
2012-07-02 00:51:10 -05:00
|
|
|
AVPacket *next_packet = new AVPacket();
|
|
|
|
|
int found_packet = av_read_frame(pFormatCtx, next_packet);
|
|
|
|
|
|
|
|
|
|
if (found_packet >= 0)
|
|
|
|
|
{
|
|
|
|
|
// Add packet to packet cache
|
2012-07-03 16:58:07 -05:00
|
|
|
packets[next_packet] = next_packet;
|
2012-07-02 00:51:10 -05:00
|
|
|
|
|
|
|
|
// Update current packet pointer
|
2012-07-03 16:58:07 -05:00
|
|
|
packet = packets[next_packet];
|
2014-09-15 00:24:46 -05:00
|
|
|
|
2012-07-08 23:26:44 -05:00
|
|
|
}else
|
|
|
|
|
{
|
|
|
|
|
// Free packet, since it's unused
|
|
|
|
|
av_free_packet(next_packet);
|
2012-07-09 00:41:17 -05:00
|
|
|
delete next_packet;
|
2012-07-02 00:51:10 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Return if packet was found (or error number)
|
|
|
|
|
return found_packet;
|
2011-10-11 08:44:27 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Get an AVFrame (if any): decode the current video packet, and on success
// copy the picture data into a heap-allocated AVPicture that is tracked in
// the 'frames' cache (the decoder reuses its internal AVFrame buffer).
// Returns true when a complete frame was decoded.
bool FFmpegReader::GetAVFrame()
{
	// Decode video frame
	int frameFinished = 0;

	AVFrame *next_frame = avcodec_alloc_frame();
	avcodec_decode_video2(pCodecCtx, next_frame, &frameFinished, packet);

	// is frame finished
	if (frameFinished)
	{
		// AVFrames are clobbered on the each call to avcodec_decode_video, so we
		// must make a copy of the image data before this method is called again.
		AVPicture *copyFrame = new AVPicture();
		avpicture_alloc(copyFrame, pCodecCtx->pix_fmt, info.width, info.height);
		av_picture_copy(copyFrame, (AVPicture *) next_frame, pCodecCtx->pix_fmt, info.width, info.height);

		// add to AVFrame cache (if frame finished)
		frames[copyFrame] = copyFrame;
		pFrame = frames[copyFrame];

		// Detect interlaced frame (only once)
		if (!check_interlace)
		{
			check_interlace = true;
			info.interlaced_frame = next_frame->interlaced_frame;
			info.top_field_first = next_frame->top_field_first;
		}
	}
	else
	{
		// Remove packet (since this packet is pointless — it produced no frame)
		RemoveAVPacket(packet);
	}

	// deallocate the frame (the copied picture, if any, lives on in 'frames')
	av_free(next_frame);

	// Did we get a video frame?
	return frameFinished;
}
|
|
|
|
|
|
|
|
|
|
// Check the current seek position and determine if we need to seek again
|
2011-10-24 08:22:21 -05:00
|
|
|
bool FFmpegReader::CheckSeek(bool is_video)
|
2011-10-11 08:44:27 -05:00
|
|
|
{
|
|
|
|
|
// Are we seeking for a specific frame?
|
|
|
|
|
if (is_seeking)
|
|
|
|
|
{
|
2014-08-27 09:44:27 -05:00
|
|
|
// Determine if both an audio and video packet have been decoded since the seek happened.
|
|
|
|
|
// If not, allow the ReadStream method to keep looping
|
2014-09-13 16:35:11 -05:00
|
|
|
if ((is_video_seek && !seek_video_frame_found) || (!is_video_seek && !seek_audio_frame_found))
|
2014-08-27 09:44:27 -05:00
|
|
|
return false;
|
|
|
|
|
|
2014-09-26 09:35:38 -05:00
|
|
|
// Determine max seeked frame
|
|
|
|
|
int max_seeked_frame = seek_audio_frame_found; // determine max seeked frame
|
|
|
|
|
if (seek_video_frame_found > max_seeked_frame)
|
|
|
|
|
max_seeked_frame = seek_video_frame_found;
|
2011-10-11 08:44:27 -05:00
|
|
|
|
|
|
|
|
// determine if we are "before" the requested frame
|
2014-09-26 09:35:38 -05:00
|
|
|
if (max_seeked_frame >= seeking_frame)
|
2011-10-11 08:44:27 -05:00
|
|
|
{
|
2012-10-12 16:41:23 -05:00
|
|
|
// SEEKED TOO FAR
|
2014-08-27 09:44:27 -05:00
|
|
|
#pragma omp critical (debug_output)
|
2015-02-05 00:00:52 -06:00
|
|
|
AppendDebugMethod("FFmpegReader::CheckSeek (Too far, seek again)", "is_video_seek", is_video_seek, "max_seeked_frame", max_seeked_frame, "seeking_frame", seeking_frame, "seeking_pts", seeking_pts, "seek_video_frame_found", seek_video_frame_found, "seek_audio_frame_found", seek_audio_frame_found);
|
2011-10-11 08:44:27 -05:00
|
|
|
|
2012-10-12 16:41:23 -05:00
|
|
|
// Seek again... to the nearest Keyframe
|
|
|
|
|
Seek(seeking_frame - 10);
|
|
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
2014-09-13 16:35:11 -05:00
|
|
|
// SEEK WORKED
|
2014-08-27 09:44:27 -05:00
|
|
|
#pragma omp critical (debug_output)
|
2014-09-26 09:35:38 -05:00
|
|
|
AppendDebugMethod("FFmpegReader::CheckSeek (Successful)", "is_video_seek", is_video_seek, "current_pts", packet->pts, "seeking_pts", seeking_pts, "seeking_frame", seeking_frame, "seek_video_frame_found", seek_video_frame_found, "seek_audio_frame_found", seek_audio_frame_found);
|
2014-08-27 09:44:27 -05:00
|
|
|
|
2012-10-12 16:41:23 -05:00
|
|
|
// Seek worked, and we are "before" the requested frame
|
|
|
|
|
is_seeking = false;
|
|
|
|
|
seeking_frame = 0;
|
2014-04-05 10:19:20 -05:00
|
|
|
seeking_pts = -1;
|
2011-10-11 08:44:27 -05:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// return the pts to seek to (if any)
|
|
|
|
|
return is_seeking;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Process a video packet
// Converts the already-decoded picture (pFrame) to RGBA, wraps it in a Frame
// object, and adds it to the working cache. The heavy conversion work runs in
// an OpenMP task; shared structures are guarded with named critical sections.
// @param requested_frame  the frame number the caller ultimately wants
void FFmpegReader::ProcessVideoPacket(int requested_frame)
{
	// Calculate current frame #
	int current_frame = ConvertVideoPTStoFrame(GetVideoPTS());

	// Are we close enough to decode the frame? (more than 20 frames early => skip)
	if ((current_frame) < (requested_frame - 20))
	{
		#pragma omp critical (packet_cache)
		{
			// Remove frame and packet
			RemoveAVFrame(pFrame);
			RemoveAVPacket(packet);
		}

		// Debug output
		#pragma omp critical (debug_output)
		AppendDebugMethod("FFmpegReader::ProcessVideoPacket (Skipped)", "requested_frame", requested_frame, "current_frame", current_frame, "", -1, "", -1, "", -1, "", -1);

		// Skip to next frame without decoding or caching
		return;
	}

	// Debug output
	#pragma omp critical (debug_output)
	AppendDebugMethod("FFmpegReader::ProcessVideoPacket (Before)", "requested_frame", requested_frame, "current_frame", current_frame, "", -1, "", -1, "", -1, "", -1);

	// Init some things local (for OpenMP) — copied so the task below does not
	// race with member state changing while it runs
	PixelFormat pix_fmt = pCodecCtx->pix_fmt;
	int height = info.height;
	int width = info.width;
	long int video_length = info.video_length;
	Cache *my_cache = &working_cache;
	AVPacket *my_packet = packets[packet];
	AVPicture *my_frame = frames[pFrame];

	// Get a scaling context (round-robin over the pre-built rescaler pool)
	SwsContext *img_convert_ctx = image_rescalers[rescaler_position];
	rescaler_position++;
	if (rescaler_position == num_of_rescalers)
		rescaler_position = 0;

	// Add video frame to list of processing video frames
	#pragma omp critical (processing_list)
	processing_video_frames[current_frame] = current_frame;

	// Track 1st video packet after a successful seek
	if (!seek_video_frame_found && is_seeking)
		seek_video_frame_found = current_frame;

	#pragma omp task firstprivate(current_frame, my_cache, my_packet, my_frame, height, width, video_length, pix_fmt, img_convert_ctx)
	{
		// Create variables for a RGB Frame (since most videos are not in RGB, we must convert it)
		AVFrame *pFrameRGB = NULL;
		int numBytes;
		uint8_t *buffer = NULL;

		// Allocate an AVFrame structure
		pFrameRGB = avcodec_alloc_frame();
		if (pFrameRGB == NULL)
			throw OutOfBoundsFrame("Convert Image Broke!", current_frame, video_length);

		// Determine required buffer size and allocate buffer
		numBytes = avpicture_get_size(PIX_FMT_RGBA, width, height);
		// NOTE(review): allocated at 2x the reported size — presumably headroom for
		// sws_scale; confirm whether the doubling is still required.
		buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t) * 2);

		// Assign appropriate parts of buffer to image planes in pFrameRGB
		// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
		// of AVPicture
		avpicture_fill((AVPicture *) pFrameRGB, buffer, PIX_FMT_RGBA, width, height);

		// Resize / Convert to RGB
		sws_scale(img_convert_ctx, my_frame->data, my_frame->linesize, 0,
			height, pFrameRGB->data, pFrameRGB->linesize);

		tr1::shared_ptr<Frame> f;
		#pragma omp critical (openshot_cache)
		// Create or get frame object
		f = CreateFrame(current_frame);

		// Add Image data to frame
		f->AddImage(width, height, "RGBA", Magick::CharPixel, buffer);

		#pragma omp critical (openshot_cache)
		// Update working cache
		my_cache->Add(f->number, f);

		// Free the RGB image (the Frame made its own copy of the pixels above)
		av_free(buffer);
		av_free(pFrameRGB);

		#pragma omp critical (packet_cache)
		{
			// Remove frame and packet
			RemoveAVFrame(my_frame);
			RemoveAVPacket(my_packet);
		}

		// Remove video frame from list of processing video frames
		#pragma omp critical (processing_list)
		{
			processing_video_frames.erase(current_frame);
			processed_video_frames[current_frame] = current_frame;
		}

		// Debug output
		#pragma omp critical (debug_output)
		AppendDebugMethod("FFmpegReader::ProcessVideoPacket (After)", "requested_frame", requested_frame, "current_frame", current_frame, "f->number", f->number, "", -1, "", -1, "", -1);

	} // end omp task
}
|
|
|
|
|
|
|
|
|
|
// Process an audio packet
// Decodes the current audio packet, resamples it to signed 16-bit interleaved,
// splits the samples per channel, and distributes them across one or more
// Frame objects in the working cache (a packet can straddle frame boundaries).
// @param requested_frame  the frame number the caller ultimately wants
// @param target_frame     the first frame this packet's samples belong to
// @param starting_sample  sample offset within target_frame to start writing at
// @throws ErrorDecodingAudio when the decoder consumes no bytes
void FFmpegReader::ProcessAudioPacket(int requested_frame, int target_frame, int starting_sample)
{
	// Are we close enough to decode the frame's audio? (more than 20 frames early => skip)
	if (target_frame < (requested_frame - 20))
	{
		#pragma omp critical (packet_cache)
		// Remove packet
		RemoveAVPacket(packet);

		// Debug output
		#pragma omp critical (debug_output)
		AppendDebugMethod("FFmpegReader::ProcessAudioPacket (Skipped)", "requested_frame", requested_frame, "target_frame", target_frame, "starting_sample", starting_sample, "", -1, "", -1, "", -1);

		// Skip to next frame without decoding or caching
		return;
	}

	// Init some local variables (for OpenMP)
	Cache *my_cache = &working_cache;
	AVPacket *my_packet = packets[packet];

	// Add audio frame to list of processing audio frames
	#pragma omp critical (processing_list)
	processing_audio_frames[target_frame] = target_frame;

	// Track 1st audio packet after a successful seek
	if (!seek_audio_frame_found && is_seeking)
		seek_audio_frame_found = target_frame;

	// Debug output
	#pragma omp critical (debug_output)
	AppendDebugMethod("FFmpegReader::ProcessAudioPacket (Before)", "requested_frame", requested_frame, "target_frame", target_frame, "starting_sample", starting_sample, "", -1, "", -1, "", -1);

	// Init an AVFrame to hold the decoded audio samples
	int frame_finished = 0;
	AVFrame *audio_frame = avcodec_alloc_frame();
	avcodec_get_frame_defaults(audio_frame);

	// Allocate audio buffer
	int16_t *audio_buf = new int16_t[AVCODEC_MAX_AUDIO_FRAME_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
	int packet_samples = 0;
	int data_size = 0;

	// re-initialize buffer size (it gets changed in the avcodec_decode_audio2 method call)
	// NOTE(review): buf_size is no longer consumed by avcodec_decode_audio4 below —
	// apparently a leftover from the old avcodec_decode_audio2 API.
	int buf_size = AVCODEC_MAX_AUDIO_FRAME_SIZE + FF_INPUT_BUFFER_PADDING_SIZE;
	int used = avcodec_decode_audio4(aCodecCtx, audio_frame, &frame_finished, my_packet);

	if (used <= 0) {
		// Throw exception
		throw ErrorDecodingAudio("Error decoding audio samples", target_frame);
	} else if (frame_finished) {

		// determine how many samples were decoded
		int planar = av_sample_fmt_is_planar(aCodecCtx->sample_fmt);
		int plane_size = -1;
		data_size = av_samples_get_buffer_size(&plane_size,
				aCodecCtx->channels,
				audio_frame->nb_samples,
				aCodecCtx->sample_fmt, 1);

		// Calculate total number of samples (across all channels)
		packet_samples = audio_frame->nb_samples * aCodecCtx->channels;
	}

	// Estimate the # of samples and the end of this packet's location (to prevent GAPS for the next timestamp)
	int pts_remaining_samples = packet_samples / info.channels; // Adjust for zero based array

	// DEBUG (FOR AUDIO ISSUES) - Get the audio packet start time (in seconds)
	int adjusted_pts = packet->pts + audio_pts_offset;
	double audio_seconds = double(adjusted_pts) * info.audio_timebase.ToDouble();
	double sample_seconds = float(pts_total) / info.sample_rate;

	// Debug output
	#pragma omp critical (debug_output)
	{
		AppendDebugMethod("FFmpegReader::ProcessAudioPacket (Decode Info A)", "pts_counter", pts_counter, "PTS", adjusted_pts, "Offset", audio_pts_offset, "PTS Diff", adjusted_pts - prev_pts, "Samples", pts_remaining_samples, "Sample PTS ratio", float(adjusted_pts - prev_pts) / pts_remaining_samples);
		AppendDebugMethod("FFmpegReader::ProcessAudioPacket (Decode Info B)", "Sample Diff", pts_remaining_samples - prev_samples - prev_pts, "Total", pts_total, "PTS Seconds", audio_seconds, "Sample Seconds", sample_seconds, "Seconds Diff", audio_seconds - sample_seconds, "raw samples", packet_samples);
	}

	// DEBUG (FOR AUDIO ISSUES) - track running PTS/sample totals for the next packet
	prev_pts = adjusted_pts;
	pts_total += pts_remaining_samples;
	pts_counter++;
	prev_samples = pts_remaining_samples;

	// Advance previous_packet_location (frame #, sample offset) past this
	// packet's samples, so the next packet knows where it should start
	while (pts_remaining_samples)
	{
		// Get Samples per frame (for this frame number)
		int samples_per_frame = Frame::GetSamplesPerFrame(previous_packet_location.frame, info.fps, info.sample_rate);

		// Calculate # of samples to add to this frame
		int samples = samples_per_frame - previous_packet_location.sample_start;
		if (samples > pts_remaining_samples)
			samples = pts_remaining_samples;

		// Decrement remaining samples
		pts_remaining_samples -= samples;

		if (pts_remaining_samples > 0) {
			// next frame
			previous_packet_location.frame++;
			previous_packet_location.sample_start = 0;
		} else {
			// Increment sample start
			previous_packet_location.sample_start += samples;
		}
	}

	#pragma omp critical (packet_cache)
	RemoveAVPacket(my_packet);

	// TODO: Disable OpenMP on audio packet processing. It is not currently possible to reassemble the packets
	// in order without creating small gaps and/or overlapping sample values.
	// NOTE(review): "xxx" below deliberately turns the task pragma into a no-op,
	// so this block runs synchronously on the calling thread.
	#pragma xxx omp task firstprivate(requested_frame, target_frame, my_cache, starting_sample, audio_buf)
	{
		#pragma omp critical (debug_output)
		AppendDebugMethod("FFmpegReader::ProcessAudioPacket (ReSample)", "packet_samples", packet_samples, "info.channels", info.channels, "info.sample_rate", info.sample_rate, "aCodecCtx->sample_fmt", aCodecCtx->sample_fmt, "AV_SAMPLE_FMT_S16", AV_SAMPLE_FMT_S16, "", -1);

		// Create output frame (interleaved S16, same sample count)
		AVFrame *audio_converted = avcodec_alloc_frame();
		avcodec_get_frame_defaults(audio_converted);
		audio_converted->nb_samples = audio_frame->nb_samples;
		av_samples_alloc(audio_converted->data, audio_converted->linesize, info.channels, audio_frame->nb_samples, AV_SAMPLE_FMT_S16, 1);

		// setup resample context (format conversion only — rate and layout are unchanged)
		AVAudioResampleContext *avr = avresample_alloc_context();
		av_opt_set_int(avr, "in_channel_layout", aCodecCtx->channel_layout, 0);
		av_opt_set_int(avr, "out_channel_layout", aCodecCtx->channel_layout, 0);
		av_opt_set_int(avr, "in_sample_fmt", aCodecCtx->sample_fmt, 0);
		av_opt_set_int(avr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
		av_opt_set_int(avr, "in_sample_rate", info.sample_rate, 0);
		av_opt_set_int(avr, "out_sample_rate", info.sample_rate, 0);
		av_opt_set_int(avr, "in_channels", info.channels, 0);
		av_opt_set_int(avr, "out_channels", info.channels, 0);
		int r = avresample_open(avr);
		int nb_samples = 0;

		// Convert audio samples
		nb_samples = avresample_convert(avr, 		// audio resample context
				audio_converted->data, 				// output data pointers
				audio_converted->linesize[0], 		// output plane size, in bytes. (0 if unknown)
				audio_converted->nb_samples, 		// maximum number of samples that the output buffer can hold
				audio_frame->data, 					// input data pointers
				audio_frame->linesize[0], 			// input plane size, in bytes (0 if unknown)
				audio_frame->nb_samples); 			// number of input samples to convert

		// Copy audio samples over original samples
		memcpy(audio_buf, audio_converted->data[0], audio_converted->nb_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * info.channels);

		// Deallocate resample buffer
		avresample_close(avr);
		avresample_free(&avr);
		avr = NULL;

		// Free frames
		avcodec_free_frame(&audio_frame);
		avcodec_free_frame(&audio_converted);

		int starting_frame_number = -1;
		bool partial_frame = true;
		// One pass per channel: de-interleave that channel, then spread its
		// samples across the Frame objects they belong to
		for (int channel_filter = 0; channel_filter < info.channels; channel_filter++)
		{
			// Array of floats (to hold samples for each channel)
			starting_frame_number = target_frame;
			int channel_buffer_size = packet_samples / info.channels;
			float *channel_buffer = new float[channel_buffer_size];

			// Init buffer array
			for (int z = 0; z < channel_buffer_size; z++)
				channel_buffer[z] = 0.0f;

			// Loop through all samples and add them to our Frame based on channel.
			// Toggle through each channel number, since channel data is stored like (left right left right)
			int channel = 0;
			int position = 0;
			for (int sample = 0; sample < packet_samples; sample++)
			{
				// Only add samples for current channel
				if (channel_filter == channel)
				{
					// Add sample (convert from (-32768 to 32768) to (-1.0 to 1.0))
					channel_buffer[position] = audio_buf[sample] * (1.0f / (1 << 15));

					// Increment audio position
					position++;
				}

				// increment channel (if needed)
				if ((channel + 1) < info.channels)
					// move to next channel
					channel ++;
				else
					// reset channel
					channel = 0;
			}

			// Loop through samples, and add them to the correct frames
			int start = starting_sample;
			int remaining_samples = channel_buffer_size;
			float *iterate_channel_buffer = channel_buffer;	// pointer to channel buffer
			while (remaining_samples > 0)
			{
				// Get Samples per frame (for this frame number)
				int samples_per_frame = Frame::GetSamplesPerFrame(starting_frame_number, info.fps, info.sample_rate);

				// Calculate # of samples to add to this frame
				int samples = samples_per_frame - start;
				if (samples > remaining_samples)
					samples = remaining_samples;

				// Add audio frame to list of processing audio frames
				#pragma omp critical (processing_list)
				processing_audio_frames[starting_frame_number] = starting_frame_number;

				tr1::shared_ptr<Frame> f;
				#pragma omp critical (openshot_cache)
				// Create or get frame object
				f = CreateFrame(starting_frame_number);

				// Add samples for current channel to the frame. Reduce the volume to 98%, to prevent
				// some louder samples from maxing out at 1.0 (not sure why this happens)
				f->AddAudio(true, channel_filter, start, iterate_channel_buffer, samples, 0.98f);

				// Determine if this frame was "partially" filled in
				if (samples_per_frame == start + samples)
					partial_frame = false;
				else
					partial_frame = true;

				// Debug output
				#pragma omp critical (debug_output)
				AppendDebugMethod("FFmpegReader::ProcessAudioPacket (f->AddAudio)", "frame", starting_frame_number, "start", start, "samples", samples, "channel", channel_filter, "partial_frame", partial_frame, "samples_per_frame", samples_per_frame);

				#pragma omp critical (openshot_cache)
				// Add or update cache
				my_cache->Add(f->number, f);

				// Decrement remaining samples
				remaining_samples -= samples;

				// Increment buffer (to next set of samples)
				if (remaining_samples > 0)
					iterate_channel_buffer += samples;

				// Increment frame number
				starting_frame_number++;

				// Reset starting sample #
				start = 0;
			}

			// clear channel buffer
			delete[] channel_buffer;
			channel_buffer = NULL;
			iterate_channel_buffer = NULL;
		}

		// Clean up some arrays
		delete[] audio_buf;
		audio_buf = NULL;

		// Add video frame to list of processing video frames
		#pragma omp critical (processing_list)
		{
			// Update all frames as completed
			for (int f = target_frame; f < starting_frame_number; f++) {
				if (f == (starting_frame_number - 1) && partial_frame)
					// ignore partial frames (always the last frame processed)
					break;
				processing_audio_frames.erase(f);
				processed_audio_frames[f] = f;
			}
		}

		// Debug output
		#pragma omp critical (debug_output)
		AppendDebugMethod("FFmpegReader::ProcessAudioPacket (After)", "requested_frame", requested_frame, "starting_frame", target_frame, "end_frame", starting_frame_number, "", -1, "", -1, "", -1);

	} // end task
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Seek to a specific frame. This is not always frame accurate, it's more of an estimation on many codecs.
// Clears all caching/bookkeeping state, then either re-opens the file (for
// requests near frame 1) or seeks the video stream — falling back to the
// audio stream — to a keyframe a few frames before the requested one.
// @param requested_frame  the frame to position the reader near (clamped to [1, video_length])
// @throws TooManySeeks after more than 10 consecutive seek attempts
void FFmpegReader::Seek(int requested_frame) throw(TooManySeeks)
{
	// Adjust for a requested frame that is too small or too large
	if (requested_frame < 1)
		requested_frame = 1;
	if (requested_frame > info.video_length)
		requested_frame = info.video_length;

	// Debug output
	#pragma omp critical (debug_output)
	AppendDebugMethod("FFmpegReader::Seek", "requested_frame", requested_frame, "seek_count", seek_count, "last_frame", last_frame, "", -1, "", -1, "", -1);

	// Clear working cache (since we are seeking to another location in the file)
	working_cache.Clear();

	// Clear processed lists
	processing_audio_frames.clear();
	processing_video_frames.clear();
	processed_video_frames.clear();
	processed_audio_frames.clear();

	// Reset the last frame variable
	last_frame = 0;

	// Increment seek count
	seek_count++;

	// too many seeks
	if (seek_count > 10)
		throw TooManySeeks("Too many seek attempts... something seems wrong.", path);

	// If seeking to frame 1, we need to close and re-open the file (this is more reliable than seeking)
	int buffer_amount = 6;
	if (requested_frame - buffer_amount <= 1)
	{
		// Close and re-open file (basically seeking to frame 1)
		Close();
		Open();

		// Not actually seeking, so clear these flags
		is_seeking = false;
		seeking_frame = 1;
		seeking_pts = ConvertFrameToVideoPTS(1);
		seek_audio_frame_found = 0; // used to detect which frames to throw away after a seek
		seek_video_frame_found = 0; // used to detect which frames to throw away after a seek
	}
	else
	{
		// Seek to nearest key-frame (aka, i-frame)
		bool seek_worked = false;

		// Seek video stream (if any)
		int64_t seek_target = ConvertFrameToVideoPTS(requested_frame - buffer_amount);
		if (info.has_video) {
			if (av_seek_frame(pFormatCtx, info.video_stream_index, seek_target, AVSEEK_FLAG_BACKWARD) < 0) {
				fprintf(stderr, "%s: error while seeking video stream\n", pFormatCtx->filename);
			} else
			{
				// VIDEO SEEK
				is_video_seek = true;
				seek_worked = true;
			}
		}

		// Seek audio stream (if not already seeked... and if an audio stream is found)
		if (!seek_worked && info.has_audio)
		{
			seek_target = ConvertFrameToAudioPTS(requested_frame - buffer_amount);

			if (info.has_audio && av_seek_frame(pFormatCtx, info.audio_stream_index, seek_target, AVSEEK_FLAG_BACKWARD) < 0) {
				fprintf(stderr, "%s: error while seeking audio stream\n", pFormatCtx->filename);
			} else
			{
				// AUDIO SEEK
				is_video_seek = false;
				seek_worked = true;
			}
		}

		// Was the seek successful?
		if (seek_worked)
		{
			// Flush audio buffer (discard decoder state from the old position)
			if (info.has_audio)
				avcodec_flush_buffers(aCodecCtx);

			// Flush video buffer
			if (info.has_video)
				avcodec_flush_buffers(pCodecCtx);

			// Reset previous audio location to zero
			previous_packet_location.frame = -1;
			previous_packet_location.sample_start = 0;

			// init seek flags
			is_seeking = true;
			seeking_pts = seek_target;
			seeking_frame = requested_frame;
			seek_audio_frame_found = 0; // used to detect which frames to throw away after a seek
			seek_video_frame_found = 0; // used to detect which frames to throw away after a seek
		}
		else
		{
			// seek failed
			is_seeking = false;
			seeking_pts = 0;
			seeking_frame = 0;
		}
	}
}
|
|
|
|
|
|
2011-10-24 08:22:21 -05:00
|
|
|
// Get the PTS for the current video packet
|
|
|
|
|
int FFmpegReader::GetVideoPTS()
|
|
|
|
|
{
|
2011-12-11 20:42:50 -06:00
|
|
|
int current_pts = 0;
|
2012-07-02 00:51:10 -05:00
|
|
|
if(packet->dts != AV_NOPTS_VALUE)
|
|
|
|
|
current_pts = packet->dts;
|
2011-10-24 08:22:21 -05:00
|
|
|
|
|
|
|
|
// Return adjusted PTS
|
|
|
|
|
return current_pts;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Update PTS Offset (if any)
|
|
|
|
|
void FFmpegReader::UpdatePTSOffset(bool is_video)
|
|
|
|
|
{
|
|
|
|
|
// Determine the offset between the PTS and Frame number (only for 1st frame)
|
|
|
|
|
if (is_video)
|
|
|
|
|
{
|
|
|
|
|
// VIDEO PACKET
|
2011-12-11 20:42:50 -06:00
|
|
|
if (video_pts_offset == 99999) // Has the offset been set yet?
|
2011-10-24 08:22:21 -05:00
|
|
|
// Find the difference between PTS and frame number
|
2011-11-07 17:12:25 -06:00
|
|
|
video_pts_offset = 0 - GetVideoPTS();
|
2011-10-24 08:22:21 -05:00
|
|
|
}
|
|
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
// AUDIO PACKET
|
2011-12-11 20:42:50 -06:00
|
|
|
if (audio_pts_offset == 99999) // Has the offset been set yet?
|
2011-10-24 08:22:21 -05:00
|
|
|
// Find the difference between PTS and frame number
|
2012-07-02 00:51:10 -05:00
|
|
|
audio_pts_offset = 0 - packet->pts;
|
2011-10-24 08:22:21 -05:00
|
|
|
}
|
2011-10-11 08:44:27 -05:00
|
|
|
}
|
|
|
|
|
|
2011-12-11 20:42:50 -06:00
|
|
|
// Convert PTS into Frame Number
|
2011-10-24 08:22:21 -05:00
|
|
|
int FFmpegReader::ConvertVideoPTStoFrame(int pts)
|
2011-10-11 08:44:27 -05:00
|
|
|
{
|
2011-12-11 20:42:50 -06:00
|
|
|
// Apply PTS offset
|
|
|
|
|
pts = pts + video_pts_offset;
|
|
|
|
|
|
2011-11-07 17:12:25 -06:00
|
|
|
// Get the video packet start time (in seconds)
|
2011-12-11 20:42:50 -06:00
|
|
|
double video_seconds = double(pts) * info.video_timebase.ToDouble();
|
2011-11-07 17:12:25 -06:00
|
|
|
|
2011-12-11 20:42:50 -06:00
|
|
|
// Divide by the video timebase, to get the video frame number (frame # is decimal at this point)
|
|
|
|
|
int frame = round(video_seconds * info.fps.ToDouble()) + 1;
|
2011-11-07 17:12:25 -06:00
|
|
|
|
2011-12-11 20:42:50 -06:00
|
|
|
// Return frame #
|
2011-11-07 17:12:25 -06:00
|
|
|
return frame;
|
2011-10-11 08:44:27 -05:00
|
|
|
}
|
|
|
|
|
|
2011-10-24 08:22:21 -05:00
|
|
|
// Convert Frame Number into Video PTS
|
|
|
|
|
int FFmpegReader::ConvertFrameToVideoPTS(int frame_number)
|
2011-10-11 08:44:27 -05:00
|
|
|
{
|
2011-11-07 17:12:25 -06:00
|
|
|
// Get timestamp of this frame (in seconds)
|
2011-12-11 20:42:50 -06:00
|
|
|
double seconds = double(frame_number) / info.fps.ToDouble();
|
2011-11-07 17:12:25 -06:00
|
|
|
|
2011-12-11 20:42:50 -06:00
|
|
|
// Calculate the # of video packets in this timestamp
|
|
|
|
|
int video_pts = round(seconds / info.video_timebase.ToDouble());
|
2011-11-07 17:12:25 -06:00
|
|
|
|
2011-12-11 20:42:50 -06:00
|
|
|
// Apply PTS offset (opposite)
|
2011-11-07 17:12:25 -06:00
|
|
|
return video_pts - video_pts_offset;
|
2011-10-11 08:44:27 -05:00
|
|
|
}
|
|
|
|
|
|
2011-12-11 20:42:50 -06:00
|
|
|
// Convert Frame Number into Audio PTS
int FFmpegReader::ConvertFrameToAudioPTS(int frame_number)
{
	// Get timestamp of this frame (in seconds)
	double seconds = double(frame_number) / info.fps.ToDouble();

	// Calculate the # of audio timebase ticks that cover this timestamp
	int audio_pts = round(seconds / info.audio_timebase.ToDouble());

	// Apply PTS offset (opposite direction of the offset applied when
	// converting an audio PTS back into a frame number)
	return audio_pts - audio_pts_offset;
}
|
|
|
|
|
|
|
|
|
|
// Calculate starting video frame and sample # for an audio PTS, i.e. map an
// audio packet's timestamp onto the video frame / per-frame sample grid.
// Also heals small gaps between consecutive audio packets caused by jittery
// PTS values, using the location of the previous packet.
AudioLocation FFmpegReader::GetAudioPTSLocation(int pts)
{
	// Apply PTS offset (normalizes the stream so the first PTS maps near zero)
	pts = pts + audio_pts_offset;

	// Get the audio packet start time (in seconds)
	double audio_seconds = double(pts) * info.audio_timebase.ToDouble();

	// Multiply by FPS to get the (fractional, 1-based) frame number
	double frame = (audio_seconds * info.fps.ToDouble()) + 1;

	// Frame # as a whole number (no more decimals)
	int whole_frame = int(frame);

	// Remove the whole number, and only keep the fractional part of the frame
	double sample_start_percentage = frame - double(whole_frame);

	// Get samples per frame for this specific frame number
	int samples_per_frame = Frame::GetSamplesPerFrame(whole_frame, info.fps, info.sample_rate);

	// Convert the fractional frame position into a starting sample #
	int sample_start = round(double(samples_per_frame) * sample_start_percentage);

	// Protect against broken (i.e. negative) timestamps
	if (whole_frame < 1)
		whole_frame = 1;
	if (sample_start < 0)
		sample_start = 0;

	// Prepare final audio packet location
	AudioLocation location = {whole_frame, sample_start};

	// Compare to previous audio packet (and fix small gaps due to varying PTS
	// timestamps). NOTE(review): is_near() is declared elsewhere; presumably it
	// treats the two locations as "close" within +/- samples_per_frame samples
	// -- confirm against the AudioLocation definition.
	if (previous_packet_location.frame != -1 && location.is_near(previous_packet_location, samples_per_frame, samples_per_frame))
	{
		// Remember the uncorrected location for the debug message below
		int orig_frame = location.frame;
		int orig_start = location.sample_start;

		// Update sample start, to prevent gaps in audio
		if (previous_packet_location.sample_start <= samples_per_frame)
		{
			// Snap back to exactly where the previous packet left off
			location.sample_start = previous_packet_location.sample_start;
			location.frame = previous_packet_location.frame;
		}
		else
		{
			// Set to next frame (since we exceeded the # of samples on a frame)
			location.sample_start = 0;
			location.frame++;
		}

		// Debug output (serialized across OpenMP threads)
		#pragma omp critical (debug_output)
		AppendDebugMethod("FFmpegReader::GetAudioPTSLocation (Audio Gap Detected)", "Source Frame", orig_frame, "Source Audio Sample", orig_start, "Target Frame", location.frame, "Target Audio Sample", location.sample_start, "pts", pts, "", -1);
	}

	// Set previous location (consumed by the gap check on the next call)
	previous_packet_location = location;

	// Return the associated video frame and starting sample #
	return location;
}
|
|
|
|
|
|
2011-10-24 08:22:21 -05:00
|
|
|
// Create a new Frame (or return an existing one) and add it to the working queue.
|
2012-10-14 03:43:52 -05:00
|
|
|
tr1::shared_ptr<Frame> FFmpegReader::CreateFrame(int requested_frame)
|
2011-10-24 08:22:21 -05:00
|
|
|
{
|
|
|
|
|
// Check working cache
|
|
|
|
|
if (working_cache.Exists(requested_frame))
|
2012-07-01 01:43:06 -05:00
|
|
|
{
|
2011-10-24 08:22:21 -05:00
|
|
|
// Return existing frame
|
2012-10-14 03:43:52 -05:00
|
|
|
tr1::shared_ptr<Frame> output = working_cache.GetFrame(requested_frame);
|
2012-07-01 01:43:06 -05:00
|
|
|
|
|
|
|
|
return output;
|
|
|
|
|
}
|
2011-10-24 08:22:21 -05:00
|
|
|
else
|
|
|
|
|
{
|
|
|
|
|
// Create a new frame on the working cache
|
2014-01-29 00:18:40 -06:00
|
|
|
tr1::shared_ptr<Frame> f(new Frame(requested_frame, info.width, info.height, "#000000", Frame::GetSamplesPerFrame(requested_frame, info.fps, info.sample_rate), info.channels));
|
2015-02-05 00:00:52 -06:00
|
|
|
f->SetPixelRatio(info.pixel_ratio.num, info.pixel_ratio.den); // update pixel ratio
|
|
|
|
|
f->ChannelsLayout(info.channel_layout); // update audio channel layout from the parent reader
|
|
|
|
|
f->SampleRate(info.sample_rate); // update the frame's sample rate of the parent reader
|
2012-07-02 00:51:10 -05:00
|
|
|
|
2011-10-24 08:22:21 -05:00
|
|
|
working_cache.Add(requested_frame, f);
|
|
|
|
|
|
2014-03-21 01:25:17 -05:00
|
|
|
// Set the largest processed frame (if this is larger)
|
|
|
|
|
if (requested_frame > largest_frame_processed)
|
|
|
|
|
largest_frame_processed = requested_frame;
|
|
|
|
|
|
2011-10-24 08:22:21 -05:00
|
|
|
// Return new frame
|
|
|
|
|
return f;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2014-09-13 16:35:11 -05:00
|
|
|
// Determine if frame is partial due to seek
|
|
|
|
|
bool FFmpegReader::IsPartialFrame(int requested_frame) {
|
|
|
|
|
|
|
|
|
|
// Sometimes a seek gets partial frames, and we need to remove them
|
|
|
|
|
bool seek_trash = false;
|
|
|
|
|
int max_seeked_frame = seek_audio_frame_found; // determine max seeked frame
|
|
|
|
|
if (seek_video_frame_found > max_seeked_frame)
|
|
|
|
|
max_seeked_frame = seek_video_frame_found;
|
2014-09-26 09:35:38 -05:00
|
|
|
if ((info.has_audio && seek_audio_frame_found && max_seeked_frame >= requested_frame) ||
|
|
|
|
|
(info.has_video && seek_video_frame_found && max_seeked_frame >= requested_frame))
|
2014-09-13 16:35:11 -05:00
|
|
|
seek_trash = true;
|
|
|
|
|
|
|
|
|
|
return seek_trash;
|
|
|
|
|
}
|
|
|
|
|
|
2011-10-24 08:22:21 -05:00
|
|
|
// Check the working queue, and move finished frames to the finished queue.
// Drains the working cache in ascending frame order, stopping at the first
// frame that is not yet complete.
void FFmpegReader::CheckWorkingFrames(bool end_of_stream)
{
	// Loop through all working queue frames
	while (true)
	{
		// Break if no working frames
		if (working_cache.Count() == 0)
			break;

		// Get the front (smallest-numbered) frame of the working cache
		tr1::shared_ptr<Frame> f(working_cache.GetSmallestFrame());

		// A frame is "ready" for a stream once its number has been recorded
		// in the corresponding processed set
		bool is_video_ready = processed_video_frames.count(f->number);
		bool is_audio_ready = processed_audio_frames.count(f->number);
		bool is_seek_trash = IsPartialFrame(f->number);

		// Adjust for available streams (a missing stream can never block a frame)
		if (!info.has_video) is_video_ready = true;
		if (!info.has_audio) is_audio_ready = true;

		// Debug output (serialized across OpenMP threads)
		#pragma omp critical (debug_output)
		AppendDebugMethod("FFmpegReader::CheckWorkingFrames", "frame_number", f->number, "is_video_ready", is_video_ready, "is_audio_ready", is_audio_ready, "processed_video_frames.count(f->number)", processed_video_frames.count(f->number), "processed_audio_frames.count(f->number)", processed_audio_frames.count(f->number), "", -1);

		// Check if working frame is final. NOTE(review): the >= 200 clause is a
		// safety valve that force-finalizes frames when the working cache grows
		// too large -- presumably to bound memory use; confirm the threshold.
		if ((!end_of_stream && is_video_ready && is_audio_ready) || end_of_stream || is_seek_trash || working_cache.Count() >= 200)
		{
			// Debug output (serialized across OpenMP threads)
			#pragma omp critical (debug_output)
			AppendDebugMethod("FFmpegReader::CheckWorkingFrames (mark frame as final)", "f->number", f->number, "is_seek_trash", is_seek_trash, "Working Cache Count", working_cache.Count(), "Final Cache Count", final_cache.Count(), "", -1, "", -1);

			if (!is_seek_trash)
			{
				// Move frame to final cache
				final_cache.Add(f->number, f);

				// Remove frame from working cache
				working_cache.Remove(f->number);

				// Update last frame processed
				last_frame = f->number;

			} else {
				// Seek trash, so delete the frame from the working cache, and never add it to the final cache.
				working_cache.Remove(f->number);
			}
		}
		else
			// Front frame is not complete yet; stop looping
			break;
	}
}
|
|
|
|
|
|
2011-12-15 16:11:48 -06:00
|
|
|
// Check for the correct frames per second (FPS) value by scanning the 1st few
// seconds of video packets, counting how many packets land in each 1-second
// bucket. If the observed rate disagrees with FFmpeg's reported rate by more
// than one frame, info.fps is corrected (favoring the common "half rate" case).
void FFmpegReader::CheckFPS()
{
	check_fps = true;
	// NOTE(review): pFrame is passed where avpicture_alloc expects an
	// AVPicture* -- confirm pFrame's declared type matches here.
	avpicture_alloc(pFrame, pCodecCtx->pix_fmt, info.width, info.height);

	// Packet counters for each of the first five seconds of video
	int first_second_counter = 0;
	int second_second_counter = 0;
	int third_second_counter = 0;
	int forth_second_counter = 0;
	int fifth_second_counter = 0;

	// Give up after this many packets, regardless of progress
	int iterations = 0;
	int threshold = 500;

	// Loop through the stream
	while (true)
	{
		// Get the next packet (if any)
		if (GetNextPacket() < 0)
			// Break loop when no more packets found
			break;

		// Video packet
		if (packet->stream_index == videoStream)
		{
			// Check if the AVFrame is finished and set it
			if (GetAVFrame())
			{
				// Update PTS / Frame Offset (if any)
				UpdatePTSOffset(true);

				// Get PTS of this packet
				int pts = GetVideoPTS();

				// Remove pFrame (free decoded picture memory)
				RemoveAVFrame(pFrame);

				// remove packet
				RemoveAVPacket(packet);

				// Apply PTS offset
				pts += video_pts_offset;

				// Get the video packet start time (in seconds)
				double video_seconds = double(pts) * info.video_timebase.ToDouble();

				// Increment the counter for whichever second this packet lands in
				if (video_seconds <= 1.0)
					first_second_counter++;
				else if (video_seconds > 1.0 && video_seconds <= 2.0)
					second_second_counter++;
				else if (video_seconds > 2.0 && video_seconds <= 3.0)
					third_second_counter++;
				else if (video_seconds > 3.0 && video_seconds <= 4.0)
					forth_second_counter++;
				else if (video_seconds > 4.0 && video_seconds <= 5.0)
					fifth_second_counter++;
				else
					// Too far past the sampling window; stop scanning
					break;
			}
			else
				// Incomplete frame; remove packet
				RemoveAVPacket(packet);
		}
		else
			// Non-video packet; remove packet
			RemoveAVPacket(packet);

		// Increment counters
		iterations++;

		// Give up (if threshold exceeded)
		if (iterations > threshold)
			break;
	}

	// Double check that all counters have greater than zero (or give up)
	if (second_second_counter == 0 || third_second_counter == 0 || forth_second_counter == 0 || fifth_second_counter == 0)
	{
		// Seek to frame 1 (rewind the stream for normal reading)
		Seek(1);

		// exit with no changes to FPS (not enough data to calculate)
		return;
	}

	// Average the four full-second buckets (second 1 may be truncated by the
	// first packet's timestamp, so it is excluded)
	int sum_fps = second_second_counter + third_second_counter + forth_second_counter + fifth_second_counter;
	int avg_fps = round(sum_fps / 4.0f);

	// Sometimes the FPS is incorrectly detected by FFmpeg. If the observed and
	// reported rates disagree by a frame or more, adjust the FPS of this
	// reader instance.

	// Get diff from actual frame rate
	double fps = info.fps.ToDouble();
	double diff = fps - double(avg_fps);

	// Is difference bigger than 1 frame?
	if (diff <= -1 || diff >= 1)
	{
		// Compare to half the frame rate (the most common type of issue)
		double half_fps = Fraction(info.fps.num / 2, info.fps.den).ToDouble();
		diff = half_fps - double(avg_fps);

		// Is difference bigger than 1 frame?
		if (diff <= -1 || diff >= 1)
		{
			// Half rate doesn't match either: trust the measured average
			info.fps = Fraction(avg_fps, 1);
		}
		else
		{
			// Update FPS for this reader instance (to 1/2 the original framerate)
			info.fps = Fraction(info.fps.num / 2, info.fps.den);
		}
	}

	// Seek to frame 1 (rewind the stream for normal reading)
	Seek(1);
}
|
|
|
|
|
|
2012-07-03 16:58:07 -05:00
|
|
|
// Remove AVFrame from cache (and deallocate it's memory)
|
2012-10-12 00:54:53 -05:00
|
|
|
void FFmpegReader::RemoveAVFrame(AVPicture* remove_frame)
|
2012-07-03 16:58:07 -05:00
|
|
|
{
|
|
|
|
|
// Remove pFrame (if exists)
|
|
|
|
|
if (frames.count(remove_frame))
|
|
|
|
|
{
|
|
|
|
|
// Free memory
|
2012-10-12 16:41:23 -05:00
|
|
|
avpicture_free(frames[remove_frame]);
|
2012-07-03 16:58:07 -05:00
|
|
|
|
|
|
|
|
// Remove from cache
|
|
|
|
|
frames.erase(remove_frame);
|
2014-09-15 00:24:46 -05:00
|
|
|
|
|
|
|
|
// Delete the object
|
|
|
|
|
delete remove_frame;
|
2012-07-03 16:58:07 -05:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Remove AVPacket from cache (and deallocate it's memory)
|
|
|
|
|
void FFmpegReader::RemoveAVPacket(AVPacket* remove_packet)
|
|
|
|
|
{
|
2012-07-04 03:07:26 -05:00
|
|
|
// Remove packet (if any)
|
2012-07-03 16:58:07 -05:00
|
|
|
if (packets.count(remove_packet))
|
|
|
|
|
{
|
2012-07-04 03:07:26 -05:00
|
|
|
// deallocate memory for packet
|
|
|
|
|
av_free_packet(remove_packet);
|
2012-07-03 16:58:07 -05:00
|
|
|
|
2014-09-15 00:24:46 -05:00
|
|
|
// Remove from cache
|
|
|
|
|
packets.erase(remove_packet);
|
|
|
|
|
|
|
|
|
|
// Delete the object
|
|
|
|
|
delete remove_packet;
|
2012-07-03 16:58:07 -05:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2012-07-06 02:34:18 -05:00
|
|
|
/// Get the smallest video frame that is still being processed
|
|
|
|
|
int FFmpegReader::GetSmallestVideoFrame()
|
|
|
|
|
{
|
|
|
|
|
// Loop through frame numbers
|
|
|
|
|
map<int, int>::iterator itr;
|
2012-07-08 23:26:44 -05:00
|
|
|
int smallest_frame = -1;
|
2012-07-06 02:34:18 -05:00
|
|
|
for(itr = processing_video_frames.begin(); itr != processing_video_frames.end(); ++itr)
|
|
|
|
|
{
|
|
|
|
|
if (itr->first < smallest_frame || smallest_frame == -1)
|
|
|
|
|
smallest_frame = itr->first;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Return frame number
|
|
|
|
|
return smallest_frame;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Get the smallest audio frame that is still being processed
|
|
|
|
|
int FFmpegReader::GetSmallestAudioFrame()
|
|
|
|
|
{
|
|
|
|
|
// Loop through frame numbers
|
|
|
|
|
map<int, int>::iterator itr;
|
2012-07-08 23:26:44 -05:00
|
|
|
int smallest_frame = -1;
|
2012-07-06 02:34:18 -05:00
|
|
|
for(itr = processing_audio_frames.begin(); itr != processing_audio_frames.end(); ++itr)
|
|
|
|
|
{
|
|
|
|
|
if (itr->first < smallest_frame || smallest_frame == -1)
|
|
|
|
|
smallest_frame = itr->first;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Return frame number
|
|
|
|
|
return smallest_frame;
|
|
|
|
|
}
|
|
|
|
|
|
2013-12-07 21:09:55 -06:00
|
|
|
// Generate JSON string of this object
|
|
|
|
|
string FFmpegReader::Json() {
|
|
|
|
|
|
|
|
|
|
// Return formatted string
|
|
|
|
|
return JsonValue().toStyledString();
|
|
|
|
|
}
|
|
|
|
|
|
2013-12-07 16:52:09 -06:00
|
|
|
// Generate Json::JsonValue for this object
|
|
|
|
|
Json::Value FFmpegReader::JsonValue() {
|
2012-07-03 16:58:07 -05:00
|
|
|
|
2013-12-07 16:52:09 -06:00
|
|
|
// Create root json object
|
|
|
|
|
Json::Value root = ReaderBase::JsonValue(); // get parent properties
|
2013-12-07 21:09:55 -06:00
|
|
|
root["type"] = "FFmpegReader";
|
2013-12-07 16:52:09 -06:00
|
|
|
root["path"] = path;
|
|
|
|
|
|
|
|
|
|
// return JsonValue
|
|
|
|
|
return root;
|
|
|
|
|
}
|
|
|
|
|
|
2013-12-07 21:09:55 -06:00
|
|
|
// Load JSON string into this object
|
|
|
|
|
void FFmpegReader::SetJson(string value) throw(InvalidJSON) {
|
|
|
|
|
|
|
|
|
|
// Parse JSON string into JSON objects
|
|
|
|
|
Json::Value root;
|
|
|
|
|
Json::Reader reader;
|
|
|
|
|
bool success = reader.parse( value, root );
|
|
|
|
|
if (!success)
|
|
|
|
|
// Raise exception
|
|
|
|
|
throw InvalidJSON("JSON could not be parsed (or is invalid)", "");
|
|
|
|
|
|
|
|
|
|
try
|
|
|
|
|
{
|
|
|
|
|
// Set all values that match
|
|
|
|
|
SetJsonValue(root);
|
|
|
|
|
}
|
|
|
|
|
catch (exception e)
|
|
|
|
|
{
|
|
|
|
|
// Error parsing JSON (or missing keys)
|
|
|
|
|
throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-12-07 16:52:09 -06:00
|
|
|
// Load Json::JsonValue into this object.
// Applies shared reader settings, then the FFmpeg-specific "path" key, and
// re-opens the reader if it was already open so the new settings take effect.
void FFmpegReader::SetJsonValue(Json::Value root) throw(InvalidFile) {

	// Set parent data (shared reader properties)
	ReaderBase::SetJsonValue(root);

	// Set data from Json (if key is found)
	if (!root["path"].isNull())
		path = root["path"].asString();

	// Re-Open path, and re-init everything (if needed)
	if (is_open)
	{
		Close();
		Open();
	}
}
|