/**
 * @file
 * @brief Source file for FFmpegReader class
 * @author Jonathan Thomas <jonathan@openshot.org>, Fabrice Bellard
 *
 * @section LICENSE
 *
 * Copyright (c) 2008-2013 OpenShot Studios, LLC, Fabrice Bellard
 * (http://www.openshotstudios.com). This file is part of
 * OpenShot Library (http://www.openshot.org), an open-source project
 * dedicated to delivering high quality video editing and animation solutions
 * to the world.
 *
 * This file is originally based on the Libavformat API example, and then modified
 * by the libopenshot project.
 *
 * OpenShot Library (libopenshot) is free software: you can redistribute it
 * and/or modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * OpenShot Library (libopenshot) is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
 */
2011-10-11 08:44:27 -05:00
# include "../include/FFmpegReader.h"
using namespace openshot ;
2017-10-26 18:44:35 -05:00
// Constructor: create a reader for the given media file and immediately
// inspect it (Open/Close once) to populate the info struct.
//
// Fix: this constructor previously duplicated the entire member-initializer
// list and body of the two-argument constructor. It now delegates to
// FFmpegReader(path, true), which performs the identical initialization,
// cache sizing, and inspect-on-construct behavior, so the two can never
// drift out of sync.
FFmpegReader::FFmpegReader(string path)
	: FFmpegReader(path, true) {
}
2017-10-26 18:44:35 -05:00
// Constructor: create a reader for the given media file. When inspect_reader
// is true, the file is opened and closed once so the info struct (width,
// height, fps, sample rate, ...) is populated before first use.
FFmpegReader::FFmpegReader(string path, bool inspect_reader)
	: last_frame(0), is_seeking(0), seeking_pts(0), seeking_frame(0), seek_count(0),
	  audio_pts_offset(99999), video_pts_offset(99999), path(path), is_video_seek(true), check_interlace(false),
	  check_fps(false), enable_seek(true), is_open(false), seek_audio_frame_found(0), seek_video_frame_found(0),
	  prev_samples(0), prev_pts(0), pts_total(0), pts_counter(0), is_duration_known(false), largest_frame_processed(0),
	  current_video_frame(0), has_missing_frames(false), num_packets_since_video_frame(0), num_checks_since_final(0),
	  packet(NULL), use_omp_threads(true) {

	// Initialize FFmpeg, and register all formats and codecs
	AV_REGISTER_ALL
	AVCODEC_REGISTER_ALL

	// Size the working, missing, and final caches from the (default) info values
	working_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * info.fps.ToDouble() * 2, info.width, info.height, info.sample_rate, info.channels);
	missing_frames.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
	final_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);

	// Optionally open & close the reader once, to populate its attributes
	// (such as height, width, etc...)
	if (inspect_reader) {
		Open();
		Close();
	}
}
2015-12-24 16:44:45 -06:00
// Destructor: automatically close the reader if the caller forgot to.
FFmpegReader::~FFmpegReader() {
	// Auto close reader if not already done
	if (is_open)
		Close();
}
2013-09-08 23:09:54 -05:00
// This struct holds the associated video frame and starting sample # for an audio packet.
2017-09-28 16:03:01 -05:00
bool AudioLocation : : is_near ( AudioLocation location , int samples_per_frame , int64_t amount )
2013-09-08 23:09:54 -05:00
{
// Is frame even close to this one?
if ( abs ( location . frame - frame ) > = 2 )
// This is too far away to be considered
return false ;
2017-01-07 17:34:11 -05:00
// Note that samples_per_frame can vary slightly frame to frame when the
// audio sampling rate is not an integer multiple of the video fps.
2017-09-28 16:03:01 -05:00
int64_t diff = samples_per_frame * ( location . frame - frame ) + location . sample_start - sample_start ;
2017-01-07 17:34:11 -05:00
if ( abs ( diff ) < = amount )
2013-09-08 23:09:54 -05:00
// close
return true ;
// not close
return false ;
}
2017-10-26 18:44:35 -05:00
void FFmpegReader : : Open ( )
2011-10-11 08:44:27 -05:00
{
2012-10-08 15:02:52 -05:00
// Open reader if not already open
if ( ! is_open )
2011-10-11 08:44:27 -05:00
{
2012-10-08 15:02:52 -05:00
// Initialize format context
pFormatCtx = NULL ;
// Open video file
if ( avformat_open_input ( & pFormatCtx , path . c_str ( ) , NULL , NULL ) ! = 0 )
throw InvalidFile ( " File could not be opened. " , path ) ;
// Retrieve stream information
if ( avformat_find_stream_info ( pFormatCtx , NULL ) < 0 )
throw NoStreamsFound ( " No streams found in file. " , path ) ;
videoStream = - 1 ;
audioStream = - 1 ;
// Loop through each stream, and identify the video and audio stream index
for ( unsigned int i = 0 ; i < pFormatCtx - > nb_streams ; i + + )
{
// Is this a video stream?
2018-03-21 02:10:46 -05:00
if ( AV_GET_CODEC_TYPE ( pFormatCtx - > streams [ i ] ) = = AVMEDIA_TYPE_VIDEO & & videoStream < 0 ) {
2012-10-08 15:02:52 -05:00
videoStream = i ;
}
// Is this an audio stream?
2018-03-21 02:10:46 -05:00
if ( AV_GET_CODEC_TYPE ( pFormatCtx - > streams [ i ] ) = = AVMEDIA_TYPE_AUDIO & & audioStream < 0 ) {
2012-10-08 15:02:52 -05:00
audioStream = i ;
}
2011-10-11 08:44:27 -05:00
}
2012-10-08 15:02:52 -05:00
if ( videoStream = = - 1 & & audioStream = = - 1 )
throw NoStreamsFound ( " No video or audio streams found in this file. " , path ) ;
// Is there a video stream?
if ( videoStream ! = - 1 )
{
// Set the stream index
info . video_stream_index = videoStream ;
// Set the codec and codec context pointers
pStream = pFormatCtx - > streams [ videoStream ] ;
2018-03-21 02:10:46 -05:00
// Find the codec ID from stream
AVCodecID codecId = AV_FIND_DECODER_CODEC_ID ( pStream ) ;
// Get codec and codec context from stream
AVCodec * pCodec = avcodec_find_decoder ( codecId ) ;
pCodecCtx = AV_GET_CODEC_CONTEXT ( pStream , pCodec ) ;
2012-10-08 15:02:52 -05:00
2017-10-01 17:54:21 -05:00
// Set number of threads equal to number of processors (not to exceed 16)
2018-09-11 10:48:30 -05:00
pCodecCtx - > thread_count = min ( FF_NUM_PROCESSORS , 16 ) ;
2012-10-08 15:02:52 -05:00
if ( pCodec = = NULL ) {
throw InvalidCodec ( " A valid video codec could not be found for this file. " , path ) ;
}
2018-06-29 15:06:34 -05:00
// Init options
AVDictionary * opts = NULL ;
av_dict_set ( & opts , " strict " , " experimental " , 0 ) ;
2012-10-08 15:02:52 -05:00
// Open video codec
2018-06-29 15:06:34 -05:00
if ( avcodec_open2 ( pCodecCtx , pCodec , & opts ) < 0 )
2012-10-08 15:02:52 -05:00
throw InvalidCodec ( " A video codec was found, but could not be opened. " , path ) ;
2018-06-29 15:06:34 -05:00
// Free options
av_dict_free ( & opts ) ;
2012-10-08 15:02:52 -05:00
// Update the File Info struct with video details (if a video stream is found)
UpdateVideoInfo ( ) ;
2011-10-11 08:44:27 -05:00
}
2012-10-08 15:02:52 -05:00
// Is there an audio stream?
if ( audioStream ! = - 1 )
{
// Set the stream index
info . audio_stream_index = audioStream ;
// Get a pointer to the codec context for the audio stream
aStream = pFormatCtx - > streams [ audioStream ] ;
2018-03-21 02:10:46 -05:00
// Find the codec ID from stream
AVCodecID codecId = AV_FIND_DECODER_CODEC_ID ( aStream ) ;
// Get codec and codec context from stream
AVCodec * aCodec = avcodec_find_decoder ( codecId ) ;
aCodecCtx = AV_GET_CODEC_CONTEXT ( aStream , aCodec ) ;
2012-10-08 15:02:52 -05:00
2017-10-01 17:54:21 -05:00
// Set number of threads equal to number of processors (not to exceed 16)
2018-09-11 10:48:30 -05:00
aCodecCtx - > thread_count = min ( FF_NUM_PROCESSORS , 16 ) ;
2012-10-08 15:02:52 -05:00
if ( aCodec = = NULL ) {
throw InvalidCodec ( " A valid audio codec could not be found for this file. " , path ) ;
}
2018-06-29 15:06:34 -05:00
// Init options
AVDictionary * opts = NULL ;
av_dict_set ( & opts , " strict " , " experimental " , 0 ) ;
2012-10-08 15:02:52 -05:00
// Open audio codec
2018-06-29 15:06:34 -05:00
if ( avcodec_open2 ( aCodecCtx , aCodec , & opts ) < 0 )
2012-10-08 15:02:52 -05:00
throw InvalidCodec ( " An audio codec was found, but could not be opened. " , path ) ;
2018-06-29 15:06:34 -05:00
// Free options
av_dict_free ( & opts ) ;
2012-10-08 15:02:52 -05:00
// Update the File Info struct with audio details (if an audio stream is found)
UpdateAudioInfo ( ) ;
}
2018-02-03 01:57:18 -06:00
// Add format metadata (if any)
AVDictionaryEntry * tag = NULL ;
while ( ( tag = av_dict_get ( pFormatCtx - > metadata , " " , tag , AV_DICT_IGNORE_SUFFIX ) ) ) {
QString str_key = tag - > key ;
QString str_value = tag - > value ;
info . metadata [ str_key . toStdString ( ) ] = str_value . trimmed ( ) . toStdString ( ) ;
}
2012-11-20 10:15:39 -06:00
// Init previous audio location to zero
previous_packet_location . frame = - 1 ;
previous_packet_location . sample_start = 0 ;
2015-06-01 00:20:14 -07:00
// Adjust cache size based on size of frame and audio
2018-04-14 16:25:13 -05:00
working_cache . SetMaxBytesFromInfo ( OPEN_MP_NUM_PROCESSORS * info . fps . ToDouble ( ) * 2 , info . width , info . height , info . sample_rate , info . channels ) ;
2016-07-27 13:18:55 -05:00
missing_frames . SetMaxBytesFromInfo ( OPEN_MP_NUM_PROCESSORS * 2 , info . width , info . height , info . sample_rate , info . channels ) ;
final_cache . SetMaxBytesFromInfo ( OPEN_MP_NUM_PROCESSORS * 2 , info . width , info . height , info . sample_rate , info . channels ) ;
2015-06-01 00:20:14 -07:00
2018-09-11 00:40:31 -05:00
// Initialize OMP threading support
use_omp_threads = openshot : : IsOMPEnabled ( ) ;
2012-10-08 15:02:52 -05:00
// Mark as "open"
is_open = true ;
2011-10-11 08:44:27 -05:00
}
}
void FFmpegReader : : Close ( )
{
2012-10-08 15:02:52 -05:00
// Close all objects, if reader is 'open'
if ( is_open )
2012-07-08 23:26:44 -05:00
{
2015-12-24 16:44:45 -06:00
// Mark as "closed"
is_open = false ;
2016-09-16 17:43:26 -05:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::Close " , " " , - 1 , " " , - 1 , " " , - 1 , " " , - 1 , " " , - 1 , " " , - 1 ) ;
2012-10-08 15:02:52 -05:00
// Close the codec
if ( info . has_video )
{
avcodec_flush_buffers ( pCodecCtx ) ;
2018-03-21 02:10:46 -05:00
AV_FREE_CONTEXT ( pCodecCtx ) ;
2012-10-08 15:02:52 -05:00
}
if ( info . has_audio )
{
avcodec_flush_buffers ( aCodecCtx ) ;
2018-03-21 02:10:46 -05:00
AV_FREE_CONTEXT ( aCodecCtx ) ;
2012-10-08 15:02:52 -05:00
}
2012-10-14 21:09:22 -05:00
// Clear final cache
final_cache . Clear ( ) ;
2012-10-08 15:02:52 -05:00
working_cache . Clear ( ) ;
2016-06-29 02:42:00 -05:00
missing_frames . Clear ( ) ;
2012-10-08 15:02:52 -05:00
2014-08-27 09:44:27 -05:00
// Clear processed lists
2015-08-24 01:05:48 -05:00
{
const GenericScopedLock < CriticalSection > lock ( processingCriticalSection ) ;
processed_video_frames . clear ( ) ;
processed_audio_frames . clear ( ) ;
processing_video_frames . clear ( ) ;
processing_audio_frames . clear ( ) ;
2016-06-29 02:42:00 -05:00
missing_audio_frames . clear ( ) ;
2015-08-24 01:05:48 -05:00
missing_video_frames . clear ( ) ;
2016-12-07 01:06:16 -06:00
missing_audio_frames_source . clear ( ) ;
missing_video_frames_source . clear ( ) ;
2016-06-29 02:42:00 -05:00
checked_frames . clear ( ) ;
2015-08-24 01:05:48 -05:00
}
2014-08-27 09:44:27 -05:00
2012-10-08 15:02:52 -05:00
// Close the video file
avformat_close_input ( & pFormatCtx ) ;
av_freep ( & pFormatCtx ) ;
2015-02-19 01:03:22 -06:00
// Reset some variables
2012-10-10 14:49:33 -05:00
last_frame = 0 ;
2015-02-19 01:03:22 -06:00
largest_frame_processed = 0 ;
seek_audio_frame_found = 0 ;
seek_video_frame_found = 0 ;
2015-08-24 01:05:48 -05:00
current_video_frame = 0 ;
has_missing_frames = false ;
2012-07-08 23:26:44 -05:00
}
2011-10-11 08:44:27 -05:00
}
void FFmpegReader : : UpdateAudioInfo ( )
{
// Set values of FileInfo struct
info . has_audio = true ;
2012-06-16 02:12:48 -05:00
info . file_size = pFormatCtx - > pb ? avio_size ( pFormatCtx - > pb ) : - 1 ;
2011-10-11 08:44:27 -05:00
info . acodec = aCodecCtx - > codec - > name ;
2018-03-21 02:10:46 -05:00
info . channels = AV_GET_CODEC_ATTRIBUTES ( aStream , aCodecCtx ) - > channels ;
if ( AV_GET_CODEC_ATTRIBUTES ( aStream , aCodecCtx ) - > channel_layout = = 0 )
AV_GET_CODEC_ATTRIBUTES ( aStream , aCodecCtx ) - > channel_layout = av_get_default_channel_layout ( AV_GET_CODEC_ATTRIBUTES ( aStream , aCodecCtx ) - > channels ) ;
info . channel_layout = ( ChannelLayout ) AV_GET_CODEC_ATTRIBUTES ( aStream , aCodecCtx ) - > channel_layout ;
info . sample_rate = AV_GET_CODEC_ATTRIBUTES ( aStream , aCodecCtx ) - > sample_rate ;
info . audio_bit_rate = AV_GET_CODEC_ATTRIBUTES ( aStream , aCodecCtx ) - > bit_rate ;
2011-10-11 08:44:27 -05:00
2011-12-11 20:42:50 -06:00
// Set audio timebase
2011-10-11 08:44:27 -05:00
info . audio_timebase . num = aStream - > time_base . num ;
info . audio_timebase . den = aStream - > time_base . den ;
2011-10-27 09:40:03 -05:00
2015-08-05 23:40:58 -05:00
// Get timebase of audio stream (if valid) and greater than the current duration
if ( aStream - > duration > 0.0f & & aStream - > duration > info . duration )
2012-08-12 02:14:15 -05:00
info . duration = aStream - > duration * info . audio_timebase . ToDouble ( ) ;
2011-12-11 20:42:50 -06:00
// Check for an invalid video length
2012-08-12 02:14:15 -05:00
if ( info . has_video & & info . video_length < = 0 )
2011-12-11 20:42:50 -06:00
{
// Calculate the video length from the audio duration
info . video_length = info . duration * info . fps . ToDouble ( ) ;
}
2011-10-27 09:40:03 -05:00
// Set video timebase (if no video stream was found)
if ( ! info . has_video )
{
// Set a few important default video settings (so audio can be divided into frames)
2012-11-12 01:25:35 -06:00
info . fps . num = 24 ;
2011-10-27 09:40:03 -05:00
info . fps . den = 1 ;
2011-12-11 20:42:50 -06:00
info . video_timebase . num = 1 ;
2012-11-12 01:25:35 -06:00
info . video_timebase . den = 24 ;
2011-12-11 20:42:50 -06:00
info . video_length = info . duration * info . fps . ToDouble ( ) ;
2016-01-09 15:50:53 -06:00
info . width = 720 ;
info . height = 480 ;
2011-10-27 09:40:03 -05:00
}
2011-12-11 20:42:50 -06:00
2018-09-17 00:27:30 -05:00
// Fix invalid video lengths for certain types of files (MP3 for example)
if ( info . has_video & & ( ( info . duration * info . fps . ToDouble ( ) ) - info . video_length > 60 ) ) {
info . video_length = info . duration * info . fps . ToDouble ( ) ;
}
2018-02-03 01:57:18 -06:00
// Add audio metadata (if any found)
AVDictionaryEntry * tag = NULL ;
while ( ( tag = av_dict_get ( aStream - > metadata , " " , tag , AV_DICT_IGNORE_SUFFIX ) ) ) {
QString str_key = tag - > key ;
QString str_value = tag - > value ;
info . metadata [ str_key . toStdString ( ) ] = str_value . trimmed ( ) . toStdString ( ) ;
}
2011-10-11 08:44:27 -05:00
}
void FFmpegReader : : UpdateVideoInfo ( )
{
2018-07-25 02:24:01 -05:00
if ( check_fps )
// Already initialized all the video metadata, no reason to do it again
return ;
2011-10-11 08:44:27 -05:00
// Set values of FileInfo struct
info . has_video = true ;
2012-06-16 02:12:48 -05:00
info . file_size = pFormatCtx - > pb ? avio_size ( pFormatCtx - > pb ) : - 1 ;
2018-03-21 02:10:46 -05:00
info . height = AV_GET_CODEC_ATTRIBUTES ( pStream , pCodecCtx ) - > height ;
info . width = AV_GET_CODEC_ATTRIBUTES ( pStream , pCodecCtx ) - > width ;
2011-10-11 08:44:27 -05:00
info . vcodec = pCodecCtx - > codec - > name ;
2018-07-25 02:24:01 -05:00
info . video_bit_rate = ( pFormatCtx - > bit_rate / 8 ) ;
// set frames per second (fps)
info . fps . num = pStream - > avg_frame_rate . num ;
info . fps . den = pStream - > avg_frame_rate . den ;
2015-06-01 02:05:17 -07:00
2011-10-11 08:44:27 -05:00
if ( pStream - > sample_aspect_ratio . num ! = 0 )
2011-12-11 20:42:50 -06:00
{
2011-10-11 08:44:27 -05:00
info . pixel_ratio . num = pStream - > sample_aspect_ratio . num ;
2011-12-11 20:42:50 -06:00
info . pixel_ratio . den = pStream - > sample_aspect_ratio . den ;
}
2018-03-21 02:10:46 -05:00
else if ( AV_GET_CODEC_ATTRIBUTES ( pStream , pCodecCtx ) - > sample_aspect_ratio . num ! = 0 )
2011-12-11 20:42:50 -06:00
{
2018-03-21 02:10:46 -05:00
info . pixel_ratio . num = AV_GET_CODEC_ATTRIBUTES ( pStream , pCodecCtx ) - > sample_aspect_ratio . num ;
info . pixel_ratio . den = AV_GET_CODEC_ATTRIBUTES ( pStream , pCodecCtx ) - > sample_aspect_ratio . den ;
2011-12-11 20:42:50 -06:00
}
2011-10-11 08:44:27 -05:00
else
2011-12-11 20:42:50 -06:00
{
2011-10-11 08:44:27 -05:00
info . pixel_ratio . num = 1 ;
2011-12-11 20:42:50 -06:00
info . pixel_ratio . den = 1 ;
}
2018-03-21 02:10:46 -05:00
info . pixel_format = AV_GET_CODEC_PIXEL_FORMAT ( pStream , pCodecCtx ) ;
2011-10-11 08:44:27 -05:00
// Calculate the DAR (display aspect ratio)
2011-12-11 20:42:50 -06:00
Fraction size ( info . width * info . pixel_ratio . num , info . height * info . pixel_ratio . den ) ;
2011-10-11 08:44:27 -05:00
// Reduce size fraction
size . Reduce ( ) ;
// Set the ratio based on the reduced fraction
info . display_ratio . num = size . num ;
info . display_ratio . den = size . den ;
2011-12-11 20:42:50 -06:00
// Set the video timebase
2011-10-11 08:44:27 -05:00
info . video_timebase . num = pStream - > time_base . num ;
info . video_timebase . den = pStream - > time_base . den ;
2011-12-11 20:42:50 -06:00
// Set the duration in seconds, and video length (# of frames)
info . duration = pStream - > duration * info . video_timebase . ToDouble ( ) ;
2012-08-12 02:14:15 -05:00
2013-09-08 16:08:56 -05:00
// Check for valid duration (if found)
2012-08-12 02:14:15 -05:00
if ( info . duration < = 0.0f & & pFormatCtx - > duration > = 0 )
// Use the format's duration
info . duration = pFormatCtx - > duration / AV_TIME_BASE ;
2013-09-08 16:08:56 -05:00
// Calculate duration from filesize and bitrate (if any)
if ( info . duration < = 0.0f & & info . video_bit_rate > 0 & & info . file_size > 0 )
// Estimate from bitrate, total bytes, and framerate
info . duration = ( info . file_size / info . video_bit_rate ) ;
// No duration found in stream of file
if ( info . duration < = 0.0f )
{
// No duration is found in the video stream
info . duration = - 1 ;
info . video_length = - 1 ;
is_duration_known = false ;
}
else
{
// Yes, a duration was found
is_duration_known = true ;
// Calculate number of frames
info . video_length = round ( info . duration * info . fps . ToDouble ( ) ) ;
}
2011-12-11 20:42:50 -06:00
2012-07-03 02:42:47 -05:00
// Override an invalid framerate
2018-09-17 00:27:30 -05:00
if ( info . fps . ToFloat ( ) > 240.0f | | ( info . fps . num < = 0 | | info . fps . den < = 0 ) | | info . video_length < = 0 ) {
2018-07-25 02:24:01 -05:00
// Calculate FPS, duration, video bit rate, and video length manually
// by scanning through all the video stream packets
CheckFPS ( ) ;
2012-07-08 23:26:44 -05:00
}
2012-07-03 02:42:47 -05:00
2018-02-03 01:57:18 -06:00
// Add video metadata (if any)
AVDictionaryEntry * tag = NULL ;
while ( ( tag = av_dict_get ( pStream - > metadata , " " , tag , AV_DICT_IGNORE_SUFFIX ) ) ) {
QString str_key = tag - > key ;
QString str_value = tag - > value ;
info . metadata [ str_key . toStdString ( ) ] = str_value . trimmed ( ) . toStdString ( ) ;
}
2011-10-11 08:44:27 -05:00
}
2012-10-31 01:17:12 -05:00
2017-10-26 18:44:35 -05:00
std : : shared_ptr < Frame > FFmpegReader : : GetFrame ( int64_t requested_frame )
2011-10-11 08:44:27 -05:00
{
2012-10-09 01:45:34 -05:00
// Check for open reader (or throw exception)
if ( ! is_open )
throw ReaderClosed ( " The FFmpegReader is closed. Call Open() before calling this method . " , path) ;
2014-03-21 01:25:17 -05:00
// Adjust for a requested frame that is too small or too large
if ( requested_frame < 1 )
requested_frame = 1 ;
if ( requested_frame > info . video_length & & is_duration_known )
requested_frame = info . video_length ;
if ( info . has_video & & info . video_length = = 0 )
// Invalid duration of video file
throw InvalidFile ( " Could not detect the duration of the video or audio stream. " , path ) ;
2014-08-27 09:44:27 -05:00
// Debug output
2016-04-21 01:39:17 -05:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::GetFrame " , " requested_frame " , requested_frame , " last_frame " , last_frame , " " , - 1 , " " , - 1 , " " , - 1 , " " , - 1 ) ;
2014-08-27 09:44:27 -05:00
2011-10-11 08:44:27 -05:00
// Check the cache for this frame
2017-08-20 17:37:39 -05:00
std : : shared_ptr < Frame > frame = final_cache . GetFrame ( requested_frame ) ;
2015-08-05 23:40:58 -05:00
if ( frame ) {
2014-08-27 09:44:27 -05:00
// Debug output
2016-04-21 01:39:17 -05:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::GetFrame " , " returned cached frame " , requested_frame , " " , - 1 , " " , - 1 , " " , - 1 , " " , - 1 , " " , - 1 ) ;
2014-08-27 09:44:27 -05:00
2011-10-11 08:44:27 -05:00
// Return the cached frame
2015-08-05 23:40:58 -05:00
return frame ;
2014-08-27 09:44:27 -05:00
}
2011-10-11 08:44:27 -05:00
else
{
2018-06-21 02:44:08 -05:00
# pragma omp critical (ReadStream)
{
// Check the cache a 2nd time (due to a potential previous lock)
if ( has_missing_frames )
CheckMissingFrame ( requested_frame ) ;
frame = final_cache . GetFrame ( requested_frame ) ;
if ( frame ) {
// Debug output
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::GetFrame " , " returned cached frame on 2nd look " , requested_frame , " " , - 1 , " " , - 1 , " " , - 1 , " " , - 1 , " " , - 1 ) ;
2015-06-01 00:20:14 -07:00
2018-06-21 02:44:08 -05:00
// Return the cached frame
2012-10-10 17:27:46 -05:00
}
2018-06-21 02:44:08 -05:00
else {
// Frame is not in cache
// Reset seek count
seek_count = 0 ;
2012-10-10 17:27:46 -05:00
2018-06-21 02:44:08 -05:00
// Check for first frame (always need to get frame 1 before other frames, to correctly calculate offsets)
if ( last_frame = = 0 & & requested_frame ! = 1 )
// Get first frame
ReadStream ( 1 ) ;
2011-10-11 08:44:27 -05:00
2018-06-21 02:44:08 -05:00
// Are we within X frames of the requested frame?
int64_t diff = requested_frame - last_frame ;
if ( diff > = 1 & & diff < = 20 )
{
// Continue walking the stream
frame = ReadStream ( requested_frame ) ;
}
else
{
// Greater than 30 frames away, or backwards, we need to seek to the nearest key frame
if ( enable_seek )
// Only seek if enabled
Seek ( requested_frame ) ;
else if ( ! enable_seek & & diff < 0 )
{
// Start over, since we can't seek, and the requested frame is smaller than our position
Close ( ) ;
Open ( ) ;
}
// Then continue walking the stream
frame = ReadStream ( requested_frame ) ;
}
}
} //omp critical
return frame ;
2011-10-11 08:44:27 -05:00
}
}
// Read the stream until we find the requested Frame
2017-09-28 16:03:01 -05:00
std : : shared_ptr < Frame > FFmpegReader : : ReadStream ( int64_t requested_frame )
2011-10-11 08:44:27 -05:00
{
// Allocate video frame
2011-10-24 08:22:21 -05:00
bool end_of_stream = false ;
2012-07-03 02:59:38 -05:00
bool check_seek = false ;
2012-07-03 16:58:07 -05:00
bool frame_finished = false ;
int packet_error = - 1 ;
2011-10-11 08:44:27 -05:00
2012-08-24 17:03:23 -05:00
// Minimum number of packets to process (for performance reasons)
2013-01-25 02:24:18 -06:00
int packets_processed = 0 ;
2014-04-02 16:48:27 -05:00
int minimum_packets = OPEN_MP_NUM_PROCESSORS ;
2017-05-18 17:04:34 -05:00
int max_packets = 4096 ;
2014-04-02 16:48:27 -05:00
// Set the number of threads in OpenMP
omp_set_num_threads ( OPEN_MP_NUM_PROCESSORS ) ;
// Allow nested OpenMP sections
2012-11-12 17:21:21 -06:00
omp_set_nested ( true ) ;
2013-09-08 16:08:56 -05:00
2014-08-27 09:44:27 -05:00
// Debug output
2016-04-21 01:39:17 -05:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::ReadStream " , " requested_frame " , requested_frame , " OPEN_MP_NUM_PROCESSORS " , OPEN_MP_NUM_PROCESSORS , " " , - 1 , " " , - 1 , " " , - 1 , " " , - 1 ) ;
2014-08-27 09:44:27 -05:00
2012-06-18 09:26:14 -05:00
# pragma omp parallel
2011-10-11 08:44:27 -05:00
{
2013-02-13 02:46:55 -06:00
# pragma omp single
2011-10-11 08:44:27 -05:00
{
// Loop through the stream until the correct frame is found
while ( true )
{
2015-06-01 00:20:14 -07:00
// Get the next packet into a local variable called packet
2012-07-03 16:58:07 -05:00
packet_error = GetNextPacket ( ) ;
2017-08-20 17:37:39 -05:00
int processing_video_frames_size = 0 ;
int processing_audio_frames_size = 0 ;
{
const GenericScopedLock < CriticalSection > lock ( processingCriticalSection ) ;
processing_video_frames_size = processing_video_frames . size ( ) ;
processing_audio_frames_size = processing_audio_frames . size ( ) ;
}
2013-02-15 00:23:55 -06:00
// Wait if too many frames are being processed
2017-08-20 17:37:39 -05:00
while ( processing_video_frames_size + processing_audio_frames_size > = minimum_packets ) {
2016-04-04 23:09:18 -05:00
usleep ( 2500 ) ;
2017-08-20 17:37:39 -05:00
const GenericScopedLock < CriticalSection > lock ( processingCriticalSection ) ;
processing_video_frames_size = processing_video_frames . size ( ) ;
processing_audio_frames_size = processing_audio_frames . size ( ) ;
}
2013-02-15 00:23:55 -06:00
2011-10-24 08:22:21 -05:00
// Get the next packet (if any)
2012-07-03 16:58:07 -05:00
if ( packet_error < 0 )
2011-10-24 08:22:21 -05:00
{
// Break loop when no more packets found
end_of_stream = true ;
break ;
}
2011-10-11 08:44:27 -05:00
2015-02-05 00:00:52 -06:00
// Debug output
2017-08-20 17:37:39 -05:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::ReadStream (GetNextPacket) " , " requested_frame " , requested_frame , " processing_video_frames_size " , processing_video_frames_size , " processing_audio_frames_size " , processing_audio_frames_size , " minimum_packets " , minimum_packets , " packets_processed " , packets_processed , " is_seeking " , is_seeking ) ;
2015-02-05 00:00:52 -06:00
2011-10-11 08:44:27 -05:00
// Video packet
2016-04-22 02:43:06 -05:00
if ( info . has_video & & packet - > stream_index = = videoStream )
2011-10-11 08:44:27 -05:00
{
2016-01-01 01:39:56 -06:00
// Reset this counter, since we have a video packet
num_packets_since_video_frame = 0 ;
2011-10-24 08:22:21 -05:00
// Check the status of a seek (if any)
2018-04-14 16:25:13 -05:00
if ( is_seeking )
2015-06-01 00:20:14 -07:00
# pragma omp critical (openshot_seek)
2018-04-14 16:25:13 -05:00
check_seek = CheckSeek ( true ) ;
else
check_seek = false ;
2012-07-03 02:59:38 -05:00
2018-04-14 16:25:13 -05:00
if ( check_seek ) {
// Jump to the next iteration of this loop
continue ;
}
2011-10-11 08:44:27 -05:00
2015-06-01 00:20:14 -07:00
// Get the AVFrame from the current packet
2012-07-03 16:58:07 -05:00
frame_finished = GetAVFrame ( ) ;
2011-10-11 08:44:27 -05:00
// Check if the AVFrame is finished and set it
2012-07-03 16:58:07 -05:00
if ( frame_finished )
2011-10-11 08:44:27 -05:00
{
2011-10-24 08:22:21 -05:00
// Update PTS / Frame Offset (if any)
UpdatePTSOffset ( true ) ;
2011-10-11 08:44:27 -05:00
// Process Video Packet
ProcessVideoPacket ( requested_frame ) ;
2018-09-11 00:40:31 -05:00
if ( ! use_omp_threads ) {
// Wait on each OMP task to complete before moving on to the next one. This slows
// down processing considerably, but might be more stable on some systems.
# pragma omp taskwait
}
2011-10-11 08:44:27 -05:00
}
}
// Audio packet
2016-04-22 02:43:06 -05:00
else if ( info . has_audio & & packet - > stream_index = = audioStream )
2011-10-11 08:44:27 -05:00
{
2016-01-01 01:39:56 -06:00
// Increment this (to track # of packets since the last video packet)
num_packets_since_video_frame + + ;
2011-10-24 08:22:21 -05:00
// Check the status of a seek (if any)
2018-04-14 16:25:13 -05:00
if ( is_seeking )
2015-06-01 00:20:14 -07:00
# pragma omp critical (openshot_seek)
2018-04-14 16:25:13 -05:00
check_seek = CheckSeek ( false ) ;
else
check_seek = false ;
2012-07-03 02:59:38 -05:00
2018-04-14 16:25:13 -05:00
if ( check_seek ) {
// Jump to the next iteration of this loop
continue ;
}
2011-10-11 08:44:27 -05:00
2011-10-24 08:22:21 -05:00
// Update PTS / Frame Offset (if any)
UpdatePTSOffset ( false ) ;
// Determine related video frame and starting sample # from audio PTS
2013-09-10 12:59:06 -05:00
AudioLocation location = GetAudioPTSLocation ( packet - > pts ) ;
2012-07-02 00:51:10 -05:00
2011-10-24 08:22:21 -05:00
// Process Audio Packet
ProcessAudioPacket ( requested_frame , location . frame , location . sample_start ) ;
2011-10-11 08:44:27 -05:00
}
2011-10-24 08:22:21 -05:00
// Check if working frames are 'finished'
2016-01-05 01:59:50 -06:00
if ( ! is_seeking ) {
// Check for any missing frames
CheckMissingFrame ( requested_frame ) ;
// Check for final frames
2015-08-24 01:05:48 -05:00
CheckWorkingFrames ( false , requested_frame ) ;
2016-01-05 01:59:50 -06:00
}
2012-06-18 09:26:14 -05:00
2015-06-01 00:20:14 -07:00
// Check if requested 'final' frame is available
2018-09-11 00:40:31 -05:00
bool is_cache_found = ( final_cache . GetFrame ( requested_frame ) ! = NULL ) ;
2012-08-26 02:44:05 -05:00
2015-06-01 00:20:14 -07:00
// Increment frames processed
packets_processed + + ;
2012-07-03 02:42:47 -05:00
2016-01-05 01:59:50 -06:00
// Break once the frame is found
2017-05-17 01:17:42 -05:00
if ( ( is_cache_found & & packets_processed > = minimum_packets ) | | packets_processed > max_packets )
2011-10-24 08:22:21 -05:00
break ;
2011-10-11 08:44:27 -05:00
} // end while
2012-08-28 15:53:18 -05:00
} // end omp single
2018-09-11 00:40:31 -05:00
2011-10-11 08:44:27 -05:00
} // end omp parallel
2014-08-27 09:44:27 -05:00
// Debug output
2016-04-21 01:39:17 -05:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::ReadStream (Completed) " , " packets_processed " , packets_processed , " end_of_stream " , end_of_stream , " largest_frame_processed " , largest_frame_processed , " Working Cache Count " , working_cache . Count ( ) , " " , - 1 , " " , - 1 ) ;
2011-10-11 08:44:27 -05:00
2014-01-28 02:41:15 -06:00
// End of stream?
2015-08-05 23:40:58 -05:00
if ( end_of_stream )
2014-01-28 02:41:15 -06:00
// Mark the any other working frames as 'finished'
2015-08-24 01:05:48 -05:00
CheckWorkingFrames ( end_of_stream , requested_frame ) ;
2011-10-24 08:22:21 -05:00
// Return requested frame (if found)
2017-08-20 17:37:39 -05:00
std : : shared_ptr < Frame > frame = final_cache . GetFrame ( requested_frame ) ;
2015-08-05 23:40:58 -05:00
if ( frame )
2011-10-24 08:22:21 -05:00
// Return prepared frame
2015-08-05 23:40:58 -05:00
return frame ;
2015-02-05 00:00:52 -06:00
else {
// Check if largest frame is still cached
2015-08-05 23:40:58 -05:00
frame = final_cache . GetFrame ( largest_frame_processed ) ;
if ( frame ) {
2015-02-05 00:00:52 -06:00
// return the largest processed frame (assuming it was the last in the video file)
2015-08-05 23:40:58 -05:00
return frame ;
2015-02-05 00:00:52 -06:00
}
else {
// The largest processed frame is no longer in cache, return a blank frame
2017-08-20 17:37:39 -05:00
std : : shared_ptr < Frame > f = CreateFrame ( largest_frame_processed ) ;
2015-02-05 00:00:52 -06:00
f - > AddColor ( info . width , info . height , " #000 " ) ;
return f ;
}
}
2014-01-28 02:41:15 -06:00
2011-10-11 08:44:27 -05:00
}
// Get the next packet (if any)
int FFmpegReader : : GetNextPacket ( )
{
2015-06-01 00:20:14 -07:00
int found_packet = 0 ;
2012-07-02 00:51:10 -05:00
AVPacket * next_packet = new AVPacket ( ) ;
2015-06-01 00:20:14 -07:00
found_packet = av_read_frame ( pFormatCtx , next_packet ) ;
2012-07-02 00:51:10 -05:00
2017-08-20 17:37:39 -05:00
if ( packet ) {
// Remove previous packet before getting next one
RemoveAVPacket ( packet ) ;
packet = NULL ;
}
2012-07-02 00:51:10 -05:00
if ( found_packet > = 0 )
{
2016-11-14 22:37:44 -06:00
// Update current packet pointer
packet = next_packet ;
2015-06-01 00:20:14 -07:00
}
2012-07-02 00:51:10 -05:00
// Return if packet was found (or error number)
return found_packet ;
2011-10-11 08:44:27 -05:00
}
// Get an AVFrame (if any)
// Decodes the current member 'packet' with the video codec context and, when a
// complete frame is produced, copies its image data into the freshly-allocated
// member 'pFrame' (freed later via RemoveAVFrame). Returns non-zero (true)
// only when a full frame was decoded and copied.
bool FFmpegReader : : GetAVFrame ( )
{
2015-06-01 00:20:14 -07:00
int frameFinished = - 1 ;
2018-03-21 02:10:46 -05:00
int ret = 0 ;
2011-10-11 08:44:27 -05:00
2015-06-01 00:20:14 -07:00
// Decode video frame
2015-09-23 00:27:28 -05:00
AVFrame * next_frame = AV_ALLOCATE_FRAME ( ) ;
2015-06-01 00:20:14 -07:00
// Serialize decoding against other threads in the same named critical section
# pragma omp critical (packet_cache)
2011-12-11 20:42:50 -06:00
{
2018-03-21 02:10:46 -05:00
// Newer FFmpeg (3.2+) send/receive decode API
# if IS_FFMPEG_3_2
frameFinished = 0 ;
ret = avcodec_send_packet ( pCodecCtx , packet ) ;
if ( ret < 0 | | ret = = AVERROR ( EAGAIN ) | | ret = = AVERROR_EOF ) {
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::GetAVFrame (Packet not sent) " , " " , - 1 , " " , - 1 , " " , - 1 , " " , - 1 , " " , - 1 , " " , - 1 ) ;
2012-07-02 00:51:10 -05:00
}
2018-03-21 02:10:46 -05:00
else {
// NOTE(review): allocated with 'new' (zero-initialized) rather than
// av_frame_alloc(); only data/linesize appear to be used below — confirm.
pFrame = new AVFrame ( ) ;
while ( ret > = 0 ) {
ret = avcodec_receive_frame ( pCodecCtx , next_frame ) ;
if ( ret = = AVERROR ( EAGAIN ) | | ret = = AVERROR_EOF ) {
break ;
}
// TODO also handle possible further frames
// Use only the first frame like avcodec_decode_video2
if ( frameFinished = = 0 ) {
frameFinished = 1 ;
// Copy the decoded image into pFrame's own buffers, since the
// decoder reuses next_frame's storage on the next receive call
av_image_alloc ( pFrame - > data , pFrame - > linesize , info . width , info . height , ( AVPixelFormat ) ( pStream - > codecpar - > format ) , 1 ) ;
av_image_copy ( pFrame - > data , pFrame - > linesize , ( const uint8_t * * ) next_frame - > data , next_frame - > linesize ,
( AVPixelFormat ) ( pStream - > codecpar - > format ) , info . width , info . height ) ;
// Detect interlaced frame (only once per file)
if ( ! check_interlace ) {
check_interlace = true ;
info . interlaced_frame = next_frame - > interlaced_frame ;
info . top_field_first = next_frame - > top_field_first ;
}
}
}
}
# else
avcodec_decode_video2 ( pCodecCtx , next_frame , & frameFinished , packet ) ;
// is frame finished
if ( frameFinished ) {
// AVFrames are clobbered on the each call to avcodec_decode_video, so we
// must make a copy of the image data before this method is called again.
pFrame = AV_ALLOCATE_FRAME ( ) ;
avpicture_alloc ( ( AVPicture * ) pFrame , pCodecCtx - > pix_fmt , info . width , info . height ) ;
av_picture_copy ( ( AVPicture * ) pFrame , ( AVPicture * ) next_frame , pCodecCtx - > pix_fmt , info . width ,
info . height ) ;
// Detect interlaced frame (only once)
if ( ! check_interlace ) {
check_interlace = true ;
info . interlaced_frame = next_frame - > interlaced_frame ;
info . top_field_first = next_frame - > top_field_first ;
}
}
# endif
2012-07-03 16:58:07 -05:00
}
2011-12-11 20:42:50 -06:00
2012-10-12 16:41:23 -05:00
// deallocate the frame
2015-09-23 00:27:28 -05:00
AV_FREE_FRAME ( & next_frame ) ;
2012-10-12 16:41:23 -05:00
2011-10-11 08:44:27 -05:00
// Did we get a video frame?
return frameFinished ;
}
// Check the current seek position and determine if we need to seek again
// Called after decoding a packet while a seek is pending. Compares the first
// video/audio frame numbers found after the seek against the seek target:
// if we landed past the target, it re-seeks further back; otherwise it clears
// the seeking state. Returns the (possibly updated) 'is_seeking' flag, i.e.
// true while the caller should keep discarding packets.
2011-10-24 08:22:21 -05:00
bool FFmpegReader : : CheckSeek ( bool is_video )
2011-10-11 08:44:27 -05:00
{
// Are we seeking for a specific frame?
if ( is_seeking )
{
2014-08-27 09:44:27 -05:00
// Determine if both an audio and video packet have been decoded since the seek happened.
// If not, allow the ReadStream method to keep looping
2014-09-13 16:35:11 -05:00
if ( ( is_video_seek & & ! seek_video_frame_found ) | | ( ! is_video_seek & & ! seek_audio_frame_found ) )
2014-08-27 09:44:27 -05:00
return false ;
2016-01-05 01:59:50 -06:00
// Check for both streams
if ( ( info . has_video & & ! seek_video_frame_found ) | | ( info . has_audio & & ! seek_audio_frame_found ) )
return false ;
2014-09-26 09:35:38 -05:00
// Determine max seeked frame
// (largest of the first audio / video frame numbers seen post-seek)
2017-09-28 16:03:01 -05:00
int64_t max_seeked_frame = seek_audio_frame_found ; // determine max seeked frame
2014-09-26 09:35:38 -05:00
if ( seek_video_frame_found > max_seeked_frame )
max_seeked_frame = seek_video_frame_found ;
2011-10-11 08:44:27 -05:00
// determine if we are "before" the requested frame
2014-09-26 09:35:38 -05:00
if ( max_seeked_frame > = seeking_frame )
2011-10-11 08:44:27 -05:00
{
2012-10-12 16:41:23 -05:00
// SEEKED TOO FAR
2016-04-21 01:39:17 -05:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::CheckSeek (Too far, seek again) " , " is_video_seek " , is_video_seek , " max_seeked_frame " , max_seeked_frame , " seeking_frame " , seeking_frame , " seeking_pts " , seeking_pts , " seek_video_frame_found " , seek_video_frame_found , " seek_audio_frame_found " , seek_audio_frame_found ) ;
2011-10-11 08:44:27 -05:00
2012-10-12 16:41:23 -05:00
// Seek again... to the nearest Keyframe
// (back off quadratically with each failed attempt: 10 * seek_count^2 frames)
2018-04-14 16:25:13 -05:00
Seek ( seeking_frame - ( 10 * seek_count * seek_count ) ) ;
2012-10-12 16:41:23 -05:00
}
else
{
2014-09-13 16:35:11 -05:00
// SEEK WORKED
2016-04-21 01:39:17 -05:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::CheckSeek (Successful) " , " is_video_seek " , is_video_seek , " current_pts " , packet - > pts , " seeking_pts " , seeking_pts , " seeking_frame " , seeking_frame , " seek_video_frame_found " , seek_video_frame_found , " seek_audio_frame_found " , seek_audio_frame_found ) ;
2014-08-27 09:44:27 -05:00
2012-10-12 16:41:23 -05:00
// Seek worked, and we are "before" the requested frame
is_seeking = false ;
seeking_frame = 0 ;
2014-04-05 10:19:20 -05:00
seeking_pts = - 1 ;
2011-10-11 08:44:27 -05:00
}
}
// return the pts to seek to (if any)
return is_seeking ;
}
// Process a video packet
// Converts the decoded member 'pFrame' (set by GetAVFrame) into an RGBA image,
// optionally down-scaled to max_width/max_height, attaches it to the matching
// Frame object, and adds that frame to the working cache. The heavy conversion
// runs inside an OpenMP task. Frames more than 20 before 'requested_frame'
// (or with an invalid frame #) are discarded without conversion.
2017-09-28 16:03:01 -05:00
void FFmpegReader : : ProcessVideoPacket ( int64_t requested_frame )
2011-10-11 08:44:27 -05:00
{
2011-10-24 08:22:21 -05:00
// Calculate current frame #
2017-09-28 16:03:01 -05:00
int64_t current_frame = ConvertVideoPTStoFrame ( GetVideoPTS ( ) ) ;
2011-10-11 08:44:27 -05:00
2016-01-05 01:59:50 -06:00
// Track 1st video packet after a successful seek
if ( ! seek_video_frame_found & & is_seeking )
seek_video_frame_found = current_frame ;
2015-08-24 01:05:48 -05:00
// Are we close enough to decode the frame? and is this frame # valid?
2016-12-07 01:06:16 -06:00
if ( ( current_frame < ( requested_frame - 20 ) ) or ( current_frame = = - 1 ) )
2012-07-01 01:43:06 -05:00
{
2015-06-01 00:20:14 -07:00
// Remove frame and packet
RemoveAVFrame ( pFrame ) ;
2012-07-03 16:58:07 -05:00
2014-08-27 09:44:27 -05:00
// Debug output
2016-04-21 01:39:17 -05:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::ProcessVideoPacket (Skipped) " , " requested_frame " , requested_frame , " current_frame " , current_frame , " " , - 1 , " " , - 1 , " " , - 1 , " " , - 1 ) ;
2014-08-27 09:44:27 -05:00
2011-10-11 08:44:27 -05:00
// Skip to next frame without decoding or caching
return ;
2012-07-01 01:43:06 -05:00
}
2011-10-11 08:44:27 -05:00
2014-08-27 09:44:27 -05:00
// Debug output
2016-04-21 01:39:17 -05:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::ProcessVideoPacket (Before) " , " requested_frame " , requested_frame , " current_frame " , current_frame , " " , - 1 , " " , - 1 , " " , - 1 , " " , - 1 ) ;
2014-08-27 09:44:27 -05:00
2012-07-01 01:43:06 -05:00
// Init some things local (for OpenMP)
2018-03-21 02:10:46 -05:00
PixelFormat pix_fmt = AV_GET_CODEC_PIXEL_FORMAT ( pStream , pCodecCtx ) ;
2012-06-18 09:26:14 -05:00
int height = info . height ;
int width = info . width ;
2017-09-28 16:03:01 -05:00
int64_t video_length = info . video_length ;
2018-03-21 02:10:46 -05:00
AVFrame * my_frame = pFrame ;
2012-06-18 09:26:14 -05:00
2012-07-06 02:34:18 -05:00
// Add video frame to list of processing video frames
// NOTE(review): this scoped lock lives until the end of the function
// (i.e. past the task spawn) — confirm that scope is intended.
2015-08-24 01:05:48 -05:00
const GenericScopedLock < CriticalSection > lock ( processingCriticalSection ) ;
2012-07-06 02:34:18 -05:00
processing_video_frames [ current_frame ] = current_frame ;
2017-08-20 17:37:39 -05:00
# pragma omp task firstprivate(current_frame, my_frame, height, width, video_length, pix_fmt)
2011-10-11 08:44:27 -05:00
{
2012-06-29 02:02:12 -05:00
// Create variables for a RGB Frame (since most videos are not in RGB, we must convert it)
2012-06-18 09:26:14 -05:00
AVFrame * pFrameRGB = NULL ;
int numBytes ;
uint8_t * buffer = NULL ;
// Allocate an AVFrame structure
2015-09-23 00:27:28 -05:00
pFrameRGB = AV_ALLOCATE_FRAME ( ) ;
2012-06-18 09:26:14 -05:00
if ( pFrameRGB = = NULL )
throw OutOfBoundsFrame ( " Convert Image Broke! " , current_frame , video_length ) ;
2016-09-14 04:11:12 -05:00
// Determine if video needs to be scaled down (for performance reasons)
// Timelines pass their size to the clips, which pass their size to the readers (as max size)
// If a clip is being scaled larger, it will set max_width and max_height = 0 (which means don't down scale)
int original_height = height ;
if ( max_width ! = 0 & & max_height ! = 0 & & max_width < width & & max_height < height ) {
// Override width and height (but maintain aspect ratio)
float ratio = float ( width ) / float ( height ) ;
int possible_width = round ( max_height * ratio ) ;
int possible_height = round ( max_width / ratio ) ;
if ( possible_width < = max_width ) {
// use calculated width, and max_height
width = possible_width ;
height = max_height ;
} else {
// use max_width, and calculated height
width = max_width ;
height = possible_height ;
}
}
2012-06-18 09:26:14 -05:00
// Determine required buffer size and allocate buffer
2018-03-21 02:10:46 -05:00
numBytes = AV_GET_IMAGE_SIZE ( PIX_FMT_RGBA , width , height ) ;
2017-08-20 17:37:39 -05:00
# pragma omp critical (video_buffer)
2016-07-30 16:57:48 -05:00
buffer = ( uint8_t * ) av_malloc ( numBytes * sizeof ( uint8_t ) ) ;
2012-06-18 09:26:14 -05:00
2018-03-21 02:10:46 -05:00
// Copy picture data from one AVFrame (or AVPicture) to another one.
AV_COPY_PICTURE_DATA ( pFrameRGB , buffer , PIX_FMT_RGBA , width , height ) ;
2012-06-18 09:26:14 -05:00
2018-03-21 02:10:46 -05:00
// NOTE(review): a SwsContext is created and freed for every frame;
// sws_getCachedContext could avoid the per-frame setup cost.
SwsContext * img_convert_ctx = sws_getContext ( info . width , info . height , AV_GET_CODEC_PIXEL_FORMAT ( pStream , pCodecCtx ) , width ,
2018-09-11 00:40:31 -05:00
height , PIX_FMT_RGBA , SWS_LANCZOS , NULL , NULL , NULL ) ;
2016-09-14 04:11:12 -05:00
2012-06-18 09:26:14 -05:00
// Resize / Convert to RGB
2012-07-04 03:07:26 -05:00
sws_scale ( img_convert_ctx , my_frame - > data , my_frame - > linesize , 0 ,
2016-09-14 04:11:12 -05:00
original_height , pFrameRGB - > data , pFrameRGB - > linesize ) ;
2012-06-18 09:26:14 -05:00
2015-06-01 00:20:14 -07:00
// Create or get the existing frame object
2017-08-20 17:37:39 -05:00
std : : shared_ptr < Frame > f = CreateFrame ( current_frame ) ;
2012-07-04 03:07:26 -05:00
2012-08-15 17:27:14 -05:00
// Add Image data to frame
// (buffer is freed just below, so AddImage presumably copies the
// pixel data — verify against Frame::AddImage)
2015-06-01 00:20:14 -07:00
f - > AddImage ( width , height , 4 , QImage : : Format_RGBA8888 , buffer ) ;
2012-06-18 09:26:14 -05:00
2015-06-01 00:20:14 -07:00
// Update working cache
2016-08-31 02:02:54 -05:00
working_cache . Add ( f ) ;
2012-06-18 09:26:14 -05:00
2016-01-05 01:59:50 -06:00
// Keep track of last last_video_frame
2017-08-20 17:37:39 -05:00
# pragma omp critical (video_buffer)
2016-01-05 01:59:50 -06:00
last_video_frame = f ;
2012-06-18 09:26:14 -05:00
// Free the RGB image
av_free ( buffer ) ;
2015-09-23 00:27:28 -05:00
AV_FREE_FRAME ( & pFrameRGB ) ;
2012-06-18 09:26:14 -05:00
2015-06-01 00:20:14 -07:00
// Remove frame and packet
RemoveAVFrame ( my_frame ) ;
2016-09-14 04:11:12 -05:00
sws_freeContext ( img_convert_ctx ) ;
2012-07-03 16:58:07 -05:00
2012-07-06 02:34:18 -05:00
// Remove video frame from list of processing video frames
2014-08-27 09:44:27 -05:00
{
2015-06-01 00:20:14 -07:00
const GenericScopedLock < CriticalSection > lock ( processingCriticalSection ) ;
processing_video_frames . erase ( current_frame ) ;
processed_video_frames [ current_frame ] = current_frame ;
2014-08-27 09:44:27 -05:00
}
// Debug output
2016-04-21 01:39:17 -05:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::ProcessVideoPacket (After) " , " requested_frame " , requested_frame , " current_frame " , current_frame , " f->number " , f - > number , " " , - 1 , " " , - 1 , " " , - 1 ) ;
2012-07-06 02:34:18 -05:00
2011-10-11 08:44:27 -05:00
} // end omp task
2012-06-18 09:26:14 -05:00
2011-10-11 08:44:27 -05:00
}
// Process an audio packet
// Decodes the current audio 'packet', resamples the result to interleaved
// S16 at the same rate/channel count, splits the samples per channel, and
// distributes them across the Frame objects they belong to (starting at
// 'target_frame' / 'starting_sample'), updating the working cache and the
// processing/processed bookkeeping maps. Packets more than 20 frames before
// 'requested_frame' are skipped.
2017-09-28 16:03:01 -05:00
void FFmpegReader : : ProcessAudioPacket ( int64_t requested_frame , int64_t target_frame , int starting_sample )
2011-10-11 08:44:27 -05:00
{
2016-01-05 01:59:50 -06:00
// Track 1st audio packet after a successful seek
if ( ! seek_audio_frame_found & & is_seeking )
seek_audio_frame_found = target_frame ;
2011-10-11 08:44:27 -05:00
// Are we close enough to decode the frame's audio?
2016-12-07 01:06:16 -06:00
if ( target_frame < ( requested_frame - 20 ) )
2012-07-03 16:58:07 -05:00
{
2014-08-27 09:44:27 -05:00
// Debug output
2016-04-21 01:39:17 -05:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::ProcessAudioPacket (Skipped) " , " requested_frame " , requested_frame , " target_frame " , target_frame , " starting_sample " , starting_sample , " " , - 1 , " " , - 1 , " " , - 1 ) ;
2014-08-27 09:44:27 -05:00
2011-10-11 08:44:27 -05:00
// Skip to next frame without decoding or caching
return ;
2012-07-03 16:58:07 -05:00
}
2011-10-11 08:44:27 -05:00
2014-08-27 09:44:27 -05:00
// Debug output
2016-04-21 01:39:17 -05:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::ProcessAudioPacket (Before) " , " requested_frame " , requested_frame , " target_frame " , target_frame , " starting_sample " , starting_sample , " " , - 1 , " " , - 1 , " " , - 1 ) ;
2014-08-27 09:44:27 -05:00
2015-02-05 00:00:52 -06:00
// Init an AVFrame to hold the decoded audio samples
int frame_finished = 0 ;
2015-09-23 00:27:28 -05:00
AVFrame * audio_frame = AV_ALLOCATE_FRAME ( ) ;
AV_RESET_FRAME ( audio_frame ) ;
2015-02-05 00:00:52 -06:00
2012-08-21 15:31:52 -05:00
int packet_samples = 0 ;
2015-02-05 00:00:52 -06:00
int data_size = 0 ;
2014-09-22 00:40:21 -05:00
2015-02-05 00:00:52 -06:00
// re-initialize buffer size (it gets changed in the avcodec_decode_audio2 method call)
// NOTE(review): 'buf_size' is assigned here but never read afterwards in this
// function — likely left over from the old decode API.
2018-09-11 00:40:31 -05:00
int buf_size = AVCODEC_MAX_AUDIO_FRAME_SIZE + MY_INPUT_BUFFER_PADDING_SIZE ;
2018-03-21 02:10:46 -05:00
# pragma omp critical (ProcessAudioPacket)
{
# if IS_FFMPEG_3_2
int ret = 0 ;
frame_finished = 1 ;
while ( ( packet - > size > 0 | | ( ! packet - > data & & frame_finished ) ) & & ret > = 0 ) {
frame_finished = 0 ;
ret = avcodec_send_packet ( aCodecCtx , packet ) ;
if ( ret < 0 & & ret ! = AVERROR ( EINVAL ) & & ret ! = AVERROR_EOF ) {
avcodec_send_packet ( aCodecCtx , NULL ) ;
break ;
}
if ( ret > = 0 )
packet - > size = 0 ;
ret = avcodec_receive_frame ( aCodecCtx , audio_frame ) ;
if ( ret > = 0 )
frame_finished = 1 ;
if ( ret = = AVERROR ( EINVAL ) | | ret = = AVERROR_EOF ) {
avcodec_flush_buffers ( aCodecCtx ) ;
ret = 0 ;
}
if ( ret > = 0 ) {
ret = frame_finished ;
}
}
if ( ! packet - > data & & ! frame_finished )
{
ret = - 1 ;
}
# else
// NOTE(review): 'used' (bytes consumed) is ignored; partial-packet decode
// is not handled on this legacy path.
int used = avcodec_decode_audio4 ( aCodecCtx , audio_frame , & frame_finished , packet ) ;
# endif
}
2012-08-21 15:31:52 -05:00
2015-07-05 22:57:46 -07:00
if ( frame_finished ) {
2015-02-05 00:00:52 -06:00
// determine how many samples were decoded
// NOTE(review): 'planar' and 'data_size' are computed but not used below.
2018-03-21 02:10:46 -05:00
int planar = av_sample_fmt_is_planar ( ( AVSampleFormat ) AV_GET_CODEC_PIXEL_FORMAT ( aStream , aCodecCtx ) ) ;
2015-02-05 00:00:52 -06:00
int plane_size = - 1 ;
data_size = av_samples_get_buffer_size ( & plane_size ,
2018-03-21 02:10:46 -05:00
AV_GET_CODEC_ATTRIBUTES ( aStream , aCodecCtx ) - > channels ,
2015-02-05 00:00:52 -06:00
audio_frame - > nb_samples ,
2018-03-21 02:10:46 -05:00
( AVSampleFormat ) ( AV_GET_SAMPLE_FORMAT ( aStream , aCodecCtx ) ) , 1 ) ;
2012-08-21 15:31:52 -05:00
// Calculate total number of samples
2018-03-21 02:10:46 -05:00
packet_samples = audio_frame - > nb_samples * AV_GET_CODEC_ATTRIBUTES ( aStream , aCodecCtx ) - > channels ;
2012-08-21 15:31:52 -05:00
}
2012-11-20 10:15:39 -06:00
// Estimate the # of samples and the end of this packet's location (to prevent GAPS for the next timestamp)
2012-12-03 04:51:17 -06:00
int pts_remaining_samples = packet_samples / info . channels ; // Adjust for zero based array
// DEBUG (FOR AUDIO ISSUES) - Get the audio packet start time (in seconds)
2017-09-28 16:03:01 -05:00
int64_t adjusted_pts = packet - > pts + audio_pts_offset ;
2012-12-03 04:51:17 -06:00
double audio_seconds = double ( adjusted_pts ) * info . audio_timebase . ToDouble ( ) ;
2017-01-24 18:39:17 -06:00
double sample_seconds = double ( pts_total ) / info . sample_rate ;
2012-12-03 04:51:17 -06:00
2014-08-27 09:44:27 -05:00
// Debug output
2016-04-21 01:39:17 -05:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::ProcessAudioPacket (Decode Info A) " , " pts_counter " , pts_counter , " PTS " , adjusted_pts , " Offset " , audio_pts_offset , " PTS Diff " , adjusted_pts - prev_pts , " Samples " , pts_remaining_samples , " Sample PTS ratio " , float ( adjusted_pts - prev_pts ) / pts_remaining_samples ) ;
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::ProcessAudioPacket (Decode Info B) " , " Sample Diff " , pts_remaining_samples - prev_samples - prev_pts , " Total " , pts_total , " PTS Seconds " , audio_seconds , " Sample Seconds " , sample_seconds , " Seconds Diff " , audio_seconds - sample_seconds , " raw samples " , packet_samples ) ;
2012-12-03 04:51:17 -06:00
// DEBUG (FOR AUDIO ISSUES)
prev_pts = adjusted_pts ;
pts_total + = pts_remaining_samples ;
pts_counter + + ;
prev_samples = pts_remaining_samples ;
2015-03-07 17:07:37 -06:00
// Add audio frame to list of processing audio frames
2015-08-24 01:05:48 -05:00
{
const GenericScopedLock < CriticalSection > lock ( processingCriticalSection ) ;
processing_audio_frames . insert ( pair < int , int > ( previous_packet_location . frame , previous_packet_location . frame ) ) ;
}
2012-12-03 04:51:17 -06:00
2012-11-20 10:15:39 -06:00
// Pre-register every frame this packet's samples will touch, advancing
// previous_packet_location to where the NEXT packet should start
while ( pts_remaining_samples )
{
// Get Samples per frame (for this frame number)
2015-03-08 21:42:53 -05:00
int samples_per_frame = Frame : : GetSamplesPerFrame ( previous_packet_location . frame , info . fps , info . sample_rate , info . channels ) ;
2012-11-20 10:15:39 -06:00
// Calculate # of samples to add to this frame
int samples = samples_per_frame - previous_packet_location . sample_start ;
if ( samples > pts_remaining_samples )
samples = pts_remaining_samples ;
// Decrement remaining samples
pts_remaining_samples - = samples ;
if ( pts_remaining_samples > 0 ) {
// next frame
previous_packet_location . frame + + ;
previous_packet_location . sample_start = 0 ;
2015-03-07 17:07:37 -06:00
// Add audio frame to list of processing audio frames
2015-08-24 01:05:48 -05:00
{
const GenericScopedLock < CriticalSection > lock ( processingCriticalSection ) ;
processing_audio_frames . insert ( pair < int , int > ( previous_packet_location . frame , previous_packet_location . frame ) ) ;
}
2015-03-07 17:07:37 -06:00
2012-11-20 10:15:39 -06:00
} else {
// Increment sample start
previous_packet_location . sample_start + = samples ;
}
}
2011-10-11 08:44:27 -05:00
2017-08-20 17:37:39 -05:00
// Allocate audio buffer
2018-09-11 00:40:31 -05:00
int16_t * audio_buf = new int16_t [ AVCODEC_MAX_AUDIO_FRAME_SIZE + MY_INPUT_BUFFER_PADDING_SIZE ] ;
2015-03-07 17:07:37 -06:00
2018-03-21 02:10:46 -05:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::ProcessAudioPacket (ReSample) " , " packet_samples " , packet_samples , " info.channels " , info . channels , " info.sample_rate " , info . sample_rate , " aCodecCtx->sample_fmt " , AV_GET_SAMPLE_FORMAT ( aStream , aCodecCtx ) , " AV_SAMPLE_FMT_S16 " , AV_SAMPLE_FMT_S16 , " " , - 1 ) ;
2017-08-20 17:37:39 -05:00
// Create output frame
AVFrame * audio_converted = AV_ALLOCATE_FRAME ( ) ;
AV_RESET_FRAME ( audio_converted ) ;
audio_converted - > nb_samples = audio_frame - > nb_samples ;
av_samples_alloc ( audio_converted - > data , audio_converted - > linesize , info . channels , audio_frame - > nb_samples , AV_SAMPLE_FMT_S16 , 0 ) ;
2018-09-11 00:40:31 -05:00
SWRCONTEXT * avr = NULL ;
2017-08-20 17:37:39 -05:00
int nb_samples = 0 ;
// setup resample context
// (same rate and channel layout in/out — this only converts the sample
// format to interleaved S16)
2018-09-11 00:40:31 -05:00
avr = SWR_ALLOC ( ) ;
2018-03-21 02:10:46 -05:00
av_opt_set_int ( avr , " in_channel_layout " , AV_GET_CODEC_ATTRIBUTES ( aStream , aCodecCtx ) - > channel_layout , 0 ) ;
av_opt_set_int ( avr , " out_channel_layout " , AV_GET_CODEC_ATTRIBUTES ( aStream , aCodecCtx ) - > channel_layout , 0 ) ;
av_opt_set_int ( avr , " in_sample_fmt " , AV_GET_SAMPLE_FORMAT ( aStream , aCodecCtx ) , 0 ) ;
2017-08-20 17:37:39 -05:00
av_opt_set_int ( avr , " out_sample_fmt " , AV_SAMPLE_FMT_S16 , 0 ) ;
av_opt_set_int ( avr , " in_sample_rate " , info . sample_rate , 0 ) ;
av_opt_set_int ( avr , " out_sample_rate " , info . sample_rate , 0 ) ;
av_opt_set_int ( avr , " in_channels " , info . channels , 0 ) ;
av_opt_set_int ( avr , " out_channels " , info . channels , 0 ) ;
2018-09-11 00:40:31 -05:00
// NOTE(review): init result 'r' is never checked — a failed init would make
// the conversion below operate on an unusable context.
int r = SWR_INIT ( avr ) ;
2017-08-20 17:37:39 -05:00
// Convert audio samples
2018-09-11 00:40:31 -05:00
nb_samples = SWR_CONVERT ( avr , // audio resample context
2017-08-20 17:37:39 -05:00
audio_converted - > data , // output data pointers
audio_converted - > linesize [ 0 ] , // output plane size, in bytes. (0 if unknown)
audio_converted - > nb_samples , // maximum number of samples that the output buffer can hold
audio_frame - > data , // input data pointers
audio_frame - > linesize [ 0 ] , // input plane size, in bytes (0 if unknown)
audio_frame - > nb_samples ) ; // number of input samples to convert
// Copy audio samples over original samples
memcpy ( audio_buf , audio_converted - > data [ 0 ] , audio_converted - > nb_samples * av_get_bytes_per_sample ( AV_SAMPLE_FMT_S16 ) * info . channels ) ;
// Deallocate resample buffer
2018-09-11 00:40:31 -05:00
SWR_CLOSE ( avr ) ;
SWR_FREE ( & avr ) ;
2017-08-20 17:37:39 -05:00
avr = NULL ;
// Free AVFrames
av_free ( audio_converted - > data [ 0 ] ) ;
AV_FREE_FRAME ( & audio_converted ) ;
2017-09-28 16:03:01 -05:00
int64_t starting_frame_number = - 1 ;
2017-08-20 17:37:39 -05:00
bool partial_frame = true ;
// De-interleave: one pass over the packet per output channel
for ( int channel_filter = 0 ; channel_filter < info . channels ; channel_filter + + )
2012-08-21 15:31:52 -05:00
{
2017-08-20 17:37:39 -05:00
// Array of floats (to hold samples for each channel)
starting_frame_number = target_frame ;
int channel_buffer_size = packet_samples / info . channels ;
float * channel_buffer = new float [ channel_buffer_size ] ;
2012-08-21 21:51:00 -05:00
2017-08-20 17:37:39 -05:00
// Init buffer array
for ( int z = 0 ; z < channel_buffer_size ; z + + )
channel_buffer [ z ] = 0.0f ;
2012-08-21 21:51:00 -05:00
2017-08-20 17:37:39 -05:00
// Loop through all samples and add them to our Frame based on channel.
// Toggle through each channel number, since channel data is stored like (left right left right)
int channel = 0 ;
int position = 0 ;
for ( int sample = 0 ; sample < packet_samples ; sample + + )
2015-03-08 21:42:53 -05:00
{
2017-08-20 17:37:39 -05:00
// Only add samples for current channel
if ( channel_filter = = channel )
2012-06-29 02:02:12 -05:00
{
2017-08-20 17:37:39 -05:00
// Add sample (convert from (-32768 to 32768) to (-1.0 to 1.0))
channel_buffer [ position ] = audio_buf [ sample ] * ( 1.0f / ( 1 < < 15 ) ) ;
2011-10-24 08:22:21 -05:00
2017-08-20 17:37:39 -05:00
// Increment audio position
position + + ;
2012-06-29 02:02:12 -05:00
}
2011-10-24 08:22:21 -05:00
2017-08-20 17:37:39 -05:00
// increment channel (if needed)
if ( ( channel + 1 ) < info . channels )
// move to next channel
channel + + ;
else
// reset channel
channel = 0 ;
2011-10-24 08:22:21 -05:00
}
2011-10-11 08:44:27 -05:00
2017-08-20 17:37:39 -05:00
// Loop through samples, and add them to the correct frames
int start = starting_sample ;
int remaining_samples = channel_buffer_size ;
float * iterate_channel_buffer = channel_buffer ; // pointer to channel buffer
while ( remaining_samples > 0 )
2012-07-06 02:34:18 -05:00
{
2017-08-20 17:37:39 -05:00
// Get Samples per frame (for this frame number)
int samples_per_frame = Frame : : GetSamplesPerFrame ( starting_frame_number , info . fps , info . sample_rate , info . channels ) ;
2015-03-07 17:07:37 -06:00
2017-08-20 17:37:39 -05:00
// Calculate # of samples to add to this frame
int samples = samples_per_frame - start ;
if ( samples > remaining_samples )
samples = remaining_samples ;
2015-10-01 13:00:50 -05:00
2017-08-20 17:37:39 -05:00
// Create or get the existing frame object
std : : shared_ptr < Frame > f = CreateFrame ( starting_frame_number ) ;
// Determine if this frame was "partially" filled in
if ( samples_per_frame = = start + samples )
partial_frame = false ;
else
partial_frame = true ;
// Add samples for current channel to the frame. Reduce the volume to 98%, to prevent
// some louder samples from maxing out at 1.0 (not sure why this happens)
f - > AddAudio ( true , channel_filter , start , iterate_channel_buffer , samples , 0.98f ) ;
// Debug output
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::ProcessAudioPacket (f->AddAudio) " , " frame " , starting_frame_number , " start " , start , " samples " , samples , " channel " , channel_filter , " partial_frame " , partial_frame , " samples_per_frame " , samples_per_frame ) ;
// Add or update cache
working_cache . Add ( f ) ;
// Decrement remaining samples
remaining_samples - = samples ;
// Increment buffer (to next set of samples)
if ( remaining_samples > 0 )
iterate_channel_buffer + = samples ;
// Increment frame number
starting_frame_number + + ;
// Reset starting sample #
start = 0 ;
2012-07-06 02:34:18 -05:00
}
2012-08-21 15:31:52 -05:00
2017-08-20 17:37:39 -05:00
// clear channel buffer
delete [ ] channel_buffer ;
channel_buffer = NULL ;
iterate_channel_buffer = NULL ;
}
2015-03-07 17:07:37 -06:00
2017-08-20 17:37:39 -05:00
// Clean up some arrays
delete [ ] audio_buf ;
audio_buf = NULL ;
2014-08-27 09:44:27 -05:00
2017-08-20 17:37:39 -05:00
// Remove audio frame from list of processing audio frames
{
const GenericScopedLock < CriticalSection > lock ( processingCriticalSection ) ;
// Update all frames as completed
2017-09-28 16:03:01 -05:00
for ( int64_t f = target_frame ; f < starting_frame_number ; f + + ) {
2017-08-20 17:37:39 -05:00
// Remove the frame # from the processing list. NOTE: If more than one thread is
// processing this frame, the frame # will be in this list multiple times. We are only
// removing a single instance of it here.
processing_audio_frames . erase ( processing_audio_frames . find ( f ) ) ;
2012-07-08 23:26:44 -05:00
2017-08-20 17:37:39 -05:00
// Check and see if this frame is also being processed by another thread
if ( processing_audio_frames . count ( f ) = = 0 )
// No other thread is processing it. Mark the audio as processed (final)
processed_audio_frames [ f ] = f ;
}
2015-03-07 17:07:37 -06:00
2017-08-20 17:37:39 -05:00
if ( target_frame = = starting_frame_number ) {
// This typically never happens, but just in case, remove the currently processing number
processing_audio_frames . erase ( processing_audio_frames . find ( target_frame ) ) ;
}
}
2016-08-15 00:44:51 -05:00
// Free audio frame
AV_FREE_FRAME ( & audio_frame ) ;
2017-08-20 17:37:39 -05:00
// Debug output
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::ProcessAudioPacket (After) " , " requested_frame " , requested_frame , " starting_frame " , target_frame , " end_frame " , starting_frame_number - 1 , " " , - 1 , " " , - 1 , " " , - 1 ) ;
2011-10-11 08:44:27 -05:00
}
// Seek to a specific frame. This is not always frame accurate, it's more of an estimation on many codecs.
2017-10-26 18:44:35 -05:00
void FFmpegReader : : Seek ( int64_t requested_frame )
2011-10-11 08:44:27 -05:00
{
// Adjust for a requested frame that is too small or too large
if ( requested_frame < 1 )
requested_frame = 1 ;
if ( requested_frame > info . video_length )
requested_frame = info . video_length ;
2017-08-20 17:37:39 -05:00
int processing_video_frames_size = 0 ;
int processing_audio_frames_size = 0 ;
{
const GenericScopedLock < CriticalSection > lock ( processingCriticalSection ) ;
processing_video_frames_size = processing_video_frames . size ( ) ;
processing_audio_frames_size = processing_audio_frames . size ( ) ;
}
2014-08-27 09:44:27 -05:00
// Debug output
2018-04-14 16:25:13 -05:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::Seek " , " requested_frame " , requested_frame , " seek_count " , seek_count , " last_frame " , last_frame , " processing_video_frames_size " , processing_video_frames_size , " processing_audio_frames_size " , processing_audio_frames_size , " video_pts_offset " , video_pts_offset ) ;
2016-11-23 01:50:03 -06:00
// Wait for any processing frames to complete
2017-08-20 17:37:39 -05:00
while ( processing_video_frames_size + processing_audio_frames_size > 0 ) {
2016-11-23 01:50:03 -06:00
usleep ( 2500 ) ;
2017-08-20 17:37:39 -05:00
const GenericScopedLock < CriticalSection > lock ( processingCriticalSection ) ;
processing_video_frames_size = processing_video_frames . size ( ) ;
processing_audio_frames_size = processing_audio_frames . size ( ) ;
}
2014-08-27 09:44:27 -05:00
2011-10-14 09:47:05 -05:00
// Clear working cache (since we are seeking to another location in the file)
working_cache . Clear ( ) ;
2016-06-29 02:42:00 -05:00
missing_frames . Clear ( ) ;
2011-10-14 09:47:05 -05:00
2014-08-27 09:44:27 -05:00
// Clear processed lists
2015-06-01 00:20:14 -07:00
{
const GenericScopedLock < CriticalSection > lock ( processingCriticalSection ) ;
processing_audio_frames . clear ( ) ;
processing_video_frames . clear ( ) ;
processed_video_frames . clear ( ) ;
processed_audio_frames . clear ( ) ;
2016-06-29 02:42:00 -05:00
missing_audio_frames . clear ( ) ;
2015-08-24 01:05:48 -05:00
missing_video_frames . clear ( ) ;
2016-12-07 01:06:16 -06:00
missing_audio_frames_source . clear ( ) ;
missing_video_frames_source . clear ( ) ;
2016-06-29 02:42:00 -05:00
checked_frames . clear ( ) ;
2015-06-01 00:20:14 -07:00
}
2014-08-27 09:44:27 -05:00
2012-07-06 15:17:57 -05:00
// Reset the last frame variable
last_frame = 0 ;
2015-08-24 01:05:48 -05:00
current_video_frame = 0 ;
largest_frame_processed = 0 ;
2016-01-05 01:59:50 -06:00
num_checks_since_final = 0 ;
num_packets_since_video_frame = 0 ;
2015-08-24 01:05:48 -05:00
has_missing_frames = false ;
2017-05-17 01:17:42 -05:00
bool has_audio_override = info . has_audio ;
bool has_video_override = info . has_video ;
2011-10-24 08:22:21 -05:00
2012-10-10 17:27:46 -05:00
// Increment seek count
seek_count + + ;
2015-08-24 01:05:48 -05:00
// If seeking near frame 1, we need to close and re-open the file (this is more reliable than seeking)
2018-05-30 03:20:31 -05:00
int buffer_amount = max ( OPEN_MP_NUM_PROCESSORS , 8 ) ;
2015-08-24 01:05:48 -05:00
if ( requested_frame - buffer_amount < 20 )
2011-10-11 08:44:27 -05:00
{
// Close and re-open file (basically seeking to frame 1)
2012-10-14 21:09:22 -05:00
Close ( ) ;
2011-10-11 08:44:27 -05:00
Open ( ) ;
2017-05-17 01:17:42 -05:00
// Update overrides (since closing and re-opening might update these)
info . has_audio = has_audio_override ;
info . has_video = has_video_override ;
2011-10-11 08:44:27 -05:00
// Not actually seeking, so clear these flags
is_seeking = false ;
2015-08-24 01:05:48 -05:00
if ( seek_count = = 1 ) {
// Don't redefine this on multiple seek attempts for a specific frame
seeking_frame = 1 ;
seeking_pts = ConvertFrameToVideoPTS ( 1 ) ;
}
2014-09-13 16:35:11 -05:00
seek_audio_frame_found = 0 ; // used to detect which frames to throw away after a seek
seek_video_frame_found = 0 ; // used to detect which frames to throw away after a seek
2011-10-11 08:44:27 -05:00
}
else
{
// Seek to nearest key-frame (aka, i-frame)
2014-04-05 10:19:20 -05:00
bool seek_worked = false ;
2015-08-24 23:49:45 -05:00
int64_t seek_target = 0 ;
2012-07-09 00:41:17 -05:00
2015-08-24 01:05:48 -05:00
// Seek video stream (if any)
2015-08-24 23:49:45 -05:00
if ( ! seek_worked & & info . has_video )
{
seek_target = ConvertFrameToVideoPTS ( requested_frame - buffer_amount ) ;
2015-08-24 01:05:48 -05:00
if ( av_seek_frame ( pFormatCtx , info . video_stream_index , seek_target , AVSEEK_FLAG_BACKWARD ) < 0 ) {
2018-09-11 00:40:31 -05:00
fprintf ( stderr , " %s: error while seeking video stream \n " , pFormatCtx - > AV_FILENAME ) ;
2015-08-24 01:05:48 -05:00
} else
{
// VIDEO SEEK
is_video_seek = true ;
seek_worked = true ;
2015-08-24 23:49:45 -05:00
}
}
// Seek audio stream (if not already seeked... and if an audio stream is found)
if ( ! seek_worked & & info . has_audio )
{
seek_target = ConvertFrameToAudioPTS ( requested_frame - buffer_amount ) ;
2016-12-07 01:06:16 -06:00
if ( av_seek_frame ( pFormatCtx , info . audio_stream_index , seek_target , AVSEEK_FLAG_BACKWARD ) < 0 ) {
2018-09-11 00:40:31 -05:00
fprintf ( stderr , " %s: error while seeking audio stream \n " , pFormatCtx - > AV_FILENAME ) ;
2015-08-24 23:49:45 -05:00
} else
{
// AUDIO SEEK
is_video_seek = false ;
seek_worked = true ;
2015-08-24 01:05:48 -05:00
}
}
2012-07-09 00:41:17 -05:00
// Was the seek successful?
if ( seek_worked )
2011-12-11 20:42:50 -06:00
{
2012-10-12 00:54:53 -05:00
// Flush audio buffer
2012-10-14 02:36:05 -05:00
if ( info . has_audio )
avcodec_flush_buffers ( aCodecCtx ) ;
2012-10-12 00:54:53 -05:00
// Flush video buffer
2012-10-14 02:36:05 -05:00
if ( info . has_video )
avcodec_flush_buffers ( pCodecCtx ) ;
2012-10-12 00:54:53 -05:00
2013-01-25 02:24:18 -06:00
// Reset previous audio location to zero
previous_packet_location . frame = - 1 ;
previous_packet_location . sample_start = 0 ;
2012-10-10 17:27:46 -05:00
// init seek flags
is_seeking = true ;
2015-08-24 01:05:48 -05:00
if ( seek_count = = 1 ) {
// Don't redefine this on multiple seek attempts for a specific frame
seeking_pts = seek_target ;
seeking_frame = requested_frame ;
}
2015-02-05 00:00:52 -06:00
seek_audio_frame_found = 0 ; // used to detect which frames to throw away after a seek
seek_video_frame_found = 0 ; // used to detect which frames to throw away after a seek
2012-10-10 17:27:46 -05:00
}
else
{
// seek failed
is_seeking = false ;
seeking_pts = 0 ;
seeking_frame = 0 ;
2016-03-08 23:11:56 -06:00
// dislable seeking for this reader (since it failed)
// TODO: Find a safer way to do this... not sure how common it is for a seek to fail.
enable_seek = false ;
// Close and re-open file (basically seeking to frame 1)
Close ( ) ;
Open ( ) ;
2017-05-17 01:17:42 -05:00
// Update overrides (since closing and re-opening might update these)
info . has_audio = has_audio_override ;
info . has_video = has_video_override ;
2011-12-11 20:42:50 -06:00
}
2011-10-11 08:44:27 -05:00
}
}
2011-10-24 08:22:21 -05:00
// Get the PTS for the current video packet
2017-09-28 16:03:01 -05:00
int64_t FFmpegReader : : GetVideoPTS ( )
2011-10-24 08:22:21 -05:00
{
2017-09-28 16:03:01 -05:00
int64_t current_pts = 0 ;
2012-07-02 00:51:10 -05:00
if ( packet - > dts ! = AV_NOPTS_VALUE )
current_pts = packet - > dts ;
2011-10-24 08:22:21 -05:00
// Return adjusted PTS
return current_pts ;
}
// Update PTS Offset (if any)
void FFmpegReader::UpdatePTSOffset(bool is_video)
{
	// Determine the offset between the PTS and Frame number (only computed once,
	// on the 1st packet of each stream; 99999 is the "not yet set" sentinel)
	if (is_video)
	{
		// VIDEO PACKET
		if (video_pts_offset == 99999) // Has the offset been set yet?
		{
			// Find the difference between PTS and frame number (no more than 10 timebase units allowed)
			// NOTE(review): max() selects the LARGER of the current PTS and 10 timebase
			// units; the comment's stated intent (a cap) would suggest min() — confirm
			// against upstream behavior before changing.
			video_pts_offset = 0 - max(GetVideoPTS(), (int64_t) info.video_timebase.ToInt() * 10);

			// debug output
			ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::UpdatePTSOffset (Video)", "video_pts_offset", video_pts_offset, "is_video", is_video, "", -1, "", -1, "", -1, "", -1);
		}
	}
	else
	{
		// AUDIO PACKET
		if (audio_pts_offset == 99999) // Has the offset been set yet?
		{
			// Find the difference between PTS and frame number (no more than 10 timebase units allowed)
			// NOTE(review): same max()-vs-min() question as the video branch.
			audio_pts_offset = 0 - max(packet->pts, (int64_t) info.audio_timebase.ToInt() * 10);

			// debug output
			ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::UpdatePTSOffset (Audio)", "audio_pts_offset", audio_pts_offset, "is_video", is_video, "", -1, "", -1, "", -1, "", -1);
		}
	}
}
2011-12-11 20:42:50 -06:00
// Convert PTS into Frame Number
int64_t FFmpegReader::ConvertVideoPTStoFrame(int64_t pts)
{
	// Apply PTS offset (normalizes the stream so the first packet maps near frame 1)
	pts = pts + video_pts_offset;

	// Remember the frame we expected before this packet (used for gap detection below)
	int64_t previous_video_frame = current_video_frame;

	// Get the video packet start time (in seconds)
	double video_seconds = double(pts) * info.video_timebase.ToDouble();

	// Divide by the video timebase, to get the video frame number (frame # is decimal at this point)
	int64_t frame = round(video_seconds * info.fps.ToDouble()) + 1;

	// Keep track of the expected video frame #
	if (current_video_frame == 0)
		// First packet seen — initialize the expected-frame tracker
		current_video_frame = frame;
	else {
		// Sometimes frames are duplicated due to identical (or similar) timestamps
		if (frame == previous_video_frame) {
			// Duplicate timestamp: signal the caller to discard with a -1 frame number
			frame = -1;
		}
		else
			// Increment expected frame
			current_video_frame++;

		if (current_video_frame < frame)
			// Calculated frame jumped ahead of the expected one — frames are missing
			ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ConvertVideoPTStoFrame (detected missing frame)", "calculated frame", frame, "previous_video_frame", previous_video_frame, "current_video_frame", current_video_frame, "", -1, "", -1, "", -1);

		// Sometimes frames are missing due to varying timestamps, or they were dropped. Determine
		// if we are missing a video frame. The maps below are shared with other
		// threads, so they are only touched under processingCriticalSection.
		const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
		while (current_video_frame < frame) {
			if (!missing_video_frames.count(current_video_frame)) {
				ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ConvertVideoPTStoFrame (tracking missing frame)", "current_video_frame", current_video_frame, "previous_video_frame", previous_video_frame, "", -1, "", -1, "", -1, "", -1);
				// Map: missing frame -> frame whose image can stand in for it
				missing_video_frames.insert(pair<int64_t, int64_t>(current_video_frame, previous_video_frame));
				// Reverse map: source frame -> missing frame (consulted when the source becomes final)
				missing_video_frames_source.insert(pair<int64_t, int64_t>(previous_video_frame, current_video_frame));
			}

			// Mark this reader as containing missing frames
			has_missing_frames = true;

			// Increment current frame
			current_video_frame++;
		}
	}

	// Return frame # (or -1 for a duplicate timestamp)
	return frame;
}
2011-10-24 08:22:21 -05:00
// Convert Frame Number into Video PTS
2017-09-28 16:03:01 -05:00
int64_t FFmpegReader : : ConvertFrameToVideoPTS ( int64_t frame_number )
2011-10-11 08:44:27 -05:00
{
2011-11-07 17:12:25 -06:00
// Get timestamp of this frame (in seconds)
2011-12-11 20:42:50 -06:00
double seconds = double ( frame_number ) / info . fps . ToDouble ( ) ;
2011-11-07 17:12:25 -06:00
2011-12-11 20:42:50 -06:00
// Calculate the # of video packets in this timestamp
2017-09-28 16:03:01 -05:00
int64_t video_pts = round ( seconds / info . video_timebase . ToDouble ( ) ) ;
2011-11-07 17:12:25 -06:00
2011-12-11 20:42:50 -06:00
// Apply PTS offset (opposite)
2011-11-07 17:12:25 -06:00
return video_pts - video_pts_offset ;
2011-10-11 08:44:27 -05:00
}
2011-12-11 20:42:50 -06:00
// Convert Frame Number into Video PTS
2017-09-28 16:03:01 -05:00
int64_t FFmpegReader : : ConvertFrameToAudioPTS ( int64_t frame_number )
2011-10-24 08:22:21 -05:00
{
2011-11-07 17:12:25 -06:00
// Get timestamp of this frame (in seconds)
2011-12-11 20:42:50 -06:00
double seconds = double ( frame_number ) / info . fps . ToDouble ( ) ;
2011-10-24 08:22:21 -05:00
2011-12-11 20:42:50 -06:00
// Calculate the # of audio packets in this timestamp
2017-09-28 16:03:01 -05:00
int64_t audio_pts = round ( seconds / info . audio_timebase . ToDouble ( ) ) ;
2011-10-24 08:22:21 -05:00
2011-12-11 20:42:50 -06:00
// Apply PTS offset (opposite)
2011-10-24 08:22:21 -05:00
return audio_pts - audio_pts_offset ;
}
// Calculate Starting video frame and sample # for an audio PTS
AudioLocation FFmpegReader::GetAudioPTSLocation(int64_t pts)
{
	// Apply PTS offset
	pts = pts + audio_pts_offset;

	// Get the audio packet start time (in seconds)
	double audio_seconds = double(pts) * info.audio_timebase.ToDouble();

	// Divide by the video timebase, to get the video frame number (frame # is decimal at this point)
	double frame = (audio_seconds * info.fps.ToDouble()) + 1;

	// Frame # as a whole number (no more decimals)
	int64_t whole_frame = int64_t(frame);

	// Remove the whole number, and only get the decimal of the frame
	double sample_start_percentage = frame - double(whole_frame);

	// Get Samples per frame
	int samples_per_frame = Frame::GetSamplesPerFrame(whole_frame, info.fps, info.sample_rate, info.channels);

	// Calculate the sample # to start on (fractional part of the frame, scaled to samples)
	int sample_start = round(double(samples_per_frame) * sample_start_percentage);

	// Protect against broken (i.e. negative) timestamps
	if (whole_frame < 1)
		whole_frame = 1;
	if (sample_start < 0)
		sample_start = 0;

	// Prepare final audio packet location
	AudioLocation location = {whole_frame, sample_start};

	// Compare to previous audio packet (and fix small gaps due to varying PTS timestamps)
	if (previous_packet_location.frame != -1) {
		if (location.is_near(previous_packet_location, samples_per_frame, samples_per_frame))
		{
			// Small gap: snap this packet back to where the previous one ended
			int64_t orig_frame = location.frame;
			int orig_start = location.sample_start;

			// Update sample start, to prevent gaps in audio
			location.sample_start = previous_packet_location.sample_start;
			location.frame = previous_packet_location.frame;

			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAudioPTSLocation (Audio Gap Detected)", "Source Frame", orig_frame, "Source Audio Sample", orig_start, "Target Frame", location.frame, "Target Audio Sample", location.sample_start, "pts", pts, "", -1);

		} else {
			// Gap too large to bridge — record the skipped frames as missing instead
			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAudioPTSLocation (Audio Gap Ignored - too big)", "Previous location frame", previous_packet_location.frame, "Target Frame", location.frame, "Target Audio Sample", location.sample_start, "pts", pts, "", -1, "", -1);

			// missing_audio_frames is shared state — guard with the processing lock
			const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
			for (int64_t audio_frame = previous_packet_location.frame; audio_frame < location.frame; audio_frame++) {
				if (!missing_audio_frames.count(audio_frame)) {
					ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAudioPTSLocation (tracking missing frame)", "missing_audio_frame", audio_frame, "previous_audio_frame", previous_packet_location.frame, "new location frame", location.frame, "", -1, "", -1, "", -1);
					// NOTE(review): this keys the map on previous_packet_location.frame - 1
					// rather than on audio_frame (the missing frame itself), which is the
					// pattern used for missing_video_frames — confirm intent before changing.
					missing_audio_frames.insert(pair<int64_t, int64_t>(previous_packet_location.frame - 1, audio_frame));
				}
			}
		}
	}

	// Set previous location (so the next packet can be gap-checked against this one)
	previous_packet_location = location;

	// Return the associated video frame and starting sample #
	return location;
}
2011-10-24 08:22:21 -05:00
// Create a new Frame (or return an existing one) and add it to the working queue.
2017-09-28 16:03:01 -05:00
std : : shared_ptr < Frame > FFmpegReader : : CreateFrame ( int64_t requested_frame )
2011-10-24 08:22:21 -05:00
{
// Check working cache
2017-08-20 17:37:39 -05:00
std : : shared_ptr < Frame > output = working_cache . GetFrame ( requested_frame ) ;
2015-08-24 01:05:48 -05:00
if ( ! output )
2011-10-24 08:22:21 -05:00
{
// Create a new frame on the working cache
2017-08-20 17:37:39 -05:00
output = std : : make_shared < Frame > ( requested_frame , info . width , info . height , " #000000 " , Frame : : GetSamplesPerFrame ( requested_frame , info . fps , info . sample_rate , info . channels ) , info . channels ) ;
2015-06-01 00:20:14 -07:00
output - > SetPixelRatio ( info . pixel_ratio . num , info . pixel_ratio . den ) ; // update pixel ratio
output - > ChannelsLayout ( info . channel_layout ) ; // update audio channel layout from the parent reader
output - > SampleRate ( info . sample_rate ) ; // update the frame's sample rate of the parent reader
2012-07-02 00:51:10 -05:00
2016-08-31 02:02:54 -05:00
working_cache . Add ( output ) ;
2011-10-24 08:22:21 -05:00
2014-03-21 01:25:17 -05:00
// Set the largest processed frame (if this is larger)
if ( requested_frame > largest_frame_processed )
largest_frame_processed = requested_frame ;
2011-10-24 08:22:21 -05:00
}
2015-06-01 00:20:14 -07:00
// Return new frame
return output ;
2011-10-24 08:22:21 -05:00
}
2014-09-13 16:35:11 -05:00
// Determine if frame is partial due to seek
2017-09-28 16:03:01 -05:00
bool FFmpegReader : : IsPartialFrame ( int64_t requested_frame ) {
2014-09-13 16:35:11 -05:00
// Sometimes a seek gets partial frames, and we need to remove them
bool seek_trash = false ;
2017-09-28 16:03:01 -05:00
int64_t max_seeked_frame = seek_audio_frame_found ; // determine max seeked frame
2014-09-13 16:35:11 -05:00
if ( seek_video_frame_found > max_seeked_frame )
max_seeked_frame = seek_video_frame_found ;
2014-09-26 09:35:38 -05:00
if ( ( info . has_audio & & seek_audio_frame_found & & max_seeked_frame > = requested_frame ) | |
( info . has_video & & seek_video_frame_found & & max_seeked_frame > = requested_frame ) )
2014-09-13 16:35:11 -05:00
seek_trash = true ;
return seek_trash ;
}
2015-08-24 01:05:48 -05:00
// Check if a frame is missing and attempt to replace its frame image (by
// copying the image from the frame it was split from, if available)
bool FFmpegReader::CheckMissingFrame(int64_t requested_frame)
{
	// Lock (checked_frames / missing_* maps are shared across threads)
	const GenericScopedLock<CriticalSection> lock(processingCriticalSection);

	// Init # of times this frame has been checked so far
	int checked_count = 0;

	// Increment check count for this frame (or init to 1)
	if (checked_frames.count(requested_frame) == 0)
		checked_frames[requested_frame] = 1;
	else
		checked_frames[requested_frame]++;
	checked_count = checked_frames[requested_frame];

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckMissingFrame", "requested_frame", requested_frame, "has_missing_frames", has_missing_frames, "missing_video_frames.size()", missing_video_frames.size(), "checked_count", checked_count, "", -1, "", -1);

	// Missing frames (sometimes frame #'s are skipped due to invalid or missing timestamps)
	map<int64_t, int64_t>::iterator itr;
	bool found_missing_frame = false;
	// NOTE(review): found_missing_frame is never assigned true below, so this
	// function always returns false — confirm whether any caller relies on the
	// return value before changing this.

	// Check if requested frame is a missing frame
	if (missing_video_frames.count(requested_frame) || missing_audio_frames.count(requested_frame)) {
		// Look up the "source" frame whose image should stand in for this one
		int64_t missing_source_frame = -1;
		if (missing_video_frames.count(requested_frame))
			missing_source_frame = missing_video_frames.find(requested_frame)->second;
		else if (missing_audio_frames.count(requested_frame))
			missing_source_frame = missing_audio_frames.find(requested_frame)->second;

		// Increment missing source frame check count (or init to 1)
		if (checked_frames.count(missing_source_frame) == 0)
			checked_frames[missing_source_frame] = 1;
		else
			checked_frames[missing_source_frame]++;

		// Get the previous frame of this missing frame (if it's available in missing cache)
		std::shared_ptr<Frame> parent_frame = missing_frames.GetFrame(missing_source_frame);
		if (parent_frame == NULL) {
			// Not in the missing cache — try the final cache
			parent_frame = final_cache.GetFrame(missing_source_frame);
			if (parent_frame != NULL) {
				// Add missing final frame to missing cache
				missing_frames.Add(parent_frame);
			}
		}

		// Create blank missing frame
		std::shared_ptr<Frame> missing_frame = CreateFrame(requested_frame);

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckMissingFrame (Is Previous Video Frame Final)", "requested_frame", requested_frame, "missing_frame->number", missing_frame->number, "missing_source_frame", missing_source_frame, "", -1, "", -1, "", -1);

		// If previous frame found, copy image from previous to missing frame (else we'll just wait a bit and try again later)
		if (parent_frame != NULL) {
			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckMissingFrame (AddImage from Previous Video Frame)", "requested_frame", requested_frame, "missing_frame->number", missing_frame->number, "missing_source_frame", missing_source_frame, "", -1, "", -1, "", -1);

			// Add this frame to the processed map (since it's already done)
			std::shared_ptr<QImage> parent_image = parent_frame->GetImage();
			if (parent_image) {
				// Deep-copy the source image so the two frames don't share pixels
				missing_frame->AddImage(std::shared_ptr<QImage>(new QImage(*parent_image)));

				// Mark both video and audio as processed for this frame
				processed_video_frames[missing_frame->number] = missing_frame->number;
				processed_audio_frames[missing_frame->number] = missing_frame->number;

				// Move frame to final cache
				final_cache.Add(missing_frame);

				// Remove frame from working cache
				working_cache.Remove(missing_frame->number);

				// Update last_frame processed
				last_frame = missing_frame->number;
			}
		}
	}

	return found_missing_frame;
}
2011-10-24 08:22:21 -05:00
// Check the working queue, and move finished frames to the finished queue
void FFmpegReader::CheckWorkingFrames(bool end_of_stream, int64_t requested_frame)
{
	// Loop through all working queue frames, smallest first, finalizing each
	// frame whose video and audio are both ready; stop at the first frame that
	// is not ready (frames must be finalized in order).
	bool checked_count_tripped = false;
	int max_checked_count = 80; // give up waiting on a frame after this many checks

	while (true)
	{
		// Get the front (smallest numbered) frame of working cache
		std::shared_ptr<Frame> f(working_cache.GetSmallestFrame());

		// Was a frame found?
		if (!f)
			// No frames found
			break;

		// Remove frames which are too old (far behind the requested frame)
		if (f && f->number < (requested_frame - (OPEN_MP_NUM_PROCESSORS * 2))) {
			working_cache.Remove(f->number);
		}

		// Check if this frame is 'missing' (and patch its image if possible)
		CheckMissingFrame(f->number);

		// Init # of times this frame has been checked so far
		int checked_count = 0;
		int checked_frames_size = 0;

		bool is_video_ready = false;
		bool is_audio_ready = false;
		{ // limit scope of the processing lock to these reads
			const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
			is_video_ready = processed_video_frames.count(f->number);
			is_audio_ready = processed_audio_frames.count(f->number);

			// Get check count for this frame
			checked_frames_size = checked_frames.size();
			if (!checked_count_tripped || f->number >= requested_frame)
				checked_count = checked_frames[f->number];
			else
				// Force checked count over the limit (flush frames before the requested one)
				checked_count = max_checked_count;
		}

		if (previous_packet_location.frame == f->number && !end_of_stream)
			is_audio_ready = false; // don't finalize the last processed audio frame (more samples may arrive)

		bool is_seek_trash = IsPartialFrame(f->number);

		// Adjust for available streams (a missing stream counts as "ready")
		if (!info.has_video) is_video_ready = true;
		if (!info.has_audio) is_audio_ready = true;

		// Make final any frames that get stuck (for whatever reason)
		if (checked_count >= max_checked_count && (!is_video_ready || !is_audio_ready)) {
			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckWorkingFrames (exceeded checked_count)", "requested_frame", requested_frame, "frame_number", f->number, "is_video_ready", is_video_ready, "is_audio_ready", is_audio_ready, "checked_count", checked_count, "checked_frames_size", checked_frames_size);

			// Trigger checked count tripped mode (clear out all frames before requested frame)
			checked_count_tripped = true;

			if (info.has_video && !is_video_ready && last_video_frame) {
				// Copy image from last decoded frame as a stand-in
				f->AddImage(std::shared_ptr<QImage>(new QImage(*last_video_frame->GetImage())));
				is_video_ready = true;
			}

			if (info.has_audio && !is_audio_ready) {
				// Mark audio as processed, and indicate the frame has audio data
				is_audio_ready = true;
			}
		}

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckWorkingFrames", "requested_frame", requested_frame, "frame_number", f->number, "is_video_ready", is_video_ready, "is_audio_ready", is_audio_ready, "checked_count", checked_count, "checked_frames_size", checked_frames_size);

		// Check if working frame is final
		if ((!end_of_stream && is_video_ready && is_audio_ready) || end_of_stream || is_seek_trash)
		{
			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckWorkingFrames (mark frame as final)", "requested_frame", requested_frame, "f->number", f->number, "is_seek_trash", is_seek_trash, "Working Cache Count", working_cache.Count(), "Final Cache Count", final_cache.Count(), "end_of_stream", end_of_stream);

			if (!is_seek_trash)
			{
				// Add missing image (if needed - sometimes end_of_stream causes frames with only audio)
				if (info.has_video && !is_video_ready && last_video_frame)
					// Copy image from last frame
					f->AddImage(std::shared_ptr<QImage>(new QImage(*last_video_frame->GetImage())));

				// Reset counter since last 'final' frame
				num_checks_since_final = 0;

				// Move frame to final cache
				final_cache.Add(f);

				// Add to missing cache (if another frame depends on it for its image)
				{
					const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
					if (missing_video_frames_source.count(f->number)) {
						// Debug output
						ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckWorkingFrames (add frame to missing cache)", "f->number", f->number, "is_seek_trash", is_seek_trash, "Missing Cache Count", missing_frames.Count(), "Working Cache Count", working_cache.Count(), "Final Cache Count", final_cache.Count(), "", -1);
						missing_frames.Add(f);
					}

					// Remove from 'checked' count
					checked_frames.erase(f->number);
				}

				// Remove frame from working cache
				working_cache.Remove(f->number);

				// Update last frame processed
				last_frame = f->number;

			} else {
				// Seek trash, so delete the frame from the working cache, and never add it to the final cache.
				working_cache.Remove(f->number);
			}
		}
		else
			// Front frame not ready yet — stop looping (later frames must wait)
			break;
	}
}
2011-12-15 16:11:48 -06:00
// Check for the correct frames per second (FPS) value by scanning the 1st few seconds of video packets.
void FFmpegReader::CheckFPS()
{
	check_fps = true;

	// Count of video packets landing in each of the first 5 seconds of the stream
	int first_second_counter = 0;
	int second_second_counter = 0;
	int third_second_counter = 0;
	int forth_second_counter = 0;
	int fifth_second_counter = 0;
	int frames_detected = 0;
	int64_t pts = 0;

	// Loop through the stream
	while (true)
	{
		// Get the next packet (if any)
		if (GetNextPacket() < 0)
			// Break loop when no more packets found
			break;

		// Video packet
		if (packet->stream_index == videoStream)
		{
			// Check if the AVFrame is finished and set it
			if (GetAVFrame())
			{
				// Update PTS / Frame Offset (if any)
				UpdatePTSOffset(true);

				// Get PTS of this packet
				pts = GetVideoPTS();

				// Remove pFrame (free the decoded picture buffer)
				RemoveAVFrame(pFrame);

				// Apply PTS offset
				pts += video_pts_offset;

				// Get the video packet start time (in seconds)
				double video_seconds = double(pts) * info.video_timebase.ToDouble();

				// Increment the correct per-second counter
				if (video_seconds <= 1.0)
					first_second_counter++;
				else if (video_seconds > 1.0 && video_seconds <= 2.0)
					second_second_counter++;
				else if (video_seconds > 2.0 && video_seconds <= 3.0)
					third_second_counter++;
				else if (video_seconds > 3.0 && video_seconds <= 4.0)
					forth_second_counter++;
				else if (video_seconds > 4.0 && video_seconds <= 5.0)
					fifth_second_counter++;

				// Increment counters
				frames_detected++;
			}
		}
	}

	// Double check that all counters have greater than zero (or give up)
	// NOTE(review): first_second_counter is tallied but not consulted in any
	// branch below — presumably because second 1 may be truncated; confirm.
	if (second_second_counter != 0 && third_second_counter != 0 && forth_second_counter != 0 && fifth_second_counter != 0) {
		// Calculate average FPS (average of seconds 2-5)
		int sum_fps = second_second_counter + third_second_counter + forth_second_counter + fifth_second_counter;
		int avg_fps = round(sum_fps / 4.0f);

		// Update FPS
		info.fps = Fraction(avg_fps, 1);

		// Update Duration and Length
		info.video_length = frames_detected;
		info.duration = frames_detected / (sum_fps / 4.0f);

		// Update video bit rate (bytes of file per second of video)
		info.video_bit_rate = info.file_size / info.duration;

	} else if (second_second_counter != 0 && third_second_counter != 0) {
		// Calculate average FPS (only on second 2)
		int sum_fps = second_second_counter;

		// Update FPS
		info.fps = Fraction(sum_fps, 1);

		// Update Duration and Length
		info.video_length = frames_detected;
		info.duration = frames_detected / float(sum_fps);

		// Update video bit rate
		info.video_bit_rate = info.file_size / info.duration;

	} else {
		// Too short to determine framerate, just default FPS
		// Set a few important default video settings (so audio can be divided into frames)
		info.fps.num = 30;
		info.fps.den = 1;

		// Calculate number of frames
		info.video_length = frames_detected;
		info.duration = frames_detected / info.fps.ToFloat();
	}
}
2012-07-03 16:58:07 -05:00
// Remove AVFrame from cache (and deallocate it's memory)
2018-03-21 02:10:46 -05:00
void FFmpegReader : : RemoveAVFrame ( AVFrame * remove_frame )
2012-07-03 16:58:07 -05:00
{
2017-08-20 17:37:39 -05:00
// Remove pFrame (if exists)
if ( remove_frame )
{
// Free memory
2018-03-21 02:10:46 -05:00
av_freep ( & remove_frame - > data [ 0 ] ) ;
}
2012-07-03 16:58:07 -05:00
}
// Remove AVPacket from cache (and deallocate it's memory)
void FFmpegReader : : RemoveAVPacket ( AVPacket * remove_packet )
{
2016-11-14 22:37:44 -06:00
// deallocate memory for packet
2017-08-20 17:37:39 -05:00
AV_FREE_PACKET ( remove_packet ) ;
2012-07-03 16:58:07 -05:00
2016-11-14 22:37:44 -06:00
// Delete the object
delete remove_packet ;
2012-07-03 16:58:07 -05:00
}
2012-07-06 02:34:18 -05:00
/// Get the smallest video frame that is still being processed
2017-09-28 16:03:01 -05:00
int64_t FFmpegReader : : GetSmallestVideoFrame ( )
2012-07-06 02:34:18 -05:00
{
// Loop through frame numbers
2017-09-28 16:03:01 -05:00
map < int64_t , int64_t > : : iterator itr ;
int64_t smallest_frame = - 1 ;
2017-08-20 17:37:39 -05:00
const GenericScopedLock < CriticalSection > lock ( processingCriticalSection ) ;
2012-07-06 02:34:18 -05:00
for ( itr = processing_video_frames . begin ( ) ; itr ! = processing_video_frames . end ( ) ; + + itr )
{
if ( itr - > first < smallest_frame | | smallest_frame = = - 1 )
smallest_frame = itr - > first ;
}
// Return frame number
return smallest_frame ;
}
/// Get the smallest audio frame that is still being processed
2017-09-28 16:03:01 -05:00
int64_t FFmpegReader : : GetSmallestAudioFrame ( )
2012-07-06 02:34:18 -05:00
{
// Loop through frame numbers
2017-09-28 16:03:01 -05:00
map < int64_t , int64_t > : : iterator itr ;
int64_t smallest_frame = - 1 ;
2015-08-24 01:05:48 -05:00
const GenericScopedLock < CriticalSection > lock ( processingCriticalSection ) ;
2012-07-06 02:34:18 -05:00
for ( itr = processing_audio_frames . begin ( ) ; itr ! = processing_audio_frames . end ( ) ; + + itr )
{
if ( itr - > first < smallest_frame | | smallest_frame = = - 1 )
smallest_frame = itr - > first ;
}
// Return frame number
return smallest_frame ;
}
2013-12-07 21:09:55 -06:00
// Generate JSON string of this object
string FFmpegReader::Json() {

	// Build the JSON tree, then serialize it into a human-readable string
	Json::Value root = JsonValue();
	return root.toStyledString();
}
2013-12-07 16:52:09 -06:00
// Generate Json::JsonValue for this object
Json::Value FFmpegReader::JsonValue() {

	// Start from the properties serialized by the parent class
	Json::Value root = ReaderBase::JsonValue();

	// Append this reader's own properties
	root["type"] = "FFmpegReader";
	root["path"] = path;

	// Return the completed JSON tree
	return root;
}
2013-12-07 21:09:55 -06:00
// Load JSON string into this object
2017-10-26 18:44:35 -05:00
void FFmpegReader : : SetJson ( string value ) {
2013-12-07 21:09:55 -06:00
// Parse JSON string into JSON objects
Json : : Value root ;
Json : : Reader reader ;
bool success = reader . parse ( value , root ) ;
if ( ! success )
// Raise exception
throw InvalidJSON ( " JSON could not be parsed (or is invalid) " , " " ) ;
try
{
// Set all values that match
SetJsonValue ( root ) ;
}
catch ( exception e )
{
// Error parsing JSON (or missing keys)
throw InvalidJSON ( " JSON is invalid (missing keys or invalid data types) " , " " ) ;
}
}
2013-12-07 16:52:09 -06:00
// Load Json::JsonValue into this object
2017-10-26 18:44:35 -05:00
void FFmpegReader : : SetJsonValue ( Json : : Value root ) {
2013-12-07 16:52:09 -06:00
// Set parent data
2013-12-07 21:09:55 -06:00
ReaderBase : : SetJsonValue ( root ) ;
2013-12-07 16:52:09 -06:00
// Set data from Json (if key is found)
2014-01-08 01:43:58 -06:00
if ( ! root [ " path " ] . isNull ( ) )
2013-12-07 16:52:09 -06:00
path = root [ " path " ] . asString ( ) ;
2013-12-07 21:09:55 -06:00
// Re-Open path, and re-init everything (if needed)
if ( is_open )
{
Close ( ) ;
Open ( ) ;
}
2013-12-07 16:52:09 -06:00
}