/**
 * @file
 * @brief Source file for FFmpegReader class
 * @author Jonathan Thomas <jonathan@openshot.org>, Fabrice Bellard
 *
 * @ref License
 */

/* LICENSE
 *
 * Copyright (c) 2008-2019 OpenShot Studios, LLC, Fabrice Bellard
 * (http://www.openshotstudios.com). This file is part of
 * OpenShot Library (http://www.openshot.org), an open-source project
 * dedicated to delivering high quality video editing and animation solutions
 * to the world.
 *
 * This file is originally based on the Libavformat API example, and then modified
 * by the libopenshot project.
 *
 * OpenShot Library (libopenshot) is free software: you can redistribute it
 * and/or modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * OpenShot Library (libopenshot) is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
 */
2020-10-18 07:43:37 -04:00
#include "FFmpegReader.h"

#include <cstdlib>  // for std::abs (long long overload)
#include <thread>   // for std::this_thread::sleep_for
#include <chrono>   // for std::chrono::milliseconds
2019-04-18 01:07:57 -05:00
# define ENABLE_VAAPI 0
2018-09-14 14:40:29 -07:00
2020-02-10 01:50:31 -05:00
# if HAVE_HW_ACCEL
2018-09-18 12:38:53 -07:00
# pragma message "You are compiling with experimental hardware decode"
# else
2018-09-18 13:08:42 -07:00
# pragma message "You are compiling only with software decode"
2018-09-18 12:38:53 -07:00
# endif
2020-02-10 01:50:31 -05:00
# if HAVE_HW_ACCEL
2018-09-13 12:37:32 -07:00
# define MAX_SUPPORTED_WIDTH 1950
# define MAX_SUPPORTED_HEIGHT 1100
2018-09-14 14:40:29 -07:00
2019-04-18 01:07:57 -05:00
# if ENABLE_VAAPI
2018-09-14 14:40:29 -07:00
# include "libavutil/hwcontext_vaapi.h"
2018-09-13 12:37:32 -07:00
// Mirror of FFmpeg's *internal* VAAPI decode context layout. It is re-declared
// here so this reader can reach `va_config` (needed when querying hardware
// frame constraints in Open()); the member order and types must match FFmpeg's
// private definition exactly.
// NOTE(review): keep in sync with the FFmpeg version being linked — a layout
// mismatch would silently read the wrong field.
typedef struct VAAPIDecodeContext {
	VAProfile va_profile;              // negotiated VA-API profile
	VAEntrypoint va_entrypoint;        // decode entry point
	VAConfigID va_config;              // config id, passed to hwframe-constraint queries
	VAContextID va_context;            // decode context id

#if FF_API_STRUCT_VAAPI_CONTEXT
	// FF_DISABLE_DEPRECATION_WARNINGS
	int have_old_context;              // non-zero when the legacy vaapi_context API is in use
	struct vaapi_context *old_context; // legacy (deprecated) context
	AVBufferRef *device_ref;           // owning reference for the legacy device
	// FF_ENABLE_DEPRECATION_WARNINGS
#endif

	AVHWDeviceContext *device;         // generic hardware device context
	AVVAAPIDeviceContext *hwctx;       // VAAPI-specific device data

	AVHWFramesContext *frames;         // hardware frame pool
	AVVAAPIFramesContext *hwfc;        // VAAPI-specific frame data

	enum AVPixelFormat surface_format; // pixel format of the decode surfaces
	int surface_count;                 // number of allocated surfaces
} VAAPIDecodeContext;
2020-02-10 01:50:31 -05:00
# endif // ENABLE_VAAPI
# endif // HAVE_HW_ACCEL
2018-09-13 12:37:32 -07:00
2011-10-11 08:44:27 -05:00
using namespace openshot;

// Global hardware-decode state shared by all FFmpegReader instances.
// Non-zero once hardware decoding has been requested via Settings (set in Open()).
int hw_de_on = 0;
#if HAVE_HW_ACCEL
// Pixel format chosen by the get_hw_dec_format() callback (AV_PIX_FMT_NONE until negotiated)
AVPixelFormat hw_de_av_pix_fmt_global = AV_PIX_FMT_NONE;
// Hardware device type matching the negotiated pixel format
AVHWDeviceType hw_de_av_device_type_global = AV_HWDEVICE_TYPE_NONE;
#endif
2018-08-31 21:36:23 -07:00
2020-06-08 16:02:02 -04:00
// Constructor. Initializes all seek/PTS bookkeeping state, configures OpenMP,
// registers FFmpeg formats/codecs, sizes the frame caches, and (optionally)
// opens the file once to populate the reader's info attributes.
// @param path            Path of the media file to read
// @param inspect_reader  When true, immediately Open()/Close() so attributes
//                        such as width, height and fps are filled in
FFmpegReader::FFmpegReader(const std::string &path, bool inspect_reader)
		: last_frame(0), is_seeking(0), seeking_pts(0), seeking_frame(0), seek_count(0),
		  audio_pts_offset(99999), video_pts_offset(99999), path(path), is_video_seek(true), check_interlace(false),
		  check_fps(false), enable_seek(true), is_open(false), seek_audio_frame_found(0), seek_video_frame_found(0),
		  prev_samples(0), prev_pts(0), pts_total(0), pts_counter(0), is_duration_known(false), largest_frame_processed(0),
		  current_video_frame(0), has_missing_frames(false), num_packets_since_video_frame(0), num_checks_since_final(0),
		  packet(NULL) {

	// Configure OpenMP parallelism
	// Default number of threads per section
	omp_set_num_threads(OPEN_MP_NUM_PROCESSORS);
	// Allow nested parallel sections as deeply as supported
	omp_set_max_active_levels(OPEN_MP_MAX_ACTIVE);

	// Initialize FFMpeg, and register all formats and codecs
	// (compatibility macros; no-ops on FFmpeg versions with automatic registration)
	AV_REGISTER_ALL
	AVCODEC_REGISTER_ALL

	// Init cache — sized from the (still default) info struct here; Open()
	// re-sizes these once the real stream attributes are known.
	working_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * info.fps.ToDouble() * 2, info.width, info.height, info.sample_rate, info.channels);
	missing_frames.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
	final_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);

	// Open and Close the reader, to populate its attributes (such as height, width, etc...)
	if (inspect_reader) {
		Open();
		Close();
	}
}
2015-12-24 16:44:45 -06:00
// Destructor — closes the reader (releasing codecs, caches and the format
// context) if the caller never called Close() explicitly.
FFmpegReader::~FFmpegReader() {
	if (is_open) {
		// Auto close reader if not already done
		Close();
	}
}
2013-09-08 23:09:54 -05:00
// This struct holds the associated video frame and starting sample # for an audio packet.
2019-04-18 01:07:57 -05:00
bool AudioLocation : : is_near ( AudioLocation location , int samples_per_frame , int64_t amount ) {
2013-09-08 23:09:54 -05:00
// Is frame even close to this one?
if ( abs ( location . frame - frame ) > = 2 )
// This is too far away to be considered
return false ;
2017-01-07 17:34:11 -05:00
// Note that samples_per_frame can vary slightly frame to frame when the
// audio sampling rate is not an integer multiple of the video fps.
2017-09-28 16:03:01 -05:00
int64_t diff = samples_per_frame * ( location . frame - frame ) + location . sample_start - sample_start ;
2017-01-07 17:34:11 -05:00
if ( abs ( diff ) < = amount )
2013-09-08 23:09:54 -05:00
// close
return true ;
// not close
return false ;
}
2020-02-10 01:50:31 -05:00
# if HAVE_HW_ACCEL
2018-08-31 21:36:23 -07:00
2019-04-30 17:43:15 -05:00
// Get hardware pix format
// Get hardware pix format.
// FFmpeg get_format callback: scans the codec's offered pixel formats and
// picks the first hardware format this build/platform supports. As a side
// effect it records the chosen format and matching device type in the
// hw_de_av_pix_fmt_global / hw_de_av_device_type_global globals, which
// Open() later uses to create the hardware device context.
// Returns AV_PIX_FMT_NONE when no supported hardware format is offered.
static enum AVPixelFormat get_hw_dec_format(AVCodecContext *ctx, const enum AVPixelFormat *pix_fmts)
{
	const enum AVPixelFormat *p;
	// pix_fmts is terminated by AV_PIX_FMT_NONE
	for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
		switch (*p) {
#if defined(__linux__)
			// Linux pix formats
			case AV_PIX_FMT_VAAPI:
				hw_de_av_pix_fmt_global = AV_PIX_FMT_VAAPI;
				hw_de_av_device_type_global = AV_HWDEVICE_TYPE_VAAPI;
				return *p;
				break;
			case AV_PIX_FMT_VDPAU:
				hw_de_av_pix_fmt_global = AV_PIX_FMT_VDPAU;
				hw_de_av_device_type_global = AV_HWDEVICE_TYPE_VDPAU;
				return *p;
				break;
#endif
#if defined(_WIN32)
			// Windows pix formats
			case AV_PIX_FMT_DXVA2_VLD:
				hw_de_av_pix_fmt_global = AV_PIX_FMT_DXVA2_VLD;
				hw_de_av_device_type_global = AV_HWDEVICE_TYPE_DXVA2;
				return *p;
				break;
			case AV_PIX_FMT_D3D11:
				hw_de_av_pix_fmt_global = AV_PIX_FMT_D3D11;
				hw_de_av_device_type_global = AV_HWDEVICE_TYPE_D3D11VA;
				return *p;
				break;
#endif
#if defined(__APPLE__)
			// Apple pix formats
			case AV_PIX_FMT_VIDEOTOOLBOX:
				hw_de_av_pix_fmt_global = AV_PIX_FMT_VIDEOTOOLBOX;
				hw_de_av_device_type_global = AV_HWDEVICE_TYPE_VIDEOTOOLBOX;
				return *p;
				break;
#endif
			// Cross-platform pix formats
			case AV_PIX_FMT_CUDA:
				hw_de_av_pix_fmt_global = AV_PIX_FMT_CUDA;
				hw_de_av_device_type_global = AV_HWDEVICE_TYPE_CUDA;
				return *p;
				break;
			case AV_PIX_FMT_QSV:
				hw_de_av_pix_fmt_global = AV_PIX_FMT_QSV;
				hw_de_av_device_type_global = AV_HWDEVICE_TYPE_QSV;
				return *p;
				break;
			default:
				// This is only here to silence unused-enum warnings
				break;
		}
	}
	// No supported hardware format offered; caller falls back to software decode
	ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::get_hw_dec_format (Unable to decode this file using hardware decode)");
	return AV_PIX_FMT_NONE;
}
2019-04-18 01:07:57 -05:00
int FFmpegReader : : IsHardwareDecodeSupported ( int codecid )
2018-09-08 21:17:24 -07:00
{
int ret ;
switch ( codecid ) {
case AV_CODEC_ID_H264 :
case AV_CODEC_ID_MPEG2VIDEO :
case AV_CODEC_ID_VC1 :
case AV_CODEC_ID_WMV1 :
case AV_CODEC_ID_WMV2 :
case AV_CODEC_ID_WMV3 :
2019-04-30 17:43:15 -05:00
ret = 1 ;
break ;
default :
ret = 0 ;
break ;
2018-09-08 21:17:24 -07:00
}
return ret ;
}
2020-02-10 01:50:31 -05:00
# endif // HAVE_HW_ACCEL
2018-08-31 21:36:23 -07:00
2019-04-18 01:07:57 -05:00
// Open the media file: create the FFmpeg format context, locate the video and
// audio streams, open their codecs (optionally with hardware-accelerated
// decode), read container metadata, size the caches, and mark the reader open.
// Throws InvalidFile / NoStreamsFound / InvalidCodec on failure.
// Hardware decode note: the do/while retry loop first attempts hardware
// decode; if the device or frame constraints reject it, the codec context is
// freed and the loop repeats once in pure software mode.
void FFmpegReader::Open() {
	// Open reader if not already open
	if (!is_open) {
		// Initialize format context
		pFormatCtx = NULL;
		{
			// Latch the global hardware-decode switch from user settings
			hw_de_on = (openshot::Settings::Instance()->HARDWARE_DECODER == 0 ? 0 : 1);
		}

		// Open video file
		if (avformat_open_input(&pFormatCtx, path.c_str(), NULL, NULL) != 0)
			throw InvalidFile("File could not be opened.", path);

		// Retrieve stream information
		if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
			throw NoStreamsFound("No streams found in file.", path);

		videoStream = -1;
		audioStream = -1;
		// Loop through each stream, and identify the video and audio stream index
		// (only the FIRST stream of each type is used)
		for (unsigned int i = 0; i < pFormatCtx->nb_streams; i++) {
			// Is this a video stream?
			if (AV_GET_CODEC_TYPE(pFormatCtx->streams[i]) == AVMEDIA_TYPE_VIDEO && videoStream < 0) {
				videoStream = i;
			}
			// Is this an audio stream?
			if (AV_GET_CODEC_TYPE(pFormatCtx->streams[i]) == AVMEDIA_TYPE_AUDIO && audioStream < 0) {
				audioStream = i;
			}
		}
		if (videoStream == -1 && audioStream == -1)
			throw NoStreamsFound("No video or audio streams found in this file.", path);

		// Is there a video stream?
		if (videoStream != -1) {
			// Set the stream index
			info.video_stream_index = videoStream;

			// Set the codec and codec context pointers
			pStream = pFormatCtx->streams[videoStream];

			// Find the codec ID from stream
			AVCodecID codecId = AV_FIND_DECODER_CODEC_ID(pStream);

			// Get codec and codec context from stream
			AVCodec *pCodec = avcodec_find_decoder(codecId);
			AVDictionary *opts = NULL;
			int retry_decode_open = 2;

			// If hw accel is selected but hardware cannot handle repeat with software decoding
			do {
				pCodecCtx = AV_GET_CODEC_CONTEXT(pStream, pCodec);
#if HAVE_HW_ACCEL
				if (hw_de_on && (retry_decode_open == 2)) {
					// Up to here no decision is made if hardware or software decode
					hw_de_supported = IsHardwareDecodeSupported(pCodecCtx->codec_id);
				}
#endif
				retry_decode_open = 0;

				// Set number of threads equal to number of processors (not to exceed 16)
				pCodecCtx->thread_count = std::min(FF_NUM_PROCESSORS, 16);

				if (pCodec == NULL) {
					throw InvalidCodec("A valid video codec could not be found for this file.", path);
				}

				// Init options
				av_dict_set(&opts, "strict", "experimental", 0);
#if HAVE_HW_ACCEL
				if (hw_de_on && hw_de_supported) {
					// Open Hardware Acceleration
					int i_decoder_hw = 0;
					char adapter[256];
					char *adapter_ptr = NULL;
					int adapter_num;
					adapter_num = openshot::Settings::Instance()->HW_DE_DEVICE_SET;
					fprintf(stderr, "Hardware decoding device number: %d\n", adapter_num);

					// Set hardware pix format (callback)
					pCodecCtx->get_format = get_hw_dec_format;

					// Map the user's decoder setting to a device type; only
					// adapters 0-2 are treated as explicit device selections.
					if (adapter_num < 3 && adapter_num >= 0) {
#if defined(__linux__)
						// Linux: DRM render nodes start at /dev/dri/renderD128
						snprintf(adapter, sizeof(adapter), "/dev/dri/renderD%d", adapter_num + 128);
						adapter_ptr = adapter;
						i_decoder_hw = openshot::Settings::Instance()->HARDWARE_DECODER;
						switch (i_decoder_hw) {
							case 1:
								hw_de_av_device_type = AV_HWDEVICE_TYPE_VAAPI;
								break;
							case 2:
								hw_de_av_device_type = AV_HWDEVICE_TYPE_CUDA;
								break;
							case 6:
								hw_de_av_device_type = AV_HWDEVICE_TYPE_VDPAU;
								break;
							case 7:
								hw_de_av_device_type = AV_HWDEVICE_TYPE_QSV;
								break;
							default:
								hw_de_av_device_type = AV_HWDEVICE_TYPE_VAAPI;
								break;
						}
#elif defined(_WIN32)
						adapter_ptr = NULL;
						i_decoder_hw = openshot::Settings::Instance()->HARDWARE_DECODER;
						switch (i_decoder_hw) {
							case 2:
								hw_de_av_device_type = AV_HWDEVICE_TYPE_CUDA;
								break;
							case 3:
								hw_de_av_device_type = AV_HWDEVICE_TYPE_DXVA2;
								break;
							case 4:
								hw_de_av_device_type = AV_HWDEVICE_TYPE_D3D11VA;
								break;
							case 7:
								hw_de_av_device_type = AV_HWDEVICE_TYPE_QSV;
								break;
							default:
								hw_de_av_device_type = AV_HWDEVICE_TYPE_DXVA2;
								break;
						}
#elif defined(__APPLE__)
						adapter_ptr = NULL;
						i_decoder_hw = openshot::Settings::Instance()->HARDWARE_DECODER;
						switch (i_decoder_hw) {
							case 5:
								hw_de_av_device_type = AV_HWDEVICE_TYPE_VIDEOTOOLBOX;
								break;
							case 7:
								hw_de_av_device_type = AV_HWDEVICE_TYPE_QSV;
								break;
							default:
								hw_de_av_device_type = AV_HWDEVICE_TYPE_VIDEOTOOLBOX;
								break;
						}
#endif

					} else {
						adapter_ptr = NULL; // Just to be sure
					}

					// Check if it is there and writable
					// (note: each platform branch opens the same `if {`,
					// closed by the shared brace after the #endif)
#if defined(__linux__)
					if (adapter_ptr != NULL && access(adapter_ptr, W_OK) == 0) {
#elif defined(_WIN32)
					if (adapter_ptr != NULL) {
#elif defined(__APPLE__)
					if (adapter_ptr != NULL) {
#endif
						ZmqLogger::Instance()->AppendDebugMethod("Decode Device present using device");
					}
					else {
						adapter_ptr = NULL;  // use default
						ZmqLogger::Instance()->AppendDebugMethod("Decode Device not present using default");
					}

					hw_device_ctx = NULL;
					// Here the first hardware initialisations are made
					if (av_hwdevice_ctx_create(&hw_device_ctx, hw_de_av_device_type, adapter_ptr, NULL, 0) >= 0) {
						if (!(pCodecCtx->hw_device_ctx = av_buffer_ref(hw_device_ctx))) {
							throw InvalidCodec("Hardware device reference create failed.", path);
						}

						/*
						av_buffer_unref(&ist->hw_frames_ctx);
						ist->hw_frames_ctx = av_hwframe_ctx_alloc(hw_device_ctx);
						if (!ist->hw_frames_ctx) {
							av_log(avctx, AV_LOG_ERROR, "Error creating a CUDA frames context\n");
							return AVERROR(ENOMEM);
						}

						frames_ctx = (AVHWFramesContext*)ist->hw_frames_ctx->data;

						frames_ctx->format = AV_PIX_FMT_CUDA;
						frames_ctx->sw_format = avctx->sw_pix_fmt;
						frames_ctx->width = avctx->width;
						frames_ctx->height = avctx->height;
						av_log(avctx, AV_LOG_DEBUG, "Initializing CUDA frames context: sw_format = %s, width = %d, height = %d\n",
							av_get_pix_fmt_name(frames_ctx->sw_format), frames_ctx->width, frames_ctx->height);

						ret = av_hwframe_ctx_init(pCodecCtx->hw_device_ctx);
						ret = av_hwframe_ctx_init(ist->hw_frames_ctx);
						if (ret < 0) {
							av_log(avctx, AV_LOG_ERROR, "Error initializing a CUDA frame pool\n");
							return ret;
						}
						*/
					}
					else {
						throw InvalidCodec("Hardware device create failed.", path);
					}
				}
#endif // HAVE_HW_ACCEL

				// Open video codec
				if (avcodec_open2(pCodecCtx, pCodec, &opts) < 0)
					throw InvalidCodec("A video codec was found, but could not be opened.", path);

#if HAVE_HW_ACCEL
				if (hw_de_on && hw_de_supported) {
					// Verify the coded frame size fits the hardware's limits;
					// if not, fall back to software decode via retry_decode_open.
					AVHWFramesConstraints *constraints = NULL;
					void *hwconfig = NULL;
					hwconfig = av_hwdevice_hwconfig_alloc(hw_device_ctx);

					// TODO: needs va_config!
#if ENABLE_VAAPI
					((AVVAAPIHWConfig *)hwconfig)->config_id = ((VAAPIDecodeContext *)(pCodecCtx->priv_data))->va_config;
					constraints = av_hwdevice_get_hwframe_constraints(hw_device_ctx, hwconfig);
#endif // ENABLE_VAAPI

					if (constraints) {
						// Driver reported explicit min/max frame dimensions
						if (pCodecCtx->coded_width < constraints->min_width ||
							pCodecCtx->coded_height < constraints->min_height ||
							pCodecCtx->coded_width > constraints->max_width ||
							pCodecCtx->coded_height > constraints->max_height) {
							ZmqLogger::Instance()->AppendDebugMethod("DIMENSIONS ARE TOO LARGE for hardware acceleration\n");
							hw_de_supported = 0;
							retry_decode_open = 1;
							AV_FREE_CONTEXT(pCodecCtx);
							if (hw_device_ctx) {
								av_buffer_unref(&hw_device_ctx);
								hw_device_ctx = NULL;
							}
						}
						else {
							// All is just peachy
							ZmqLogger::Instance()->AppendDebugMethod("\nDecode hardware acceleration is used\n", "Min width :", constraints->min_width, "Min Height :", constraints->min_height, "MaxWidth :", constraints->max_width, "MaxHeight :", constraints->max_height, "Frame width :", pCodecCtx->coded_width, "Frame height :", pCodecCtx->coded_height);
							retry_decode_open = 0;
						}
						av_hwframe_constraints_free(&constraints);
						if (hwconfig) {
							av_freep(&hwconfig);
						}
					}
					else {
						// No constraints available — fall back to the
						// configured maximum width/height limits.
						int max_h, max_w;
						//max_h = ((getenv( "LIMIT_HEIGHT_MAX" )==NULL) ? MAX_SUPPORTED_HEIGHT : atoi(getenv( "LIMIT_HEIGHT_MAX" )));
						max_h = openshot::Settings::Instance()->DE_LIMIT_HEIGHT_MAX;
						//max_w = ((getenv( "LIMIT_WIDTH_MAX" )==NULL) ? MAX_SUPPORTED_WIDTH : atoi(getenv( "LIMIT_WIDTH_MAX" )));
						max_w = openshot::Settings::Instance()->DE_LIMIT_WIDTH_MAX;
						ZmqLogger::Instance()->AppendDebugMethod("Constraints could not be found using default limit\n");
						if (pCodecCtx->coded_width < 0 ||
							pCodecCtx->coded_height < 0 ||
							pCodecCtx->coded_width > max_w ||
							pCodecCtx->coded_height > max_h) {
							ZmqLogger::Instance()->AppendDebugMethod("DIMENSIONS ARE TOO LARGE for hardware acceleration\n", "Max Width :", max_w, "Max Height :", max_h, "Frame width :", pCodecCtx->coded_width, "Frame height :", pCodecCtx->coded_height);
							hw_de_supported = 0;
							retry_decode_open = 1;
							AV_FREE_CONTEXT(pCodecCtx);
							if (hw_device_ctx) {
								av_buffer_unref(&hw_device_ctx);
								hw_device_ctx = NULL;
							}
						}
						else {
							ZmqLogger::Instance()->AppendDebugMethod("\nDecode hardware acceleration is used\n", "Max Width :", max_w, "Max Height :", max_h, "Frame width :", pCodecCtx->coded_width, "Frame height :", pCodecCtx->coded_height);
							retry_decode_open = 0;
						}
					}
				} // if hw_de_on && hw_de_supported
				else {
					ZmqLogger::Instance()->AppendDebugMethod("\nDecode in software is used\n");
				}
#else
				retry_decode_open = 0;
#endif // HAVE_HW_ACCEL
			} while (retry_decode_open); // retry_decode_open

			// Free options
			av_dict_free(&opts);

			// Update the File Info struct with video details (if a video stream is found)
			UpdateVideoInfo();
		}

		// Is there an audio stream?
		if (audioStream != -1) {
			// Set the stream index
			info.audio_stream_index = audioStream;

			// Get a pointer to the codec context for the audio stream
			aStream = pFormatCtx->streams[audioStream];

			// Find the codec ID from stream
			AVCodecID codecId = AV_FIND_DECODER_CODEC_ID(aStream);

			// Get codec and codec context from stream
			AVCodec *aCodec = avcodec_find_decoder(codecId);
			aCodecCtx = AV_GET_CODEC_CONTEXT(aStream, aCodec);

			// Set number of threads equal to number of processors (not to exceed 16)
			aCodecCtx->thread_count = std::min(FF_NUM_PROCESSORS, 16);

			if (aCodec == NULL) {
				throw InvalidCodec("A valid audio codec could not be found for this file.", path);
			}

			// Init options
			AVDictionary *opts = NULL;
			av_dict_set(&opts, "strict", "experimental", 0);

			// Open audio codec
			if (avcodec_open2(aCodecCtx, aCodec, &opts) < 0)
				throw InvalidCodec("An audio codec was found, but could not be opened.", path);

			// Free options
			av_dict_free(&opts);

			// Update the File Info struct with audio details (if an audio stream is found)
			UpdateAudioInfo();
		}

		// Add format metadata (if any)
		AVDictionaryEntry *tag = NULL;
		while ((tag = av_dict_get(pFormatCtx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
			QString str_key = tag->key;
			QString str_value = tag->value;
			info.metadata[str_key.toStdString()] = str_value.trimmed().toStdString();
		}

		// Init previous audio location to zero
		previous_packet_location.frame = -1;
		previous_packet_location.sample_start = 0;

		// Adjust cache size based on size of frame and audio
		working_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * info.fps.ToDouble() * 2, info.width, info.height, info.sample_rate, info.channels);
		missing_frames.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
		final_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);

		// Mark as "open"
		is_open = true;
	}
}
2019-04-18 01:07:57 -05:00
// Close the reader: release the pending packet, flush and free both codec
// contexts (and the hardware device context if one was created), clear all
// caches and per-frame bookkeeping, close the format context, and reset
// counters so the reader can be re-opened cleanly.
// Note the teardown order: is_open is cleared FIRST so concurrent readers
// stop using the contexts, then packet, codecs, caches, and finally the file.
void FFmpegReader::Close() {
	// Close all objects, if reader is 'open'
	if (is_open) {
		// Mark as "closed"
		is_open = false;

		ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::Close");

		if (packet) {
			// Remove previous packet before getting next one
			RemoveAVPacket(packet);
			packet = NULL;
		}

		// Close the codec
		if (info.has_video) {
			avcodec_flush_buffers(pCodecCtx);
			AV_FREE_CONTEXT(pCodecCtx);
#if HAVE_HW_ACCEL
			// Release the hardware device context created in Open()
			if (hw_de_on) {
				if (hw_device_ctx) {
					av_buffer_unref(&hw_device_ctx);
					hw_device_ctx = NULL;
				}
			}
#endif // HAVE_HW_ACCEL
		}
		if (info.has_audio) {
			avcodec_flush_buffers(aCodecCtx);
			AV_FREE_CONTEXT(aCodecCtx);
		}

		// Clear final cache
		final_cache.Clear();
		working_cache.Clear();
		missing_frames.Clear();

		// Clear processed lists (under the processing lock, since worker
		// threads touch these maps/sets during decoding)
		{
			const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
			processed_video_frames.clear();
			processed_audio_frames.clear();
			processing_video_frames.clear();
			processing_audio_frames.clear();
			missing_audio_frames.clear();
			missing_video_frames.clear();
			missing_audio_frames_source.clear();
			missing_video_frames_source.clear();
			checked_frames.clear();
		}

		// Close the video file
		avformat_close_input(&pFormatCtx);
		av_freep(&pFormatCtx);

		// Reset some variables
		last_frame = 0;
		largest_frame_processed = 0;
		seek_audio_frame_found = 0;
		seek_video_frame_found = 0;
		current_video_frame = 0;
		has_missing_frames = false;

		// Drop the shared_ptr to the last decoded video frame
		last_video_frame.reset();
	}
}
2019-04-18 01:07:57 -05:00
// Populate the ReaderInfo struct with audio attributes read from the open
// audio stream/codec: codec name, channels/layout, sample rate, bit rate,
// timebase and duration. When the file has no video stream, also installs
// default video settings (24 fps, 720x480) so audio can still be divided
// into frame-sized chunks. Called from Open() after the audio codec opens.
void FFmpegReader::UpdateAudioInfo() {
	// Set values of FileInfo struct
	info.has_audio = true;
	info.file_size = pFormatCtx->pb ? avio_size(pFormatCtx->pb) : -1;
	info.acodec = aCodecCtx->codec->name;
	info.channels = AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channels;
	// Some files report no channel layout; derive a default from channel count
	if (AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channel_layout == 0)
		AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channel_layout = av_get_default_channel_layout(AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channels);
	info.channel_layout = (ChannelLayout) AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channel_layout;
	info.sample_rate = AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->sample_rate;
	info.audio_bit_rate = AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->bit_rate;

	// Set audio timebase
	info.audio_timebase.num = aStream->time_base.num;
	info.audio_timebase.den = aStream->time_base.den;

	// Get timebase of audio stream (if valid) and greater than the current duration
	if (aStream->duration > 0.0f && aStream->duration > info.duration)
		info.duration = aStream->duration * info.audio_timebase.ToDouble();

	// Check for an invalid video length
	if (info.has_video && info.video_length <= 0) {
		// Calculate the video length from the audio duration
		info.video_length = info.duration * info.fps.ToDouble();
	}

	// Set video timebase (if no video stream was found)
	if (!info.has_video) {
		// Set a few important default video settings (so audio can be divided into frames)
		info.fps.num = 24;
		info.fps.den = 1;
		info.video_timebase.num = 1;
		info.video_timebase.den = 24;
		info.video_length = info.duration * info.fps.ToDouble();
		info.width = 720;
		info.height = 480;
	}

	// Fix invalid video lengths for certain types of files (MP3 for example)
	// — when the duration-derived length exceeds the reported one by > 60 frames
	if (info.has_video && ((info.duration * info.fps.ToDouble()) - info.video_length > 60)) {
		info.video_length = info.duration * info.fps.ToDouble();
	}

	// Add audio metadata (if any found)
	AVDictionaryEntry *tag = NULL;
	while ((tag = av_dict_get(aStream->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
		QString str_key = tag->key;
		QString str_value = tag->value;
		info.metadata[str_key.toStdString()] = str_value.trimmed().toStdString();
	}
}
2019-04-18 01:07:57 -05:00
void FFmpegReader : : UpdateVideoInfo ( ) {
2018-07-25 02:24:01 -05:00
if ( check_fps )
// Already initialized all the video metadata, no reason to do it again
return ;
2011-10-11 08:44:27 -05:00
// Set values of FileInfo struct
info . has_video = true ;
2012-06-16 02:12:48 -05:00
info . file_size = pFormatCtx - > pb ? avio_size ( pFormatCtx - > pb ) : - 1 ;
2018-03-21 02:10:46 -05:00
info . height = AV_GET_CODEC_ATTRIBUTES ( pStream , pCodecCtx ) - > height ;
info . width = AV_GET_CODEC_ATTRIBUTES ( pStream , pCodecCtx ) - > width ;
2011-10-11 08:44:27 -05:00
info . vcodec = pCodecCtx - > codec - > name ;
2018-07-25 02:24:01 -05:00
info . video_bit_rate = ( pFormatCtx - > bit_rate / 8 ) ;
2020-02-24 09:35:13 +02:00
// Frame rate from the container and codec
AVRational framerate = av_guess_frame_rate ( pFormatCtx , pStream , NULL ) ;
info . fps . num = framerate . num ;
info . fps . den = framerate . den ;
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::UpdateVideoInfo " , " info.fps.num " , info . fps . num , " info.fps.den " , info . fps . den ) ;
// TODO: remove excessive debug info in the next releases
// The debug info below is just for comparison and troubleshooting on users side during the transition period
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::UpdateVideoInfo (pStream->avg_frame_rate) " , " num " , pStream - > avg_frame_rate . num , " den " , pStream - > avg_frame_rate . den ) ;
2015-06-01 02:05:17 -07:00
2019-04-18 01:07:57 -05:00
if ( pStream - > sample_aspect_ratio . num ! = 0 ) {
2011-10-11 08:44:27 -05:00
info . pixel_ratio . num = pStream - > sample_aspect_ratio . num ;
2011-12-11 20:42:50 -06:00
info . pixel_ratio . den = pStream - > sample_aspect_ratio . den ;
2019-04-18 01:07:57 -05:00
} else if ( AV_GET_CODEC_ATTRIBUTES ( pStream , pCodecCtx ) - > sample_aspect_ratio . num ! = 0 ) {
2018-03-21 02:10:46 -05:00
info . pixel_ratio . num = AV_GET_CODEC_ATTRIBUTES ( pStream , pCodecCtx ) - > sample_aspect_ratio . num ;
info . pixel_ratio . den = AV_GET_CODEC_ATTRIBUTES ( pStream , pCodecCtx ) - > sample_aspect_ratio . den ;
2019-04-18 01:07:57 -05:00
} else {
2011-10-11 08:44:27 -05:00
info . pixel_ratio . num = 1 ;
2011-12-11 20:42:50 -06:00
info . pixel_ratio . den = 1 ;
}
2018-03-21 02:10:46 -05:00
info . pixel_format = AV_GET_CODEC_PIXEL_FORMAT ( pStream , pCodecCtx ) ;
2011-10-11 08:44:27 -05:00
// Calculate the DAR (display aspect ratio)
2011-12-11 20:42:50 -06:00
Fraction size ( info . width * info . pixel_ratio . num , info . height * info . pixel_ratio . den ) ;
2011-10-11 08:44:27 -05:00
// Reduce size fraction
size . Reduce ( ) ;
// Set the ratio based on the reduced fraction
info . display_ratio . num = size . num ;
info . display_ratio . den = size . den ;
2019-08-20 04:32:47 -04:00
// Get scan type and order from codec context/params
if ( ! check_interlace ) {
check_interlace = true ;
AVFieldOrder field_order = AV_GET_CODEC_ATTRIBUTES ( pStream , pCodecCtx ) - > field_order ;
switch ( field_order ) {
case AV_FIELD_PROGRESSIVE :
info . interlaced_frame = false ;
break ;
case AV_FIELD_TT :
case AV_FIELD_TB :
info . interlaced_frame = true ;
info . top_field_first = true ;
break ;
case AV_FIELD_BT :
case AV_FIELD_BB :
info . interlaced_frame = true ;
info . top_field_first = false ;
break ;
case AV_FIELD_UNKNOWN :
// Check again later?
check_interlace = false ;
break ;
}
2019-09-22 01:37:32 -04:00
// check_interlace will prevent these checks being repeated,
// unless it was cleared because we got an AV_FIELD_UNKNOWN response.
2019-08-20 04:32:47 -04:00
}
2011-12-11 20:42:50 -06:00
// Set the video timebase
2011-10-11 08:44:27 -05:00
info . video_timebase . num = pStream - > time_base . num ;
info . video_timebase . den = pStream - > time_base . den ;
2011-12-11 20:42:50 -06:00
// Set the duration in seconds, and video length (# of frames)
info . duration = pStream - > duration * info . video_timebase . ToDouble ( ) ;
2012-08-12 02:14:15 -05:00
2013-09-08 16:08:56 -05:00
// Check for valid duration (if found)
2012-08-12 02:14:15 -05:00
if ( info . duration < = 0.0f & & pFormatCtx - > duration > = 0 )
// Use the format's duration
info . duration = pFormatCtx - > duration / AV_TIME_BASE ;
2013-09-08 16:08:56 -05:00
// Calculate duration from filesize and bitrate (if any)
if ( info . duration < = 0.0f & & info . video_bit_rate > 0 & & info . file_size > 0 )
// Estimate from bitrate, total bytes, and framerate
info . duration = ( info . file_size / info . video_bit_rate ) ;
// No duration found in stream of file
2019-04-18 01:07:57 -05:00
if ( info . duration < = 0.0f ) {
2013-09-08 16:08:56 -05:00
// No duration is found in the video stream
info . duration = - 1 ;
info . video_length = - 1 ;
is_duration_known = false ;
2019-04-18 01:07:57 -05:00
} else {
2013-09-08 16:08:56 -05:00
// Yes, a duration was found
is_duration_known = true ;
// Calculate number of frames
info . video_length = round ( info . duration * info . fps . ToDouble ( ) ) ;
}
2011-12-11 20:42:50 -06:00
2012-07-03 02:42:47 -05:00
// Override an invalid framerate
2018-09-17 00:27:30 -05:00
if ( info . fps . ToFloat ( ) > 240.0f | | ( info . fps . num < = 0 | | info . fps . den < = 0 ) | | info . video_length < = 0 ) {
2018-07-25 02:24:01 -05:00
// Calculate FPS, duration, video bit rate, and video length manually
// by scanning through all the video stream packets
CheckFPS ( ) ;
2012-07-08 23:26:44 -05:00
}
2012-07-03 02:42:47 -05:00
2018-02-03 01:57:18 -06:00
// Add video metadata (if any)
AVDictionaryEntry * tag = NULL ;
while ( ( tag = av_dict_get ( pStream - > metadata , " " , tag , AV_DICT_IGNORE_SUFFIX ) ) ) {
QString str_key = tag - > key ;
QString str_value = tag - > value ;
info . metadata [ str_key . toStdString ( ) ] = str_value . trimmed ( ) . toStdString ( ) ;
}
2011-10-11 08:44:27 -05:00
}
2012-10-31 01:17:12 -05:00
2019-04-18 01:07:57 -05:00
std : : shared_ptr < Frame > FFmpegReader : : GetFrame ( int64_t requested_frame ) {
2012-10-09 01:45:34 -05:00
// Check for open reader (or throw exception)
if ( ! is_open )
throw ReaderClosed ( " The FFmpegReader is closed. Call Open() before calling this method . " , path) ;
2014-03-21 01:25:17 -05:00
// Adjust for a requested frame that is too small or too large
if ( requested_frame < 1 )
requested_frame = 1 ;
if ( requested_frame > info . video_length & & is_duration_known )
requested_frame = info . video_length ;
if ( info . has_video & & info . video_length = = 0 )
// Invalid duration of video file
throw InvalidFile ( " Could not detect the duration of the video or audio stream. " , path ) ;
2014-08-27 09:44:27 -05:00
// Debug output
2019-07-03 14:14:02 -04:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::GetFrame " , " requested_frame " , requested_frame , " last_frame " , last_frame ) ;
2014-08-27 09:44:27 -05:00
2011-10-11 08:44:27 -05:00
// Check the cache for this frame
2017-08-20 17:37:39 -05:00
std : : shared_ptr < Frame > frame = final_cache . GetFrame ( requested_frame ) ;
2015-08-05 23:40:58 -05:00
if ( frame ) {
2014-08-27 09:44:27 -05:00
// Debug output
2019-07-03 14:14:02 -04:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::GetFrame " , " returned cached frame " , requested_frame ) ;
2014-08-27 09:44:27 -05:00
2011-10-11 08:44:27 -05:00
// Return the cached frame
2015-08-05 23:40:58 -05:00
return frame ;
2019-04-18 01:07:57 -05:00
} else {
# pragma omp critical (ReadStream)
2019-01-31 09:42:26 -08:00
{
2018-06-21 02:44:08 -05:00
// Check the cache a 2nd time (due to a potential previous lock)
frame = final_cache . GetFrame ( requested_frame ) ;
if ( frame ) {
// Debug output
2019-07-03 14:14:02 -04:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::GetFrame " , " returned cached frame on 2nd look " , requested_frame ) ;
2015-06-01 00:20:14 -07:00
2018-06-21 02:44:08 -05:00
// Return the cached frame
2019-04-18 01:07:57 -05:00
} else {
2018-06-21 02:44:08 -05:00
// Frame is not in cache
// Reset seek count
seek_count = 0 ;
2012-10-10 17:27:46 -05:00
2018-06-21 02:44:08 -05:00
// Check for first frame (always need to get frame 1 before other frames, to correctly calculate offsets)
if ( last_frame = = 0 & & requested_frame ! = 1 )
// Get first frame
ReadStream ( 1 ) ;
2011-10-11 08:44:27 -05:00
2018-06-21 02:44:08 -05:00
// Are we within X frames of the requested frame?
int64_t diff = requested_frame - last_frame ;
2019-04-18 01:07:57 -05:00
if ( diff > = 1 & & diff < = 20 ) {
2018-06-21 02:44:08 -05:00
// Continue walking the stream
frame = ReadStream ( requested_frame ) ;
2019-04-18 01:07:57 -05:00
} else {
2018-06-21 02:44:08 -05:00
// Greater than 30 frames away, or backwards, we need to seek to the nearest key frame
if ( enable_seek )
// Only seek if enabled
Seek ( requested_frame ) ;
2019-04-18 01:07:57 -05:00
else if ( ! enable_seek & & diff < 0 ) {
2018-06-21 02:44:08 -05:00
// Start over, since we can't seek, and the requested frame is smaller than our position
Close ( ) ;
Open ( ) ;
}
// Then continue walking the stream
frame = ReadStream ( requested_frame ) ;
}
}
2019-01-31 09:42:26 -08:00
} //omp critical
return frame ;
2011-10-11 08:44:27 -05:00
}
}
// Read the stream until we find the requested Frame
2019-04-18 01:07:57 -05:00
std : : shared_ptr < Frame > FFmpegReader : : ReadStream ( int64_t requested_frame ) {
2011-10-11 08:44:27 -05:00
// Allocate video frame
2011-10-24 08:22:21 -05:00
bool end_of_stream = false ;
2012-07-03 02:59:38 -05:00
bool check_seek = false ;
2012-07-03 16:58:07 -05:00
bool frame_finished = false ;
int packet_error = - 1 ;
2011-10-11 08:44:27 -05:00
2012-08-24 17:03:23 -05:00
// Minimum number of packets to process (for performance reasons)
2013-01-25 02:24:18 -06:00
int packets_processed = 0 ;
2014-04-02 16:48:27 -05:00
int minimum_packets = OPEN_MP_NUM_PROCESSORS ;
2019-04-18 01:07:57 -05:00
int max_packets = 4096 ;
2014-04-02 16:48:27 -05:00
2014-08-27 09:44:27 -05:00
// Debug output
2019-07-03 14:14:02 -04:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::ReadStream " , " requested_frame " , requested_frame , " OPEN_MP_NUM_PROCESSORS " , OPEN_MP_NUM_PROCESSORS ) ;
2014-08-27 09:44:27 -05:00
2019-04-18 01:07:57 -05:00
# pragma omp parallel
2011-10-11 08:44:27 -05:00
{
2019-04-18 01:07:57 -05:00
# pragma omp single
2011-10-11 08:44:27 -05:00
{
// Loop through the stream until the correct frame is found
2019-04-18 01:07:57 -05:00
while ( true ) {
2015-06-01 00:20:14 -07:00
// Get the next packet into a local variable called packet
2012-07-03 16:58:07 -05:00
packet_error = GetNextPacket ( ) ;
2017-08-20 17:37:39 -05:00
int processing_video_frames_size = 0 ;
int processing_audio_frames_size = 0 ;
{
2019-04-18 01:07:57 -05:00
const GenericScopedLock < CriticalSection > lock ( processingCriticalSection ) ;
2017-08-20 17:37:39 -05:00
processing_video_frames_size = processing_video_frames . size ( ) ;
processing_audio_frames_size = processing_audio_frames . size ( ) ;
}
2013-02-15 00:23:55 -06:00
// Wait if too many frames are being processed
2017-08-20 17:37:39 -05:00
while ( processing_video_frames_size + processing_audio_frames_size > = minimum_packets ) {
2020-09-02 02:07:54 -04:00
std : : this_thread : : sleep_for ( std : : chrono : : milliseconds ( 3 ) ) ;
2019-04-18 01:07:57 -05:00
const GenericScopedLock < CriticalSection > lock ( processingCriticalSection ) ;
2017-08-20 17:37:39 -05:00
processing_video_frames_size = processing_video_frames . size ( ) ;
processing_audio_frames_size = processing_audio_frames . size ( ) ;
}
2013-02-15 00:23:55 -06:00
2011-10-24 08:22:21 -05:00
// Get the next packet (if any)
2019-04-18 01:07:57 -05:00
if ( packet_error < 0 ) {
2011-10-24 08:22:21 -05:00
// Break loop when no more packets found
end_of_stream = true ;
break ;
}
2011-10-11 08:44:27 -05:00
2015-02-05 00:00:52 -06:00
// Debug output
2017-08-20 17:37:39 -05:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::ReadStream (GetNextPacket) " , " requested_frame " , requested_frame , " processing_video_frames_size " , processing_video_frames_size , " processing_audio_frames_size " , processing_audio_frames_size , " minimum_packets " , minimum_packets , " packets_processed " , packets_processed , " is_seeking " , is_seeking ) ;
2015-02-05 00:00:52 -06:00
2011-10-11 08:44:27 -05:00
// Video packet
2019-04-18 01:07:57 -05:00
if ( info . has_video & & packet - > stream_index = = videoStream ) {
2016-01-01 01:39:56 -06:00
// Reset this counter, since we have a video packet
num_packets_since_video_frame = 0 ;
2011-10-24 08:22:21 -05:00
// Check the status of a seek (if any)
2019-04-18 01:07:57 -05:00
if ( is_seeking )
# pragma omp critical (openshot_seek)
check_seek = CheckSeek ( true ) ;
else
check_seek = false ;
2012-07-03 02:59:38 -05:00
2019-04-18 01:07:57 -05:00
if ( check_seek ) {
// Jump to the next iteration of this loop
continue ;
}
2011-10-11 08:44:27 -05:00
2019-07-30 21:30:09 +03:00
// Packet may become NULL on Close inside Seek if CheckSeek returns false
if ( ! packet )
// Jump to the next iteration of this loop
continue ;
2015-06-01 00:20:14 -07:00
// Get the AVFrame from the current packet
2012-07-03 16:58:07 -05:00
frame_finished = GetAVFrame ( ) ;
2011-10-11 08:44:27 -05:00
// Check if the AVFrame is finished and set it
2019-04-18 01:07:57 -05:00
if ( frame_finished ) {
2011-10-24 08:22:21 -05:00
// Update PTS / Frame Offset (if any)
UpdatePTSOffset ( true ) ;
2011-10-11 08:44:27 -05:00
// Process Video Packet
ProcessVideoPacket ( requested_frame ) ;
2018-09-11 00:40:31 -05:00
2019-01-09 16:50:40 -06:00
if ( openshot : : Settings : : Instance ( ) - > WAIT_FOR_VIDEO_PROCESSING_TASK ) {
2018-09-11 00:40:31 -05:00
// Wait on each OMP task to complete before moving on to the next one. This slows
// down processing considerably, but might be more stable on some systems.
2019-04-18 01:07:57 -05:00
# pragma omp taskwait
2018-09-11 00:40:31 -05:00
}
2011-10-11 08:44:27 -05:00
}
}
// Audio packet
2019-04-18 01:07:57 -05:00
else if ( info . has_audio & & packet - > stream_index = = audioStream ) {
2016-01-01 01:39:56 -06:00
// Increment this (to track # of packets since the last video packet)
num_packets_since_video_frame + + ;
2011-10-24 08:22:21 -05:00
// Check the status of a seek (if any)
2019-01-09 16:50:40 -06:00
if ( is_seeking )
2019-04-18 01:07:57 -05:00
# pragma omp critical (openshot_seek)
2019-01-09 16:50:40 -06:00
check_seek = CheckSeek ( false ) ;
else
check_seek = false ;
2012-07-03 02:59:38 -05:00
2019-01-09 16:50:40 -06:00
if ( check_seek ) {
// Jump to the next iteration of this loop
continue ;
}
2011-10-11 08:44:27 -05:00
2019-07-30 21:30:09 +03:00
// Packet may become NULL on Close inside Seek if CheckSeek returns false
if ( ! packet )
// Jump to the next iteration of this loop
continue ;
2011-10-24 08:22:21 -05:00
// Update PTS / Frame Offset (if any)
UpdatePTSOffset ( false ) ;
// Determine related video frame and starting sample # from audio PTS
2013-09-10 12:59:06 -05:00
AudioLocation location = GetAudioPTSLocation ( packet - > pts ) ;
2012-07-02 00:51:10 -05:00
2011-10-24 08:22:21 -05:00
// Process Audio Packet
ProcessAudioPacket ( requested_frame , location . frame , location . sample_start ) ;
2011-10-11 08:44:27 -05:00
}
2011-10-24 08:22:21 -05:00
// Check if working frames are 'finished'
2016-01-05 01:59:50 -06:00
if ( ! is_seeking ) {
// Check for final frames
2015-08-24 01:05:48 -05:00
CheckWorkingFrames ( false , requested_frame ) ;
2016-01-05 01:59:50 -06:00
}
2012-06-18 09:26:14 -05:00
2015-06-01 00:20:14 -07:00
// Check if requested 'final' frame is available
2018-09-11 00:40:31 -05:00
bool is_cache_found = ( final_cache . GetFrame ( requested_frame ) ! = NULL ) ;
2012-08-26 02:44:05 -05:00
2015-06-01 00:20:14 -07:00
// Increment frames processed
packets_processed + + ;
2012-07-03 02:42:47 -05:00
2016-01-05 01:59:50 -06:00
// Break once the frame is found
2017-05-17 01:17:42 -05:00
if ( ( is_cache_found & & packets_processed > = minimum_packets ) | | packets_processed > max_packets )
2011-10-24 08:22:21 -05:00
break ;
2011-10-11 08:44:27 -05:00
} // end while
2012-08-28 15:53:18 -05:00
} // end omp single
2018-09-11 00:40:31 -05:00
2011-10-11 08:44:27 -05:00
} // end omp parallel
2014-08-27 09:44:27 -05:00
// Debug output
2019-07-03 14:14:02 -04:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::ReadStream (Completed) " , " packets_processed " , packets_processed , " end_of_stream " , end_of_stream , " largest_frame_processed " , largest_frame_processed , " Working Cache Count " , working_cache . Count ( ) ) ;
2011-10-11 08:44:27 -05:00
2014-01-28 02:41:15 -06:00
// End of stream?
2015-08-05 23:40:58 -05:00
if ( end_of_stream )
2014-01-28 02:41:15 -06:00
// Mark the any other working frames as 'finished'
2015-08-24 01:05:48 -05:00
CheckWorkingFrames ( end_of_stream , requested_frame ) ;
2011-10-24 08:22:21 -05:00
// Return requested frame (if found)
2017-08-20 17:37:39 -05:00
std : : shared_ptr < Frame > frame = final_cache . GetFrame ( requested_frame ) ;
2015-08-05 23:40:58 -05:00
if ( frame )
2011-10-24 08:22:21 -05:00
// Return prepared frame
2015-08-05 23:40:58 -05:00
return frame ;
2015-02-05 00:00:52 -06:00
else {
// Check if largest frame is still cached
2015-08-05 23:40:58 -05:00
frame = final_cache . GetFrame ( largest_frame_processed ) ;
if ( frame ) {
2015-02-05 00:00:52 -06:00
// return the largest processed frame (assuming it was the last in the video file)
2015-08-05 23:40:58 -05:00
return frame ;
2019-04-18 01:07:57 -05:00
} else {
2015-02-05 00:00:52 -06:00
// The largest processed frame is no longer in cache, return a blank frame
2017-08-20 17:37:39 -05:00
std : : shared_ptr < Frame > f = CreateFrame ( largest_frame_processed ) ;
2015-02-05 00:00:52 -06:00
f - > AddColor ( info . width , info . height , " #000 " ) ;
return f ;
}
}
2014-01-28 02:41:15 -06:00
2011-10-11 08:44:27 -05:00
}
// Get the next packet (if any)
2019-04-18 01:07:57 -05:00
int FFmpegReader : : GetNextPacket ( ) {
2015-06-01 00:20:14 -07:00
int found_packet = 0 ;
2018-08-31 21:36:23 -07:00
AVPacket * next_packet ;
2019-04-18 01:07:57 -05:00
# pragma omp critical(getnextpacket)
2012-07-02 00:51:10 -05:00
{
2019-04-18 01:07:57 -05:00
next_packet = new AVPacket ( ) ;
found_packet = av_read_frame ( pFormatCtx , next_packet ) ;
2012-07-02 00:51:10 -05:00
2018-08-31 21:36:23 -07:00
2019-04-18 01:07:57 -05:00
if ( packet ) {
// Remove previous packet before getting next one
RemoveAVPacket ( packet ) ;
packet = NULL ;
}
if ( found_packet > = 0 ) {
// Update current packet pointer
packet = next_packet ;
}
2019-05-15 10:27:48 -05:00
else
delete next_packet ;
2019-01-31 09:42:26 -08:00
}
2012-07-02 00:51:10 -05:00
// Return if packet was found (or error number)
return found_packet ;
2011-10-11 08:44:27 -05:00
}
// Get an AVFrame (if any)
2019-04-18 01:07:57 -05:00
bool FFmpegReader : : GetAVFrame ( ) {
2015-06-01 00:20:14 -07:00
int frameFinished = - 1 ;
2018-03-21 02:10:46 -05:00
int ret = 0 ;
2011-10-11 08:44:27 -05:00
2015-06-01 00:20:14 -07:00
// Decode video frame
2015-09-23 00:27:28 -05:00
AVFrame * next_frame = AV_ALLOCATE_FRAME ( ) ;
2019-04-18 01:07:57 -05:00
# pragma omp critical (packet_cache)
2011-12-11 20:42:50 -06:00
{
2019-04-18 01:07:57 -05:00
# if IS_FFMPEG_3_2
2018-03-21 02:10:46 -05:00
frameFinished = 0 ;
2018-08-31 21:36:23 -07:00
2018-03-21 02:10:46 -05:00
ret = avcodec_send_packet ( pCodecCtx , packet ) ;
2018-08-31 21:36:23 -07:00
2020-02-10 01:50:31 -05:00
# if HAVE_HW_ACCEL
2018-09-09 10:54:31 -07:00
// Get the format from the variables set in get_hw_dec_format
hw_de_av_pix_fmt = hw_de_av_pix_fmt_global ;
hw_de_av_device_type = hw_de_av_device_type_global ;
2020-02-10 01:50:31 -05:00
# endif // HAVE_HW_ACCEL
2018-03-21 02:10:46 -05:00
if ( ret < 0 | | ret = = AVERROR ( EAGAIN ) | | ret = = AVERROR_EOF ) {
2019-07-03 14:14:02 -04:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::GetAVFrame (Packet not sent) " ) ;
2012-07-02 00:51:10 -05:00
}
2018-03-21 02:10:46 -05:00
else {
2019-08-20 04:32:47 -04:00
AVFrame * next_frame2 ;
2020-02-10 01:50:31 -05:00
# if HAVE_HW_ACCEL
2019-08-20 04:32:47 -04:00
if ( hw_de_on & & hw_de_supported ) {
next_frame2 = AV_ALLOCATE_FRAME ( ) ;
}
else
2020-02-10 01:50:31 -05:00
# endif // HAVE_HW_ACCEL
2019-08-20 04:32:47 -04:00
{
next_frame2 = next_frame ;
}
2019-05-08 14:53:23 -07:00
pFrame = AV_ALLOCATE_FRAME ( ) ;
2018-03-21 02:10:46 -05:00
while ( ret > = 0 ) {
2019-04-18 01:07:57 -05:00
ret = avcodec_receive_frame ( pCodecCtx , next_frame2 ) ;
if ( ret = = AVERROR ( EAGAIN ) | | ret = = AVERROR_EOF ) {
break ;
}
if ( ret ! = 0 ) {
2019-07-03 14:14:02 -04:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::GetAVFrame (invalid return frame received) " ) ;
2019-04-18 01:07:57 -05:00
}
2020-02-10 01:50:31 -05:00
# if HAVE_HW_ACCEL
2019-04-18 01:07:57 -05:00
if ( hw_de_on & & hw_de_supported ) {
int err ;
if ( next_frame2 - > format = = hw_de_av_pix_fmt ) {
next_frame - > format = AV_PIX_FMT_YUV420P ;
if ( ( err = av_hwframe_transfer_data ( next_frame , next_frame2 , 0 ) ) < 0 ) {
2019-07-03 14:14:02 -04:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::GetAVFrame (Failed to transfer data to output frame) " ) ;
2019-04-18 01:07:57 -05:00
}
if ( ( err = av_frame_copy_props ( next_frame , next_frame2 ) ) < 0 ) {
2019-07-03 14:14:02 -04:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::GetAVFrame (Failed to copy props to output frame) " ) ;
2018-08-31 21:36:23 -07:00
}
}
2019-04-18 01:07:57 -05:00
}
else
2020-02-10 01:50:31 -05:00
# endif // HAVE_HW_ACCEL
2019-04-18 01:07:57 -05:00
{ // No hardware acceleration used -> no copy from GPU memory needed
next_frame = next_frame2 ;
}
2018-03-21 02:10:46 -05:00
// TODO also handle possible further frames
// Use only the first frame like avcodec_decode_video2
if ( frameFinished = = 0 ) {
frameFinished = 1 ;
av_image_alloc ( pFrame - > data , pFrame - > linesize , info . width , info . height , ( AVPixelFormat ) ( pStream - > codecpar - > format ) , 1 ) ;
av_image_copy ( pFrame - > data , pFrame - > linesize , ( const uint8_t * * ) next_frame - > data , next_frame - > linesize ,
( AVPixelFormat ) ( pStream - > codecpar - > format ) , info . width , info . height ) ;
}
}
2020-02-10 01:50:31 -05:00
# if HAVE_HW_ACCEL
2018-09-08 21:17:24 -07:00
if ( hw_de_on & & hw_de_supported ) {
AV_FREE_FRAME ( & next_frame2 ) ;
}
2020-02-10 01:50:31 -05:00
# endif // HAVE_HW_ACCEL
2018-03-21 02:10:46 -05:00
}
2019-04-18 01:07:57 -05:00
# else
2018-03-21 02:10:46 -05:00
avcodec_decode_video2 ( pCodecCtx , next_frame , & frameFinished , packet ) ;
2019-05-10 11:39:26 -07:00
// always allocate pFrame (because we do that in the ffmpeg >= 3.2 as well); it will always be freed later
pFrame = AV_ALLOCATE_FRAME ( ) ;
2018-03-21 02:10:46 -05:00
// is frame finished
if ( frameFinished ) {
// AVFrames are clobbered on the each call to avcodec_decode_video, so we
// must make a copy of the image data before this method is called again.
avpicture_alloc ( ( AVPicture * ) pFrame , pCodecCtx - > pix_fmt , info . width , info . height ) ;
av_picture_copy ( ( AVPicture * ) pFrame , ( AVPicture * ) next_frame , pCodecCtx - > pix_fmt , info . width ,
info . height ) ;
}
2020-02-10 01:50:31 -05:00
# endif // IS_FFMPEG_3_2
2012-07-03 16:58:07 -05:00
}
2011-12-11 20:42:50 -06:00
2012-10-12 16:41:23 -05:00
// deallocate the frame
2015-09-23 00:27:28 -05:00
AV_FREE_FRAME ( & next_frame ) ;
2012-10-12 16:41:23 -05:00
2011-10-11 08:44:27 -05:00
// Did we get a video frame?
return frameFinished ;
}
// Check the current seek position and determine if we need to seek again
2019-04-18 01:07:57 -05:00
bool FFmpegReader : : CheckSeek ( bool is_video ) {
2011-10-11 08:44:27 -05:00
// Are we seeking for a specific frame?
2019-04-18 01:07:57 -05:00
if ( is_seeking ) {
2014-08-27 09:44:27 -05:00
// Determine if both an audio and video packet have been decoded since the seek happened.
// If not, allow the ReadStream method to keep looping
2014-09-13 16:35:11 -05:00
if ( ( is_video_seek & & ! seek_video_frame_found ) | | ( ! is_video_seek & & ! seek_audio_frame_found ) )
2014-08-27 09:44:27 -05:00
return false ;
2016-01-05 01:59:50 -06:00
// Check for both streams
if ( ( info . has_video & & ! seek_video_frame_found ) | | ( info . has_audio & & ! seek_audio_frame_found ) )
return false ;
2014-09-26 09:35:38 -05:00
// Determine max seeked frame
2017-09-28 16:03:01 -05:00
int64_t max_seeked_frame = seek_audio_frame_found ; // determine max seeked frame
2014-09-26 09:35:38 -05:00
if ( seek_video_frame_found > max_seeked_frame )
max_seeked_frame = seek_video_frame_found ;
2011-10-11 08:44:27 -05:00
// determine if we are "before" the requested frame
2019-04-18 01:07:57 -05:00
if ( max_seeked_frame > = seeking_frame ) {
2012-10-12 16:41:23 -05:00
// SEEKED TOO FAR
2016-04-21 01:39:17 -05:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::CheckSeek (Too far, seek again) " , " is_video_seek " , is_video_seek , " max_seeked_frame " , max_seeked_frame , " seeking_frame " , seeking_frame , " seeking_pts " , seeking_pts , " seek_video_frame_found " , seek_video_frame_found , " seek_audio_frame_found " , seek_audio_frame_found ) ;
2011-10-11 08:44:27 -05:00
2012-10-12 16:41:23 -05:00
// Seek again... to the nearest Keyframe
2018-04-14 16:25:13 -05:00
Seek ( seeking_frame - ( 10 * seek_count * seek_count ) ) ;
2019-04-18 01:07:57 -05:00
} else {
2014-09-13 16:35:11 -05:00
// SEEK WORKED
2016-04-21 01:39:17 -05:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::CheckSeek (Successful) " , " is_video_seek " , is_video_seek , " current_pts " , packet - > pts , " seeking_pts " , seeking_pts , " seeking_frame " , seeking_frame , " seek_video_frame_found " , seek_video_frame_found , " seek_audio_frame_found " , seek_audio_frame_found ) ;
2014-08-27 09:44:27 -05:00
2012-10-12 16:41:23 -05:00
// Seek worked, and we are "before" the requested frame
is_seeking = false ;
seeking_frame = 0 ;
2014-04-05 10:19:20 -05:00
seeking_pts = - 1 ;
2011-10-11 08:44:27 -05:00
}
}
// return the pts to seek to (if any)
return is_seeking ;
}
// Process a video packet
2019-04-18 01:07:57 -05:00
void FFmpegReader : : ProcessVideoPacket ( int64_t requested_frame ) {
2011-10-24 08:22:21 -05:00
// Calculate current frame #
2017-09-28 16:03:01 -05:00
int64_t current_frame = ConvertVideoPTStoFrame ( GetVideoPTS ( ) ) ;
2011-10-11 08:44:27 -05:00
2016-01-05 01:59:50 -06:00
// Track 1st video packet after a successful seek
if ( ! seek_video_frame_found & & is_seeking )
seek_video_frame_found = current_frame ;
2015-08-24 01:05:48 -05:00
// Are we close enough to decode the frame? and is this frame # valid?
2019-04-18 01:07:57 -05:00
if ( ( current_frame < ( requested_frame - 20 ) ) or ( current_frame = = - 1 ) ) {
2015-06-01 00:20:14 -07:00
// Remove frame and packet
RemoveAVFrame ( pFrame ) ;
2012-07-03 16:58:07 -05:00
2014-08-27 09:44:27 -05:00
// Debug output
2019-07-03 14:14:02 -04:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::ProcessVideoPacket (Skipped) " , " requested_frame " , requested_frame , " current_frame " , current_frame ) ;
2014-08-27 09:44:27 -05:00
2011-10-11 08:44:27 -05:00
// Skip to next frame without decoding or caching
return ;
2012-07-01 01:43:06 -05:00
}
2011-10-11 08:44:27 -05:00
2014-08-27 09:44:27 -05:00
// Debug output
2019-07-03 14:14:02 -04:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::ProcessVideoPacket (Before) " , " requested_frame " , requested_frame , " current_frame " , current_frame ) ;
2014-08-27 09:44:27 -05:00
2012-07-01 01:43:06 -05:00
// Init some things local (for OpenMP)
2018-03-21 02:10:46 -05:00
PixelFormat pix_fmt = AV_GET_CODEC_PIXEL_FORMAT ( pStream , pCodecCtx ) ;
2012-06-18 09:26:14 -05:00
int height = info . height ;
int width = info . width ;
2017-09-28 16:03:01 -05:00
int64_t video_length = info . video_length ;
2018-03-21 02:10:46 -05:00
AVFrame * my_frame = pFrame ;
2019-05-08 14:53:23 -07:00
pFrame = NULL ;
2012-06-18 09:26:14 -05:00
2012-07-06 02:34:18 -05:00
// Add video frame to list of processing video frames
2019-04-18 01:07:57 -05:00
const GenericScopedLock < CriticalSection > lock ( processingCriticalSection ) ;
2012-07-06 02:34:18 -05:00
processing_video_frames [ current_frame ] = current_frame ;
2019-04-18 01:07:57 -05:00
# pragma omp task firstprivate(current_frame, my_frame, height, width, video_length, pix_fmt)
2011-10-11 08:44:27 -05:00
{
2012-06-29 02:02:12 -05:00
// Create variables for a RGB Frame (since most videos are not in RGB, we must convert it)
2012-06-18 09:26:14 -05:00
AVFrame * pFrameRGB = NULL ;
int numBytes ;
uint8_t * buffer = NULL ;
// Allocate an AVFrame structure
2015-09-23 00:27:28 -05:00
pFrameRGB = AV_ALLOCATE_FRAME ( ) ;
2012-06-18 09:26:14 -05:00
if ( pFrameRGB = = NULL )
throw OutOfBoundsFrame ( " Convert Image Broke! " , current_frame , video_length ) ;
2019-01-19 02:18:52 -06:00
// Determine the max size of this source image (based on the timeline's size, the scaling mode,
// and the scaling keyframes). This is a performance improvement, to keep the images as small as possible,
// without losing quality. NOTE: We cannot go smaller than the timeline itself, or the add_layer timeline
// method will scale it back to timeline size before scaling it smaller again. This needs to be fixed in
// the future.
Large refactor of Timeline, TimelineBase, ClipBase, and Clip, to allow a Clip access to the parent timeline instance (if available), and thus, certain properties (preview size, timeline FPS, etc...). This allows for a simpler rendering of Clip keyframes (during the Clip::GetFrame method), and a simpler Timeline class, that can change the preview window size dynamically and no longer requires a Singleton Settings class.
- Also removed "crop" from Clip class, as it was never implmeneted correctly, and we have a fully functional "crop" effect when needed
- Added caching to Clip class, to optimize previewing of cached frames (much faster than previous)
2020-10-04 16:59:21 -05:00
int max_width = info . width ;
int max_height = info . height ;
2019-01-19 02:18:52 -06:00
Large refactor of Timeline, TimelineBase, ClipBase, and Clip, to allow a Clip access to the parent timeline instance (if available), and thus, certain properties (preview size, timeline FPS, etc...). This allows for a simpler rendering of Clip keyframes (during the Clip::GetFrame method), and a simpler Timeline class, that can change the preview window size dynamically and no longer requires a Singleton Settings class.
- Also removed "crop" from Clip class, as it was never implmeneted correctly, and we have a fully functional "crop" effect when needed
- Added caching to Clip class, to optimize previewing of cached frames (much faster than previous)
2020-10-04 16:59:21 -05:00
Clip * parent = ( Clip * ) ParentClip ( ) ;
2019-01-19 02:18:52 -06:00
if ( parent ) {
Large refactor of Timeline, TimelineBase, ClipBase, and Clip, to allow a Clip access to the parent timeline instance (if available), and thus, certain properties (preview size, timeline FPS, etc...). This allows for a simpler rendering of Clip keyframes (during the Clip::GetFrame method), and a simpler Timeline class, that can change the preview window size dynamically and no longer requires a Singleton Settings class.
- Also removed "crop" from Clip class, as it was never implmeneted correctly, and we have a fully functional "crop" effect when needed
- Added caching to Clip class, to optimize previewing of cached frames (much faster than previous)
2020-10-04 16:59:21 -05:00
if ( parent - > ParentTimeline ( ) ) {
// Set max width/height based on parent clip's timeline (if attached to a timeline)
max_width = parent - > ParentTimeline ( ) - > preview_width ;
max_height = parent - > ParentTimeline ( ) - > preview_height ;
}
2019-01-19 02:18:52 -06:00
if ( parent - > scale = = SCALE_FIT | | parent - > scale = = SCALE_STRETCH ) {
// Best fit or Stretch scaling (based on max timeline size * scaling keyframes)
float max_scale_x = parent - > scale_x . GetMaxPoint ( ) . co . Y ;
float max_scale_y = parent - > scale_y . GetMaxPoint ( ) . co . Y ;
2019-08-04 23:51:02 -04:00
max_width = std : : max ( float ( max_width ) , max_width * max_scale_x ) ;
max_height = std : : max ( float ( max_height ) , max_height * max_scale_y ) ;
2019-01-19 02:18:52 -06:00
} else if ( parent - > scale = = SCALE_CROP ) {
// Cropping scale mode (based on max timeline size * cropped size * scaling keyframes)
float max_scale_x = parent - > scale_x . GetMaxPoint ( ) . co . Y ;
float max_scale_y = parent - > scale_y . GetMaxPoint ( ) . co . Y ;
QSize width_size ( max_width * max_scale_x ,
round ( max_width / ( float ( info . width ) / float ( info . height ) ) ) ) ;
QSize height_size ( round ( max_height / ( float ( info . height ) / float ( info . width ) ) ) ,
max_height * max_scale_y ) ;
// respect aspect ratio
if ( width_size . width ( ) > = max_width & & width_size . height ( ) > = max_height ) {
2019-08-04 23:51:02 -04:00
max_width = std : : max ( max_width , width_size . width ( ) ) ;
max_height = std : : max ( max_height , width_size . height ( ) ) ;
2019-04-18 01:07:57 -05:00
} else {
2019-08-04 23:51:02 -04:00
max_width = std : : max ( max_width , height_size . width ( ) ) ;
max_height = std : : max ( max_height , height_size . height ( ) ) ;
2019-01-19 02:18:52 -06:00
}
} else {
// No scaling, use original image size (slower)
max_width = info . width ;
max_height = info . height ;
}
}
// Determine if image needs to be scaled (for performance reasons)
2016-09-14 04:11:12 -05:00
int original_height = height ;
if ( max_width ! = 0 & & max_height ! = 0 & & max_width < width & & max_height < height ) {
// Override width and height (but maintain aspect ratio)
float ratio = float ( width ) / float ( height ) ;
int possible_width = round ( max_height * ratio ) ;
int possible_height = round ( max_width / ratio ) ;
if ( possible_width < = max_width ) {
// use calculated width, and max_height
width = possible_width ;
height = max_height ;
} else {
// use max_width, and calculated height
width = max_width ;
height = possible_height ;
}
}
2012-06-18 09:26:14 -05:00
// Determine required buffer size and allocate buffer
2018-03-21 02:10:46 -05:00
numBytes = AV_GET_IMAGE_SIZE ( PIX_FMT_RGBA , width , height ) ;
2019-04-18 01:07:57 -05:00
# pragma omp critical (video_buffer)
2016-07-30 16:57:48 -05:00
buffer = ( uint8_t * ) av_malloc ( numBytes * sizeof ( uint8_t ) ) ;
2012-06-18 09:26:14 -05:00
2018-03-21 02:10:46 -05:00
// Copy picture data from one AVFrame (or AVPicture) to another one.
AV_COPY_PICTURE_DATA ( pFrameRGB , buffer , PIX_FMT_RGBA , width , height ) ;
2012-06-18 09:26:14 -05:00
2019-01-09 16:50:40 -06:00
int scale_mode = SWS_FAST_BILINEAR ;
if ( openshot : : Settings : : Instance ( ) - > HIGH_QUALITY_SCALING ) {
2019-11-17 16:34:50 -06:00
scale_mode = SWS_BICUBIC ;
2019-01-09 16:50:40 -06:00
}
2018-03-21 02:10:46 -05:00
SwsContext * img_convert_ctx = sws_getContext ( info . width , info . height , AV_GET_CODEC_PIXEL_FORMAT ( pStream , pCodecCtx ) , width ,
2020-10-14 03:06:30 -05:00
height , PIX_FMT_RGBA , scale_mode , NULL , NULL , NULL ) ;
2016-09-14 04:11:12 -05:00
2012-06-18 09:26:14 -05:00
// Resize / Convert to RGB
2012-07-04 03:07:26 -05:00
sws_scale ( img_convert_ctx , my_frame - > data , my_frame - > linesize , 0 ,
2016-09-14 04:11:12 -05:00
original_height , pFrameRGB - > data , pFrameRGB - > linesize ) ;
2012-06-18 09:26:14 -05:00
2015-06-01 00:20:14 -07:00
// Create or get the existing frame object
2017-08-20 17:37:39 -05:00
std : : shared_ptr < Frame > f = CreateFrame ( current_frame ) ;
2012-07-04 03:07:26 -05:00
2012-08-15 17:27:14 -05:00
// Add Image data to frame
2020-12-31 17:35:49 -06:00
if ( ! ffmpeg_has_alpha ( AV_GET_CODEC_PIXEL_FORMAT ( pStream , pCodecCtx ) ) ) {
// Add image with no alpha channel, Speed optimization
f - > AddImage ( width , height , 4 , QImage : : Format_RGBA8888_Premultiplied , buffer ) ;
} else {
// Add image with alpha channel (this will be converted to premultipled when needed, but is slower)
f - > AddImage ( width , height , 4 , QImage : : Format_RGBA8888 , buffer ) ;
}
2012-06-18 09:26:14 -05:00
2015-06-01 00:20:14 -07:00
// Update working cache
2016-08-31 02:02:54 -05:00
working_cache . Add ( f ) ;
2012-06-18 09:26:14 -05:00
2016-01-05 01:59:50 -06:00
// Keep track of last last_video_frame
2019-04-18 01:07:57 -05:00
# pragma omp critical (video_buffer)
2016-01-05 01:59:50 -06:00
last_video_frame = f ;
2012-06-18 09:26:14 -05:00
// Free the RGB image
av_free ( buffer ) ;
2015-09-23 00:27:28 -05:00
AV_FREE_FRAME ( & pFrameRGB ) ;
2012-06-18 09:26:14 -05:00
2015-06-01 00:20:14 -07:00
// Remove frame and packet
RemoveAVFrame ( my_frame ) ;
2016-09-14 04:11:12 -05:00
sws_freeContext ( img_convert_ctx ) ;
2012-07-03 16:58:07 -05:00
2012-07-06 02:34:18 -05:00
// Remove video frame from list of processing video frames
2014-08-27 09:44:27 -05:00
{
2019-04-18 01:07:57 -05:00
const GenericScopedLock < CriticalSection > lock ( processingCriticalSection ) ;
2015-06-01 00:20:14 -07:00
processing_video_frames . erase ( current_frame ) ;
processed_video_frames [ current_frame ] = current_frame ;
2014-08-27 09:44:27 -05:00
}
// Debug output
2019-07-03 14:14:02 -04:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::ProcessVideoPacket (After) " , " requested_frame " , requested_frame , " current_frame " , current_frame , " f->number " , f - > number ) ;
2012-07-06 02:34:18 -05:00
2011-10-11 08:44:27 -05:00
} // end omp task
2012-06-18 09:26:14 -05:00
2011-10-11 08:44:27 -05:00
}
// Process an audio packet: decode it, resample to interleaved 16-bit PCM,
// and distribute the samples (per channel) across one or more Frame objects
// in the working cache.
//
// @param requested_frame  The frame the caller ultimately wants; packets whose
//                         target is more than 20 frames before it are skipped.
// @param target_frame     The frame number this packet's audio starts on.
// @param starting_sample  Sample offset within target_frame where audio begins.
//
// NOTE(review): shared bookkeeping (processing_audio_frames, etc.) is guarded
// by processingCriticalSection; the decode itself is serialized with an OpenMP
// critical section. Statement order here is load-bearing — do not reorder.
void FFmpegReader::ProcessAudioPacket(int64_t requested_frame, int64_t target_frame, int starting_sample) {
	// Track 1st audio packet after a successful seek
	if (!seek_audio_frame_found && is_seeking)
		seek_audio_frame_found = target_frame;

	// Are we close enough to decode the frame's audio?
	if (target_frame < (requested_frame - 20)) {
		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (Skipped)", "requested_frame", requested_frame, "target_frame", target_frame, "starting_sample", starting_sample);

		// Skip to next frame without decoding or caching
		return;
	}

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (Before)", "requested_frame", requested_frame, "target_frame", target_frame, "starting_sample", starting_sample);

	// Init an AVFrame to hold the decoded audio samples
	int frame_finished = 0;
	AVFrame *audio_frame = AV_ALLOCATE_FRAME();
	AV_RESET_FRAME(audio_frame);

	int packet_samples = 0;
	int data_size = 0;

	// Decode the packet. Only one thread may talk to the audio codec at a time.
	#pragma omp critical (ProcessAudioPacket)
	{
	#if IS_FFMPEG_3_2
		// FFmpeg >= 3.2: send/receive API. Loop until the packet is consumed
		// (packet->size is zeroed once accepted) or the decoder reports an error.
		int ret = 0;
		frame_finished = 1;
		while ((packet->size > 0 || (!packet->data && frame_finished)) && ret >= 0) {
			frame_finished = 0;
			ret = avcodec_send_packet(aCodecCtx, packet);
			if (ret < 0 && ret != AVERROR(EINVAL) && ret != AVERROR_EOF) {
				// Unrecoverable send error: signal EOF to the decoder and stop
				avcodec_send_packet(aCodecCtx, NULL);
				break;
			}
			if (ret >= 0)
				packet->size = 0;	// packet accepted; don't resend its bytes
			ret = avcodec_receive_frame(aCodecCtx, audio_frame);
			if (ret >= 0)
				frame_finished = 1;
			if (ret == AVERROR(EINVAL) || ret == AVERROR_EOF) {
				avcodec_flush_buffers(aCodecCtx);
				ret = 0;
			}
			if (ret >= 0) {
				ret = frame_finished;
			}
		}
		if (!packet->data && !frame_finished)
		{
			ret = -1;
		}
	#else
		// Legacy API (FFmpeg < 3.2): one-shot decode
		int used = avcodec_decode_audio4(aCodecCtx, audio_frame, &frame_finished, packet);
	#endif
	}

	if (frame_finished) {
		// determine how many samples were decoded
		int plane_size = -1;
		data_size = av_samples_get_buffer_size(&plane_size,
											   AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channels,
											   audio_frame->nb_samples,
											   (AVSampleFormat) (AV_GET_SAMPLE_FORMAT(aStream, aCodecCtx)), 1);

		// Calculate total number of samples (all channels, interleaved count)
		packet_samples = audio_frame->nb_samples * AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channels;
	}

	// Estimate the # of samples and the end of this packet's location (to prevent GAPS for the next timestamp)
	int pts_remaining_samples = packet_samples / info.channels; // per-channel sample count

	// DEBUG (FOR AUDIO ISSUES) - Get the audio packet start time (in seconds)
	int64_t adjusted_pts = packet->pts + audio_pts_offset;
	double audio_seconds = double(adjusted_pts) * info.audio_timebase.ToDouble();
	double sample_seconds = double(pts_total) / info.sample_rate;

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (Decode Info A)", "pts_counter", pts_counter, "PTS", adjusted_pts, "Offset", audio_pts_offset, "PTS Diff", adjusted_pts - prev_pts, "Samples", pts_remaining_samples, "Sample PTS ratio", float(adjusted_pts - prev_pts) / pts_remaining_samples);
	ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (Decode Info B)", "Sample Diff", pts_remaining_samples - prev_samples - prev_pts, "Total", pts_total, "PTS Seconds", audio_seconds, "Sample Seconds", sample_seconds, "Seconds Diff", audio_seconds - sample_seconds, "raw samples", packet_samples);

	// DEBUG (FOR AUDIO ISSUES) - running PTS/sample accounting
	prev_pts = adjusted_pts;
	pts_total += pts_remaining_samples;
	pts_counter++;
	prev_samples = pts_remaining_samples;

	// Add audio frame to list of processing audio frames
	{
		const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
		processing_audio_frames.insert(std::pair<int, int>(previous_packet_location.frame, previous_packet_location.frame));
	}

	// Walk the packet's samples forward across frame boundaries, registering
	// every frame this packet will touch (so readers can wait on them).
	while (pts_remaining_samples) {
		// Get Samples per frame (for this frame number)
		int samples_per_frame = Frame::GetSamplesPerFrame(previous_packet_location.frame, info.fps, info.sample_rate, info.channels);

		// Calculate # of samples to add to this frame
		int samples = samples_per_frame - previous_packet_location.sample_start;
		if (samples > pts_remaining_samples)
			samples = pts_remaining_samples;

		// Decrement remaining samples
		pts_remaining_samples -= samples;

		if (pts_remaining_samples > 0) {
			// next frame
			previous_packet_location.frame++;
			previous_packet_location.sample_start = 0;

			// Add audio frame to list of processing audio frames
			{
				const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
				processing_audio_frames.insert(std::pair<int, int>(previous_packet_location.frame, previous_packet_location.frame));
			}

		} else {
			// Increment sample start
			previous_packet_location.sample_start += samples;
		}
	}

	// Allocate audio buffer (holds the resampled, interleaved S16 output)
	int16_t *audio_buf = new int16_t[AVCODEC_MAX_AUDIO_FRAME_SIZE + MY_INPUT_BUFFER_PADDING_SIZE];

	ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (ReSample)", "packet_samples", packet_samples, "info.channels", info.channels, "info.sample_rate", info.sample_rate, "aCodecCtx->sample_fmt", AV_GET_SAMPLE_FORMAT(aStream, aCodecCtx), "AV_SAMPLE_FMT_S16", AV_SAMPLE_FMT_S16);

	// Create output frame
	AVFrame *audio_converted = AV_ALLOCATE_FRAME();
	AV_RESET_FRAME(audio_converted);
	audio_converted->nb_samples = audio_frame->nb_samples;
	av_samples_alloc(audio_converted->data, audio_converted->linesize, info.channels, audio_frame->nb_samples, AV_SAMPLE_FMT_S16, 0);

	SWRCONTEXT *avr = NULL;
	int nb_samples = 0;

	// setup resample context (same rate/layout in and out; only the sample
	// format is converted, to AV_SAMPLE_FMT_S16)
	avr = SWR_ALLOC();
	av_opt_set_int(avr, "in_channel_layout", AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channel_layout, 0);
	av_opt_set_int(avr, "out_channel_layout", AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channel_layout, 0);
	av_opt_set_int(avr, "in_sample_fmt", AV_GET_SAMPLE_FORMAT(aStream, aCodecCtx), 0);
	av_opt_set_int(avr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
	av_opt_set_int(avr, "in_sample_rate", info.sample_rate, 0);
	av_opt_set_int(avr, "out_sample_rate", info.sample_rate, 0);
	av_opt_set_int(avr, "in_channels", info.channels, 0);
	av_opt_set_int(avr, "out_channels", info.channels, 0);
	SWR_INIT(avr);

	// Convert audio samples
	nb_samples = SWR_CONVERT(avr,	// audio resample context
							 audio_converted->data,	// output data pointers
							 audio_converted->linesize[0],	// output plane size, in bytes. (0 if unknown)
							 audio_converted->nb_samples,	// maximum number of samples that the output buffer can hold
							 audio_frame->data,	// input data pointers
							 audio_frame->linesize[0],	// input plane size, in bytes (0 if unknown)
							 audio_frame->nb_samples);	// number of input samples to convert

	// Copy audio samples over original samples
	memcpy(audio_buf, audio_converted->data[0], audio_converted->nb_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * info.channels);

	// Deallocate resample buffer
	SWR_CLOSE(avr);
	SWR_FREE(&avr);
	avr = NULL;

	// Free AVFrames
	av_free(audio_converted->data[0]);
	AV_FREE_FRAME(&audio_converted);

	int64_t starting_frame_number = -1;
	bool partial_frame = true;
	// For each channel: de-interleave that channel's samples into a float
	// buffer, then spread the buffer across the correct Frame objects.
	for (int channel_filter = 0; channel_filter < info.channels; channel_filter++) {
		// Array of floats (to hold samples for each channel)
		starting_frame_number = target_frame;
		int channel_buffer_size = packet_samples / info.channels;
		float *channel_buffer = new float[channel_buffer_size];

		// Init buffer array
		for (int z = 0; z < channel_buffer_size; z++)
			channel_buffer[z] = 0.0f;

		// Loop through all samples and add them to our Frame based on channel.
		// Toggle through each channel number, since channel data is stored like (left right left right)
		int channel = 0;
		int position = 0;
		for (int sample = 0; sample < packet_samples; sample++) {
			// Only add samples for current channel
			if (channel_filter == channel) {
				// Add sample (convert from (-32768 to 32768) to (-1.0 to 1.0))
				channel_buffer[position] = audio_buf[sample] * (1.0f / (1 << 15));

				// Increment audio position
				position++;
			}

			// increment channel (if needed)
			if ((channel + 1) < info.channels)
				// move to next channel
				channel++;
			else
				// reset channel
				channel = 0;
		}

		// Loop through samples, and add them to the correct frames
		int start = starting_sample;
		int remaining_samples = channel_buffer_size;
		float *iterate_channel_buffer = channel_buffer;	// pointer to channel buffer
		while (remaining_samples > 0) {
			// Get Samples per frame (for this frame number)
			int samples_per_frame = Frame::GetSamplesPerFrame(starting_frame_number, info.fps, info.sample_rate, info.channels);

			// Calculate # of samples to add to this frame
			int samples = samples_per_frame - start;
			if (samples > remaining_samples)
				samples = remaining_samples;

			// Create or get the existing frame object
			std::shared_ptr<Frame> f = CreateFrame(starting_frame_number);

			// Determine if this frame was "partially" filled in
			if (samples_per_frame == start + samples)
				partial_frame = false;
			else
				partial_frame = true;

			// Add samples for current channel to the frame.
			f->AddAudio(true, channel_filter, start, iterate_channel_buffer, samples, 1.0f);

			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (f->AddAudio)", "frame", starting_frame_number, "start", start, "samples", samples, "channel", channel_filter, "partial_frame", partial_frame, "samples_per_frame", samples_per_frame);

			// Add or update cache
			working_cache.Add(f);

			// Decrement remaining samples
			remaining_samples -= samples;

			// Increment buffer (to next set of samples)
			if (remaining_samples > 0)
				iterate_channel_buffer += samples;

			// Increment frame number
			starting_frame_number++;

			// Reset starting sample #
			start = 0;
		}

		// clear channel buffer
		delete[] channel_buffer;
		channel_buffer = NULL;
		iterate_channel_buffer = NULL;
	}

	// Clean up some arrays
	delete[] audio_buf;
	audio_buf = NULL;

	// Remove audio frame from list of processing audio frames
	{
		const GenericScopedLock<CriticalSection> lock(processingCriticalSection);

		// Update all frames as completed
		for (int64_t f = target_frame; f < starting_frame_number; f++) {
			// Remove the frame # from the processing list. NOTE: If more than one thread is
			// processing this frame, the frame # will be in this list multiple times. We are only
			// removing a single instance of it here.
			processing_audio_frames.erase(processing_audio_frames.find(f));

			// Check and see if this frame is also being processed by another thread
			if (processing_audio_frames.count(f) == 0)
				// No other thread is processing it. Mark the audio as processed (final)
				processed_audio_frames[f] = f;
		}

		if (target_frame == starting_frame_number) {
			// This typically never happens, but just in case, remove the currently processing number
			processing_audio_frames.erase(processing_audio_frames.find(target_frame));
		}
	}

	// Free audio frame
	AV_FREE_FRAME(&audio_frame);

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (After)", "requested_frame", requested_frame, "starting_frame", target_frame, "end_frame", starting_frame_number - 1);

}
// Seek to a specific frame.  This is not always frame accurate, it's more of an estimation on many codecs.
//
// Clamps the request into [1, video_length], drains any in-flight frame
// processing, resets all per-seek state, and then either re-opens the file
// (for requests near frame 1 — more reliable than seeking) or asks FFmpeg to
// seek the video stream (falling back to the audio stream) to the nearest
// preceding key-frame.  On a failed av_seek_frame, seeking is disabled for
// this reader and the file is re-opened instead.
void FFmpegReader::Seek(int64_t requested_frame) {
	// Adjust for a requested frame that is too small or too large
	if (requested_frame < 1)
		requested_frame = 1;
	if (requested_frame > info.video_length)
		requested_frame = info.video_length;

	// Snapshot the sizes of the in-flight processing lists (under lock)
	int processing_video_frames_size = 0;
	int processing_audio_frames_size = 0;
	{
		const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
		processing_video_frames_size = processing_video_frames.size();
		processing_audio_frames_size = processing_audio_frames.size();
	}

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::Seek", "requested_frame", requested_frame, "seek_count", seek_count, "last_frame", last_frame, "processing_video_frames_size", processing_video_frames_size, "processing_audio_frames_size", processing_audio_frames_size, "video_pts_offset", video_pts_offset);

	// Wait for any processing frames to complete (poll every 3 ms)
	while (processing_video_frames_size + processing_audio_frames_size > 0) {
		std::this_thread::sleep_for(std::chrono::milliseconds(3));
		const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
		processing_video_frames_size = processing_video_frames.size();
		processing_audio_frames_size = processing_audio_frames.size();
	}

	// Clear working cache (since we are seeking to another location in the file)
	working_cache.Clear();
	missing_frames.Clear();

	// Clear processed lists
	{
		const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
		processing_audio_frames.clear();
		processing_video_frames.clear();
		processed_video_frames.clear();
		processed_audio_frames.clear();
		missing_audio_frames.clear();
		missing_video_frames.clear();
		missing_audio_frames_source.clear();
		missing_video_frames_source.clear();
		checked_frames.clear();
	}

	// Reset the last frame variable (and related per-seek counters)
	last_frame = 0;
	current_video_frame = 0;
	largest_frame_processed = 0;
	num_checks_since_final = 0;
	num_packets_since_video_frame = 0;
	has_missing_frames = false;

	// Remember the stream flags, since Close()/Open() below may reset them
	bool has_audio_override = info.has_audio;
	bool has_video_override = info.has_video;

	// Increment seek count
	seek_count++;

	// If seeking near frame 1, we need to close and re-open the file (this is more reliable than seeking)
	int buffer_amount = std::max(OPEN_MP_NUM_PROCESSORS, 8);
	if (requested_frame - buffer_amount < 20) {
		// Close and re-open file (basically seeking to frame 1)
		Close();
		Open();

		// Update overrides (since closing and re-opening might update these)
		info.has_audio = has_audio_override;
		info.has_video = has_video_override;

		// Not actually seeking, so clear these flags
		is_seeking = false;
		if (seek_count == 1) {
			// Don't redefine this on multiple seek attempts for a specific frame
			seeking_frame = 1;
			seeking_pts = ConvertFrameToVideoPTS(1);
		}
		seek_audio_frame_found = 0; // used to detect which frames to throw away after a seek
		seek_video_frame_found = 0; // used to detect which frames to throw away after a seek

	} else {
		// Seek to nearest key-frame (aka, i-frame)
		bool seek_worked = false;
		int64_t seek_target = 0;

		// Seek video stream (if any)
		if (!seek_worked && info.has_video) {
			seek_target = ConvertFrameToVideoPTS(requested_frame - buffer_amount);
			if (av_seek_frame(pFormatCtx, info.video_stream_index, seek_target, AVSEEK_FLAG_BACKWARD) < 0) {
				fprintf(stderr, "%s: error while seeking video stream\n", pFormatCtx->AV_FILENAME);
			} else {
				// VIDEO SEEK
				is_video_seek = true;
				seek_worked = true;
			}
		}

		// Seek audio stream (if not already seeked... and if an audio stream is found)
		if (!seek_worked && info.has_audio) {
			seek_target = ConvertFrameToAudioPTS(requested_frame - buffer_amount);
			if (av_seek_frame(pFormatCtx, info.audio_stream_index, seek_target, AVSEEK_FLAG_BACKWARD) < 0) {
				fprintf(stderr, "%s: error while seeking audio stream\n", pFormatCtx->AV_FILENAME);
			} else {
				// AUDIO SEEK
				is_video_seek = false;
				seek_worked = true;
			}
		}

		// Was the seek successful?
		if (seek_worked) {
			// Flush audio buffer
			if (info.has_audio)
				avcodec_flush_buffers(aCodecCtx);

			// Flush video buffer
			if (info.has_video)
				avcodec_flush_buffers(pCodecCtx);

			// Reset previous audio location to zero
			previous_packet_location.frame = -1;
			previous_packet_location.sample_start = 0;

			// init seek flags
			is_seeking = true;
			if (seek_count == 1) {
				// Don't redefine this on multiple seek attempts for a specific frame
				seeking_pts = seek_target;
				seeking_frame = requested_frame;
			}
			seek_audio_frame_found = 0; // used to detect which frames to throw away after a seek
			seek_video_frame_found = 0; // used to detect which frames to throw away after a seek

		} else {
			// seek failed
			is_seeking = false;
			seeking_pts = 0;
			seeking_frame = 0;

			// disable seeking for this reader (since it failed)
			// TODO: Find a safer way to do this... not sure how common it is for a seek to fail.
			enable_seek = false;

			// Close and re-open file (basically seeking to frame 1)
			Close();
			Open();

			// Update overrides (since closing and re-opening might update these)
			info.has_audio = has_audio_override;
			info.has_video = has_video_override;
		}
	}
}
2011-10-24 08:22:21 -05:00
// Get the PTS for the current video packet
2019-04-18 01:07:57 -05:00
int64_t FFmpegReader : : GetVideoPTS ( ) {
2017-09-28 16:03:01 -05:00
int64_t current_pts = 0 ;
2019-04-18 01:07:57 -05:00
if ( packet - > dts ! = AV_NOPTS_VALUE )
2012-07-02 00:51:10 -05:00
current_pts = packet - > dts ;
2011-10-24 08:22:21 -05:00
// Return adjusted PTS
return current_pts ;
}
// Update PTS Offset (if any)
2019-04-18 01:07:57 -05:00
void FFmpegReader : : UpdatePTSOffset ( bool is_video ) {
2011-10-24 08:22:21 -05:00
// Determine the offset between the PTS and Frame number (only for 1st frame)
2019-04-18 01:07:57 -05:00
if ( is_video ) {
2011-10-24 08:22:21 -05:00
// VIDEO PACKET
2011-12-11 20:42:50 -06:00
if ( video_pts_offset = = 99999 ) // Has the offset been set yet?
2017-05-17 01:17:42 -05:00
{
2020-09-11 00:55:50 -05:00
// Find the difference between PTS and frame number
video_pts_offset = 0 - GetVideoPTS ( ) ;
// Find the difference between PTS and frame number
// Also, determine if PTS is invalid (too far away from zero)
// We compare the PTS to the timebase value equal to 1 second (which means the PTS
// must be within the -1 second to +1 second of zero, otherwise we ignore it)
2020-09-12 17:05:33 -05:00
// TODO: Please see https://github.com/OpenShot/libopenshot/pull/565#issuecomment-690985272
// for ideas to improve this logic.
2020-09-11 00:55:50 -05:00
int64_t max_offset = info . video_timebase . Reciprocal ( ) . ToFloat ( ) ;
if ( video_pts_offset < - max_offset | | video_pts_offset > max_offset ) {
// Ignore PTS, it seems invalid
video_pts_offset = 0 ;
}
2017-05-17 01:17:42 -05:00
// debug output
2019-07-03 14:14:02 -04:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::UpdatePTSOffset (Video) " , " video_pts_offset " , video_pts_offset , " is_video " , is_video ) ;
2017-05-17 01:17:42 -05:00
}
2019-04-18 01:07:57 -05:00
} else {
2011-10-24 08:22:21 -05:00
// AUDIO PACKET
2011-12-11 20:42:50 -06:00
if ( audio_pts_offset = = 99999 ) // Has the offset been set yet?
2017-05-17 01:17:42 -05:00
{
2020-09-11 00:55:50 -05:00
// Find the difference between PTS and frame number
// Also, determine if PTS is invalid (too far away from zero)
// We compare the PTS to the timebase value equal to 1 second (which means the PTS
// must be within the -1 second to +1 second of zero, otherwise we ignore it)
2020-09-12 17:05:33 -05:00
// TODO: Please see https://github.com/OpenShot/libopenshot/pull/565#issuecomment-690985272
// for ideas to improve this logic.
2020-09-11 00:55:50 -05:00
audio_pts_offset = 0 - packet - > pts ;
int64_t max_offset = info . audio_timebase . Reciprocal ( ) . ToFloat ( ) ;
if ( audio_pts_offset < - max_offset | | audio_pts_offset > max_offset ) {
// Ignore PTS, it seems invalid
audio_pts_offset = 0 ;
}
2017-05-17 01:17:42 -05:00
// debug output
2019-07-03 14:14:02 -04:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::UpdatePTSOffset (Audio) " , " audio_pts_offset " , audio_pts_offset , " is_video " , is_video ) ;
2017-05-17 01:17:42 -05:00
}
2011-10-24 08:22:21 -05:00
}
2011-10-11 08:44:27 -05:00
}
// Convert PTS into Frame Number
//
// Applies the video PTS offset, converts the timestamp into seconds via the
// stream timebase, and rounds to a 1-based frame number.  Also tracks the
// "expected" next frame (current_video_frame) so that duplicated timestamps
// can be collapsed (returns -1) and gaps can be recorded as missing frames.
//
// @param pts  Raw packet timestamp (before the video_pts_offset adjustment).
// @return     1-based frame number, or -1 when this PTS duplicates the
//             previous video frame.
int64_t FFmpegReader::ConvertVideoPTStoFrame(int64_t pts) {
	// Apply PTS offset
	pts = pts + video_pts_offset;
	int64_t previous_video_frame = current_video_frame;

	// Get the video packet start time (in seconds)
	double video_seconds = double(pts) * info.video_timebase.ToDouble();

	// Divide by the video timebase, to get the video frame number (frame # is decimal at this point)
	int64_t frame = round(video_seconds * info.fps.ToDouble()) + 1;

	// Keep track of the expected video frame #
	if (current_video_frame == 0)
		// First frame seen: initialize the expectation
		current_video_frame = frame;
	else {
		// Sometimes frames are duplicated due to identical (or similar) timestamps
		if (frame == previous_video_frame) {
			// return -1 frame number
			frame = -1;
		} else {
			// Increment expected frame
			current_video_frame++;
		}

		if (current_video_frame < frame)
			// has missing frames
			ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ConvertVideoPTStoFrame (detected missing frame)", "calculated frame", frame, "previous_video_frame", previous_video_frame, "current_video_frame", current_video_frame);

		// Sometimes frames are missing due to varying timestamps, or they were dropped. Determine
		// if we are missing a video frame.
		const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
		while (current_video_frame < frame) {
			if (!missing_video_frames.count(current_video_frame)) {
				ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ConvertVideoPTStoFrame (tracking missing frame)", "current_video_frame", current_video_frame, "previous_video_frame", previous_video_frame);
				// Record the gap in both directions: missing frame -> source, source -> missing frame
				missing_video_frames.insert(std::pair<int64_t, int64_t>(current_video_frame, previous_video_frame));
				missing_video_frames_source.insert(std::pair<int64_t, int64_t>(previous_video_frame, current_video_frame));
			}

			// Mark this reader as containing missing frames
			has_missing_frames = true;

			// Increment current frame
			current_video_frame++;
		}
	}

	// Return frame #
	return frame;
}
2011-10-24 08:22:21 -05:00
// Convert Frame Number into Video PTS
2019-04-18 01:07:57 -05:00
int64_t FFmpegReader : : ConvertFrameToVideoPTS ( int64_t frame_number ) {
2011-11-07 17:12:25 -06:00
// Get timestamp of this frame (in seconds)
2011-12-11 20:42:50 -06:00
double seconds = double ( frame_number ) / info . fps . ToDouble ( ) ;
2011-11-07 17:12:25 -06:00
2011-12-11 20:42:50 -06:00
// Calculate the # of video packets in this timestamp
2017-09-28 16:03:01 -05:00
int64_t video_pts = round ( seconds / info . video_timebase . ToDouble ( ) ) ;
2011-11-07 17:12:25 -06:00
2011-12-11 20:42:50 -06:00
// Apply PTS offset (opposite)
2011-11-07 17:12:25 -06:00
return video_pts - video_pts_offset ;
2011-10-11 08:44:27 -05:00
}
2011-12-11 20:42:50 -06:00
// Convert Frame Number into Video PTS
2019-04-18 01:07:57 -05:00
int64_t FFmpegReader : : ConvertFrameToAudioPTS ( int64_t frame_number ) {
2011-11-07 17:12:25 -06:00
// Get timestamp of this frame (in seconds)
2011-12-11 20:42:50 -06:00
double seconds = double ( frame_number ) / info . fps . ToDouble ( ) ;
2011-10-24 08:22:21 -05:00
2011-12-11 20:42:50 -06:00
// Calculate the # of audio packets in this timestamp
2017-09-28 16:03:01 -05:00
int64_t audio_pts = round ( seconds / info . audio_timebase . ToDouble ( ) ) ;
2011-10-24 08:22:21 -05:00
2011-12-11 20:42:50 -06:00
// Apply PTS offset (opposite)
2011-10-24 08:22:21 -05:00
return audio_pts - audio_pts_offset ;
}
// Calculate Starting video frame and sample # for an audio PTS
// Maps an audio packet PTS to the video frame it belongs to, plus the sample
// offset inside that frame. Small timestamp jitter is smoothed by snapping to
// the previous packet's location; larger gaps are recorded as missing audio
// frames. NOTE: this method mutates previous_packet_location on every call.
AudioLocation FFmpegReader::GetAudioPTSLocation(int64_t pts) {
	// Apply PTS offset (normalizes the stream so PTS 0 == start of audio)
	pts = pts + audio_pts_offset;

	// Get the audio packet start time (in seconds)
	double audio_seconds = double(pts) * info.audio_timebase.ToDouble();

	// Divide by the video timebase, to get the video frame number (frame # is decimal at this point)
	double frame = (audio_seconds * info.fps.ToDouble()) + 1;

	// Frame # as a whole number (no more decimals)
	int64_t whole_frame = int64_t(frame);

	// Remove the whole number, and only get the decimal of the frame
	// (i.e. how far into the frame this packet starts, as a 0.0-1.0 fraction)
	double sample_start_percentage = frame - double(whole_frame);

	// Get Samples per frame
	int samples_per_frame = Frame::GetSamplesPerFrame(whole_frame, info.fps, info.sample_rate, info.channels);

	// Calculate the sample # to start on
	int sample_start = round(double(samples_per_frame) * sample_start_percentage);

	// Protect against broken (i.e. negative) timestamps
	if (whole_frame < 1)
		whole_frame = 1;
	if (sample_start < 0)
		sample_start = 0;

	// Prepare final audio packet location
	AudioLocation location = {whole_frame, sample_start};

	// Compare to previous audio packet (and fix small gaps due to varying PTS timestamps)
	// previous_packet_location.frame == -1 means "no previous packet yet"
	if (previous_packet_location.frame != -1) {
		if (location.is_near(previous_packet_location, samples_per_frame, samples_per_frame)) {
			// Gap is small: snap to the previous packet's location so audio stays contiguous
			int64_t orig_frame = location.frame;
			int orig_start = location.sample_start;

			// Update sample start, to prevent gaps in audio
			location.sample_start = previous_packet_location.sample_start;
			location.frame = previous_packet_location.frame;

			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAudioPTSLocation (Audio Gap Detected)", "Source Frame", orig_frame, "Source Audio Sample", orig_start, "Target Frame", location.frame, "Target Audio Sample", location.sample_start, "pts", pts);

		} else {
			// Gap is too large to smooth over: record the skipped frames as missing
			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAudioPTSLocation (Audio Gap Ignored - too big)", "Previous location frame", previous_packet_location.frame, "Target Frame", location.frame, "Target Audio Sample", location.sample_start, "pts", pts);

			// Lock protects the missing_audio_frames map shared with other threads
			const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
			for (int64_t audio_frame = previous_packet_location.frame; audio_frame < location.frame; audio_frame++) {
				if (!missing_audio_frames.count(audio_frame)) {
					ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAudioPTSLocation (tracking missing frame)", "missing_audio_frame", audio_frame, "previous_audio_frame", previous_packet_location.frame, "new location frame", location.frame);
					// Map each missing frame to the frame just before the previous packet
					missing_audio_frames.insert(std::pair<int64_t, int64_t>(audio_frame, previous_packet_location.frame - 1));
				}
			}
		}
	}

	// Set previous location (used by the gap detection above on the next call)
	previous_packet_location = location;

	// Return the associated video frame and starting sample #
	return location;
}
2011-10-24 08:22:21 -05:00
// Create a new Frame (or return an existing one) and add it to the working queue.
2019-04-18 01:07:57 -05:00
std : : shared_ptr < Frame > FFmpegReader : : CreateFrame ( int64_t requested_frame ) {
2011-10-24 08:22:21 -05:00
// Check working cache
2017-08-20 17:37:39 -05:00
std : : shared_ptr < Frame > output = working_cache . GetFrame ( requested_frame ) ;
2019-10-01 23:27:36 -04:00
2019-04-18 01:07:57 -05:00
if ( ! output ) {
2019-10-01 23:27:36 -04:00
// Lock
const GenericScopedLock < CriticalSection > lock ( processingCriticalSection ) ;
// (re-)Check working cache
output = working_cache . GetFrame ( requested_frame ) ;
if ( output ) return output ;
2011-10-24 08:22:21 -05:00
// Create a new frame on the working cache
2017-08-20 17:37:39 -05:00
output = std : : make_shared < Frame > ( requested_frame , info . width , info . height , " #000000 " , Frame : : GetSamplesPerFrame ( requested_frame , info . fps , info . sample_rate , info . channels ) , info . channels ) ;
2015-06-01 00:20:14 -07:00
output - > SetPixelRatio ( info . pixel_ratio . num , info . pixel_ratio . den ) ; // update pixel ratio
output - > ChannelsLayout ( info . channel_layout ) ; // update audio channel layout from the parent reader
output - > SampleRate ( info . sample_rate ) ; // update the frame's sample rate of the parent reader
2012-07-02 00:51:10 -05:00
2016-08-31 02:02:54 -05:00
working_cache . Add ( output ) ;
2011-10-24 08:22:21 -05:00
2014-03-21 01:25:17 -05:00
// Set the largest processed frame (if this is larger)
if ( requested_frame > largest_frame_processed )
largest_frame_processed = requested_frame ;
2011-10-24 08:22:21 -05:00
}
2019-10-01 23:27:36 -04:00
// Return frame
2015-06-01 00:20:14 -07:00
return output ;
2011-10-24 08:22:21 -05:00
}
2014-09-13 16:35:11 -05:00
// Determine if frame is partial due to seek
2017-09-28 16:03:01 -05:00
bool FFmpegReader : : IsPartialFrame ( int64_t requested_frame ) {
2014-09-13 16:35:11 -05:00
// Sometimes a seek gets partial frames, and we need to remove them
bool seek_trash = false ;
2017-09-28 16:03:01 -05:00
int64_t max_seeked_frame = seek_audio_frame_found ; // determine max seeked frame
2019-04-18 01:07:57 -05:00
if ( seek_video_frame_found > max_seeked_frame ) {
2014-09-13 16:35:11 -05:00
max_seeked_frame = seek_video_frame_found ;
2019-04-18 01:07:57 -05:00
}
2014-09-26 09:35:38 -05:00
if ( ( info . has_audio & & seek_audio_frame_found & & max_seeked_frame > = requested_frame ) | |
2019-04-18 01:07:57 -05:00
( info . has_video & & seek_video_frame_found & & max_seeked_frame > = requested_frame ) ) {
seek_trash = true ;
}
2014-09-13 16:35:11 -05:00
return seek_trash ;
}
2019-03-14 09:26:56 -07:00
// Check if a frame is missing and attempt to replace its frame image (and
2019-04-18 01:07:57 -05:00
bool FFmpegReader : : CheckMissingFrame ( int64_t requested_frame ) {
2016-06-29 02:42:00 -05:00
// Lock
2019-04-18 01:07:57 -05:00
const GenericScopedLock < CriticalSection > lock ( processingCriticalSection ) ;
2016-06-29 02:42:00 -05:00
// Increment check count for this frame (or init to 1)
2019-10-02 00:18:47 -04:00
+ + checked_frames [ requested_frame ] ;
2016-06-29 02:42:00 -05:00
2015-08-24 01:05:48 -05:00
// Debug output
2019-10-02 00:18:47 -04:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::CheckMissingFrame " , " requested_frame " , requested_frame , " has_missing_frames " , has_missing_frames , " missing_video_frames.size() " , missing_video_frames . size ( ) , " checked_count " , checked_frames [ requested_frame ] ) ;
2015-08-24 01:05:48 -05:00
// Missing frames (sometimes frame #'s are skipped due to invalid or missing timestamps)
2019-08-04 22:23:33 -04:00
std : : map < int64_t , int64_t > : : iterator itr ;
2015-08-24 01:05:48 -05:00
bool found_missing_frame = false ;
2019-02-27 23:25:54 -06:00
// Special MP3 Handling (ignore more than 1 video frame)
if ( info . has_audio and info . has_video ) {
AVCodecID aCodecId = AV_FIND_DECODER_CODEC_ID ( aStream ) ;
AVCodecID vCodecId = AV_FIND_DECODER_CODEC_ID ( pStream ) ;
// If MP3 with single video frame, handle this special case by copying the previously
// decoded image to the new frame. Otherwise, it will spend a huge amount of
// CPU time looking for missing images for all the audio-only frames.
2019-10-02 00:18:47 -04:00
if ( checked_frames [ requested_frame ] > 8 & & ! missing_video_frames . count ( requested_frame ) & &
2019-02-27 23:25:54 -06:00
! processing_audio_frames . count ( requested_frame ) & & processed_audio_frames . count ( requested_frame ) & &
2020-01-08 00:09:01 -06:00
last_frame & & last_video_frame & & last_video_frame - > has_image_data & & aCodecId = = AV_CODEC_ID_MP3 & & ( vCodecId = = AV_CODEC_ID_MJPEGB | | vCodecId = = AV_CODEC_ID_MJPEG ) ) {
2019-08-04 22:23:33 -04:00
missing_video_frames . insert ( std : : pair < int64_t , int64_t > ( requested_frame , last_video_frame - > number ) ) ;
missing_video_frames_source . insert ( std : : pair < int64_t , int64_t > ( last_video_frame - > number , requested_frame ) ) ;
2019-04-18 01:07:57 -05:00
missing_frames . Add ( last_video_frame ) ;
2019-02-27 23:25:54 -06:00
}
}
// Check if requested video frame is a missing
if ( missing_video_frames . count ( requested_frame ) ) {
int64_t missing_source_frame = missing_video_frames . find ( requested_frame ) - > second ;
2015-08-24 01:05:48 -05:00
2016-12-07 01:06:16 -06:00
// Increment missing source frame check count (or init to 1)
2019-10-02 00:18:47 -04:00
+ + checked_frames [ missing_source_frame ] ;
2015-08-24 01:05:48 -05:00
2016-12-07 01:06:16 -06:00
// Get the previous frame of this missing frame (if it's available in missing cache)
2017-08-20 17:37:39 -05:00
std : : shared_ptr < Frame > parent_frame = missing_frames . GetFrame ( missing_source_frame ) ;
2016-12-07 01:06:16 -06:00
if ( parent_frame = = NULL ) {
parent_frame = final_cache . GetFrame ( missing_source_frame ) ;
2016-06-29 02:42:00 -05:00
if ( parent_frame ! = NULL ) {
2016-12-07 01:06:16 -06:00
// Add missing final frame to missing cache
missing_frames . Add ( parent_frame ) ;
2016-06-29 02:42:00 -05:00
}
}
2016-12-07 01:06:16 -06:00
// Create blank missing frame
2017-08-20 17:37:39 -05:00
std : : shared_ptr < Frame > missing_frame = CreateFrame ( requested_frame ) ;
2016-12-07 01:06:16 -06:00
// Debug output
2019-07-03 14:14:02 -04:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::CheckMissingFrame (Is Previous Video Frame Final) " , " requested_frame " , requested_frame , " missing_frame->number " , missing_frame - > number , " missing_source_frame " , missing_source_frame ) ;
2016-12-07 01:06:16 -06:00
// If previous frame found, copy image from previous to missing frame (else we'll just wait a bit and try again later)
if ( parent_frame ! = NULL ) {
// Debug output
2019-07-03 14:14:02 -04:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::CheckMissingFrame (AddImage from Previous Video Frame) " , " requested_frame " , requested_frame , " missing_frame->number " , missing_frame - > number , " missing_source_frame " , missing_source_frame ) ;
2016-12-07 01:06:16 -06:00
// Add this frame to the processed map (since it's already done)
2017-08-20 17:37:39 -05:00
std : : shared_ptr < QImage > parent_image = parent_frame - > GetImage ( ) ;
if ( parent_image ) {
2020-08-20 16:50:12 -04:00
missing_frame - > AddImage ( std : : make_shared < QImage > ( * parent_image ) ) ;
2017-08-20 17:37:39 -05:00
processed_video_frames [ missing_frame - > number ] = missing_frame - > number ;
}
2016-06-29 02:42:00 -05:00
}
2019-02-27 23:25:54 -06:00
}
2016-12-07 01:06:16 -06:00
2019-02-27 23:25:54 -06:00
// Check if requested audio frame is a missing
if ( missing_audio_frames . count ( requested_frame ) ) {
// Create blank missing frame
std : : shared_ptr < Frame > missing_frame = CreateFrame ( requested_frame ) ;
// Get Samples per frame (for this frame number)
int samples_per_frame = Frame : : GetSamplesPerFrame ( missing_frame - > number , info . fps , info . sample_rate , info . channels ) ;
// Debug output
2019-07-03 14:14:02 -04:00
ZmqLogger : : Instance ( ) - > AppendDebugMethod ( " FFmpegReader::CheckMissingFrame (Add Silence for Missing Audio Frame) " , " requested_frame " , requested_frame , " missing_frame->number " , missing_frame - > number , " samples_per_frame " , samples_per_frame ) ;
2019-02-27 23:25:54 -06:00
// Add this frame to the processed map (since it's already done)
missing_frame - > AddAudioSilence ( samples_per_frame ) ;
processed_audio_frames [ missing_frame - > number ] = missing_frame - > number ;
2015-08-24 01:05:48 -05:00
}
return found_missing_frame ;
}
// Check the working queue, and move finished frames to the finished queue
// Walks the working cache from the smallest frame upward, finalizing frames
// whose video and audio are both processed (or forcing them final after too
// many failed checks), until it hits a frame that is not ready yet.
void FFmpegReader::CheckWorkingFrames(bool end_of_stream, int64_t requested_frame) {
	// Loop through all working queue frames
	// checked_count_tripped: once one frame exceeds the check limit, all frames
	// before requested_frame are treated as over the limit too (flushed out)
	bool checked_count_tripped = false;
	int max_checked_count = 80;

	// Check if requested frame is 'missing'
	CheckMissingFrame(requested_frame);

	while (true) {
		// Get the front frame of working cache
		std::shared_ptr<Frame> f(working_cache.GetSmallestFrame());

		// Was a frame found?
		if (!f)
			// No frames found
			break;

		// Remove frames which are too old (far behind the requested frame)
		if (f && f->number < (requested_frame - (OPEN_MP_NUM_PROCESSORS * 2))) {
			working_cache.Remove(f->number);
		}

		// Check if this frame is 'missing'
		CheckMissingFrame(f->number);

		// Init # of times this frame has been checked so far
		int checked_count = 0;
		int checked_frames_size = 0;

		bool is_video_ready = false;
		bool is_audio_ready = false;
		{ // limit scope of next few lines (lock protects processed/checked maps)
			const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
			is_video_ready = processed_video_frames.count(f->number);
			is_audio_ready = processed_audio_frames.count(f->number);

			// Get check count for this frame
			checked_frames_size = checked_frames.size();
			if (!checked_count_tripped || f->number >= requested_frame)
				checked_count = checked_frames[f->number];
			else
				// Force checked count over the limit
				checked_count = max_checked_count;
		}

		if (previous_packet_location.frame == f->number && !end_of_stream)
			is_audio_ready = false; // don't finalize the last processed audio frame

		bool is_seek_trash = IsPartialFrame(f->number);

		// Adjust for available streams (a stream we don't have is always "ready")
		if (!info.has_video) is_video_ready = true;
		if (!info.has_audio) is_audio_ready = true;

		// Make final any frames that get stuck (for whatever reason)
		if (checked_count >= max_checked_count && (!is_video_ready || !is_audio_ready)) {
			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckWorkingFrames (exceeded checked_count)", "requested_frame", requested_frame, "frame_number", f->number, "is_video_ready", is_video_ready, "is_audio_ready", is_audio_ready, "checked_count", checked_count, "checked_frames_size", checked_frames_size);

			// Trigger checked count tripped mode (clear out all frames before requested frame)
			checked_count_tripped = true;

			if (info.has_video && !is_video_ready && last_video_frame) {
				// Copy image from last frame (best effort stand-in for the stuck frame)
				f->AddImage(std::make_shared<QImage>(*last_video_frame->GetImage()));
				is_video_ready = true;
			}

			if (info.has_audio && !is_audio_ready) {
				// Mark audio as processed, and indicate the frame has audio data
				is_audio_ready = true;
			}
		}

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckWorkingFrames", "requested_frame", requested_frame, "frame_number", f->number, "is_video_ready", is_video_ready, "is_audio_ready", is_audio_ready, "checked_count", checked_count, "checked_frames_size", checked_frames_size);

		// Check if working frame is final
		if ((!end_of_stream && is_video_ready && is_audio_ready) || end_of_stream || is_seek_trash) {
			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckWorkingFrames (mark frame as final)", "requested_frame", requested_frame, "f->number", f->number, "is_seek_trash", is_seek_trash, "Working Cache Count", working_cache.Count(), "Final Cache Count", final_cache.Count(), "end_of_stream", end_of_stream);

			if (!is_seek_trash) {
				// Add missing image (if needed - sometimes end_of_stream causes frames with only audio)
				if (info.has_video && !is_video_ready && last_video_frame)
					// Copy image from last frame
					f->AddImage(std::make_shared<QImage>(*last_video_frame->GetImage()));

				// Reset counter since last 'final' frame
				num_checks_since_final = 0;

				// Move frame to final cache
				final_cache.Add(f);

				// Add to missing cache (if another frame depends on it)
				{
					const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
					if (missing_video_frames_source.count(f->number)) {
						// Debug output
						ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckWorkingFrames (add frame to missing cache)", "f->number", f->number, "is_seek_trash", is_seek_trash, "Missing Cache Count", missing_frames.Count(), "Working Cache Count", working_cache.Count(), "Final Cache Count", final_cache.Count());
						missing_frames.Add(f);
					}

					// Remove from 'checked' count
					checked_frames.erase(f->number);
				}

				// Remove frame from working cache
				working_cache.Remove(f->number);

				// Update last frame processed
				last_frame = f->number;

			} else {
				// Seek trash, so delete the frame from the working cache, and never add it to the final cache.
				working_cache.Remove(f->number);
			}
		} else {
			// Stop looping (smallest frame is not ready; later frames can't be either)
			break;
		}
	}
}
// Check for the correct frames per second (FPS) value by scanning the 1st few seconds of video packets.
// Counts how many video packets land in each of the first five seconds of the
// stream, then derives the FPS from those counts. Falls back to 30 fps when
// the stream is too short to measure. Updates info.fps, info.video_length,
// info.duration and info.video_bit_rate as side effects.
// NOTE: consumes packets via GetNextPacket(); intended to be called during open.
void FFmpegReader::CheckFPS() {
	check_fps = true;

	// Per-second packet counters (seconds 1 through 5 of the stream)
	int first_second_counter = 0;
	int second_second_counter = 0;
	int third_second_counter = 0;
	int forth_second_counter = 0;
	int fifth_second_counter = 0;
	int frames_detected = 0;
	int64_t pts = 0;

	// Loop through the stream
	while (true) {
		// Get the next packet (if any)
		if (GetNextPacket() < 0)
			// Break loop when no more packets found
			break;

		// Video packet
		if (packet->stream_index == videoStream) {
			// Check if the AVFrame is finished and set it
			if (GetAVFrame()) {
				// Update PTS / Frame Offset (if any)
				UpdatePTSOffset(true);

				// Get PTS of this packet
				pts = GetVideoPTS();

				// Remove pFrame (free the decoded frame, we only need its timing)
				RemoveAVFrame(pFrame);

				// Apply PTS offset
				pts += video_pts_offset;

				// Get the video packet start time (in seconds)
				double video_seconds = double(pts) * info.video_timebase.ToDouble();

				// Increment the correct counter
				if (video_seconds <= 1.0)
					first_second_counter++;
				else if (video_seconds > 1.0 && video_seconds <= 2.0)
					second_second_counter++;
				else if (video_seconds > 2.0 && video_seconds <= 3.0)
					third_second_counter++;
				else if (video_seconds > 3.0 && video_seconds <= 4.0)
					forth_second_counter++;
				else if (video_seconds > 4.0 && video_seconds <= 5.0)
					fifth_second_counter++;

				// Increment counters
				frames_detected++;
			}
		}
	}

	// Double check that all counters have greater than zero (or give up)
	// Second 1 is intentionally excluded from the average: it is often
	// incomplete (stream may not start exactly at t=0).
	if (second_second_counter != 0 && third_second_counter != 0 && forth_second_counter != 0 && fifth_second_counter != 0) {
		// Calculate average FPS (average of seconds 2-5)
		int sum_fps = second_second_counter + third_second_counter + forth_second_counter + fifth_second_counter;
		int avg_fps = round(sum_fps / 4.0f);

		// Update FPS
		info.fps = Fraction(avg_fps, 1);

		// Update Duration and Length
		info.video_length = frames_detected;
		info.duration = frames_detected / (sum_fps / 4.0f);

		// Update video bit rate
		info.video_bit_rate = info.file_size / info.duration;
	} else if (second_second_counter != 0 && third_second_counter != 0) {
		// Calculate average FPS (only on second 2)
		int sum_fps = second_second_counter;

		// Update FPS
		info.fps = Fraction(sum_fps, 1);

		// Update Duration and Length
		info.video_length = frames_detected;
		info.duration = frames_detected / float(sum_fps);

		// Update video bit rate
		info.video_bit_rate = info.file_size / info.duration;
	} else {
		// Too short to determine framerate, just default FPS
		// Set a few important default video settings (so audio can be divided into frames)
		info.fps.num = 30;
		info.fps.den = 1;

		// Calculate number of frames
		info.video_length = frames_detected;
		info.duration = frames_detected / info.fps.ToFloat();
	}
}
// Remove AVFrame from cache (and deallocate its memory)
void FFmpegReader::RemoveAVFrame(AVFrame *remove_frame) {
	// Remove pFrame (if exists)
	if (remove_frame) {
		// Free memory (serialized with other packet/frame deallocations)
		#pragma omp critical (packet_cache)
		{
			// Free the frame's pixel buffer
			av_freep(&remove_frame->data[0]);
#ifndef WIN32
			// Free the AVFrame struct itself.
			// NOTE(review): skipped on Windows — presumably a workaround for a
			// platform-specific crash/double-free; the struct appears to leak
			// there. TODO confirm against the Windows build history.
			AV_FREE_FRAME(&remove_frame);
#endif
		}
	}
}
// Remove AVPacket from cache (and deallocate its memory)
void FFmpegReader::RemoveAVPacket(AVPacket *remove_packet) {
	// deallocate memory for packet (unreferences the packet's data buffers;
	// must happen before deleting the struct itself)
	AV_FREE_PACKET(remove_packet);

	// Delete the object (the AVPacket struct was heap-allocated by this reader)
	delete remove_packet;
}
2012-07-06 02:34:18 -05:00
/// Get the smallest video frame that is still being processed
2019-04-18 01:07:57 -05:00
int64_t FFmpegReader : : GetSmallestVideoFrame ( ) {
2012-07-06 02:34:18 -05:00
// Loop through frame numbers
2019-08-04 22:23:33 -04:00
std : : map < int64_t , int64_t > : : iterator itr ;
2017-09-28 16:03:01 -05:00
int64_t smallest_frame = - 1 ;
2019-04-18 01:07:57 -05:00
const GenericScopedLock < CriticalSection > lock ( processingCriticalSection ) ;
for ( itr = processing_video_frames . begin ( ) ; itr ! = processing_video_frames . end ( ) ; + + itr ) {
2012-07-06 02:34:18 -05:00
if ( itr - > first < smallest_frame | | smallest_frame = = - 1 )
smallest_frame = itr - > first ;
}
// Return frame number
return smallest_frame ;
}
/// Get the smallest audio frame that is still being processed
2019-04-18 01:07:57 -05:00
int64_t FFmpegReader : : GetSmallestAudioFrame ( ) {
2012-07-06 02:34:18 -05:00
// Loop through frame numbers
2019-08-04 22:23:33 -04:00
std : : map < int64_t , int64_t > : : iterator itr ;
2017-09-28 16:03:01 -05:00
int64_t smallest_frame = - 1 ;
2019-04-18 01:07:57 -05:00
const GenericScopedLock < CriticalSection > lock ( processingCriticalSection ) ;
for ( itr = processing_audio_frames . begin ( ) ; itr ! = processing_audio_frames . end ( ) ; + + itr ) {
2012-07-06 02:34:18 -05:00
if ( itr - > first < smallest_frame | | smallest_frame = = - 1 )
smallest_frame = itr - > first ;
}
// Return frame number
return smallest_frame ;
}
2013-12-07 21:09:55 -06:00
// Generate JSON string of this object
2019-12-27 08:51:51 -05:00
std : : string FFmpegReader : : Json ( ) const {
2013-12-07 21:09:55 -06:00
// Return formatted string
return JsonValue ( ) . toStyledString ( ) ;
}
2019-12-27 08:51:51 -05:00
// Generate Json::Value for this object
Json : : Value FFmpegReader : : JsonValue ( ) const {
2012-07-03 16:58:07 -05:00
2013-12-07 16:52:09 -06:00
// Create root json object
Json : : Value root = ReaderBase : : JsonValue ( ) ; // get parent properties
2013-12-07 21:09:55 -06:00
root [ " type " ] = " FFmpegReader " ;
2013-12-07 16:52:09 -06:00
root [ " path " ] = path ;
// return JsonValue
return root ;
}
2013-12-07 21:09:55 -06:00
// Load JSON string into this object
2019-12-27 08:51:51 -05:00
void FFmpegReader : : SetJson ( const std : : string value ) {
2013-12-07 21:09:55 -06:00
// Parse JSON string into JSON objects
2019-04-18 01:07:57 -05:00
try {
2019-12-27 08:51:51 -05:00
const Json : : Value root = openshot : : stringToJson ( value ) ;
2013-12-07 21:09:55 -06:00
// Set all values that match
SetJsonValue ( root ) ;
}
2019-07-03 12:58:02 -04:00
catch ( const std : : exception & e ) {
2013-12-07 21:09:55 -06:00
// Error parsing JSON (or missing keys)
2019-08-27 15:47:39 -04:00
throw InvalidJSON ( " JSON is invalid (missing keys or invalid data types) " ) ;
2013-12-07 21:09:55 -06:00
}
}
2019-12-27 08:51:51 -05:00
// Load Json::Value into this object
void FFmpegReader : : SetJsonValue ( const Json : : Value root ) {
2013-12-07 16:52:09 -06:00
// Set parent data
2013-12-07 21:09:55 -06:00
ReaderBase : : SetJsonValue ( root ) ;
2013-12-07 16:52:09 -06:00
// Set data from Json (if key is found)
2014-01-08 01:43:58 -06:00
if ( ! root [ " path " ] . isNull ( ) )
2013-12-07 16:52:09 -06:00
path = root [ " path " ] . asString ( ) ;
2013-12-07 21:09:55 -06:00
// Re-Open path, and re-init everything (if needed)
2019-04-18 01:07:57 -05:00
if ( is_open ) {
2013-12-07 21:09:55 -06:00
Close ( ) ;
Open ( ) ;
}
2013-12-07 16:52:09 -06:00
}