Mirror of https://gitlab.winehq.org/wine/wine-gecko.git (synced 2024-09-13 09:24:08 -07:00)
Bug 761018 - GStreamer video buffer handling optimization; r=cdouble
This commit is contained in: f1f8c03a5d (parent: e78bb4b075)
@@ -142,6 +142,7 @@ VideoData::~VideoData()
 VideoData* VideoData::Create(VideoInfo& aInfo,
                              ImageContainer* aContainer,
+                             Image* aImage,
                              int64_t aOffset,
                              int64_t aTime,
                              int64_t aEndTime,
@@ -150,7 +151,7 @@ VideoData* VideoData::Create(VideoInfo& aInfo,
                              int64_t aTimecode,
                              nsIntRect aPicture)
 {
-  if (!aContainer) {
+  if (!aImage && !aContainer) {
     // Create a dummy VideoData with no image. This gives us something to
     // send to media streams if necessary.
     nsAutoPtr<VideoData> v(new VideoData(aOffset,
@@ -204,14 +205,19 @@ VideoData* VideoData::Create(VideoInfo& aInfo,
   const YCbCrBuffer::Plane &Cb = aBuffer.mPlanes[1];
   const YCbCrBuffer::Plane &Cr = aBuffer.mPlanes[2];
 
-  // Currently our decoder only knows how to output to PLANAR_YCBCR
-  // format.
-  ImageFormat format[2] = {PLANAR_YCBCR, GRALLOC_PLANAR_YCBCR};
-  if (IsYV12Format(Y, Cb, Cr)) {
-    v->mImage = aContainer->CreateImage(format, 2);
+  if (!aImage) {
+    // Currently our decoder only knows how to output to PLANAR_YCBCR
+    // format.
+    ImageFormat format[2] = {PLANAR_YCBCR, GRALLOC_PLANAR_YCBCR};
+    if (IsYV12Format(Y, Cb, Cr)) {
+      v->mImage = aContainer->CreateImage(format, 2);
+    } else {
+      v->mImage = aContainer->CreateImage(format, 1);
+    }
   } else {
-    v->mImage = aContainer->CreateImage(format, 1);
+    v->mImage = aImage;
   }
 
   if (!v->mImage) {
     return nullptr;
   }
@@ -237,10 +243,43 @@ VideoData* VideoData::Create(VideoInfo& aInfo,
   data.mStereoMode = aInfo.mStereoMode;
 
   videoImage->SetDelayedConversion(true);
-  videoImage->SetData(data);
+  if (!aImage) {
+    videoImage->SetData(data);
+  } else {
+    videoImage->SetDataNoCopy(data);
+  }
 
   return v.forget();
 }
 
+VideoData* VideoData::Create(VideoInfo& aInfo,
+                             ImageContainer* aContainer,
+                             int64_t aOffset,
+                             int64_t aTime,
+                             int64_t aEndTime,
+                             const YCbCrBuffer& aBuffer,
+                             bool aKeyframe,
+                             int64_t aTimecode,
+                             nsIntRect aPicture)
+{
+  return Create(aInfo, aContainer, nullptr, aOffset, aTime, aEndTime, aBuffer,
+                aKeyframe, aTimecode, aPicture);
+}
+
+VideoData* VideoData::Create(VideoInfo& aInfo,
+                             Image* aImage,
+                             int64_t aOffset,
+                             int64_t aTime,
+                             int64_t aEndTime,
+                             const YCbCrBuffer& aBuffer,
+                             bool aKeyframe,
+                             int64_t aTimecode,
+                             nsIntRect aPicture)
+{
+  return Create(aInfo, nullptr, aImage, aOffset, aTime, aEndTime, aBuffer,
+                aKeyframe, aTimecode, aPicture);
+}
+
 VideoData* VideoData::CreateFromImage(VideoInfo& aInfo,
                                       ImageContainer* aContainer,
                                       int64_t aOffset,
@@ -267,7 +306,7 @@ VideoData* VideoData::Create(VideoInfo& aInfo,
                              int64_t aOffset,
                              int64_t aTime,
                              int64_t aEndTime,
-                             mozilla::layers::GraphicBufferLocked *aBuffer,
+                             mozilla::layers::GraphicBufferLocked* aBuffer,
                              bool aKeyframe,
                              int64_t aTimecode,
                              nsIntRect aPicture)
@@ -132,14 +132,39 @@ public:
     Plane mPlanes[3];
   };
 
-  // Constructs a VideoData object. Makes a copy of YCbCr data in aBuffer.
-  // aTimecode is a codec specific number representing the timestamp of
-  // the frame of video data. Returns nullptr if an error occurs. This may
-  // indicate that memory couldn't be allocated to create the VideoData
-  // object, or it may indicate some problem with the input data (e.g.
-  // negative stride).
+  // Constructs a VideoData object. If aImage is NULL, creates a new Image
+  // holding a copy of the YCbCr data passed in aBuffer. If aImage is not NULL,
+  // it's stored as the underlying video image and aBuffer is assumed to point
+  // to memory within aImage so no copy is made. aTimecode is a codec specific
+  // number representing the timestamp of the frame of video data. Returns
+  // nsnull if an error occurs. This may indicate that memory couldn't be
+  // allocated to create the VideoData object, or it may indicate some problem
+  // with the input data (e.g. negative stride).
   static VideoData* Create(VideoInfo& aInfo,
                            ImageContainer* aContainer,
+                           Image* aImage,
                            int64_t aOffset,
                            int64_t aTime,
                            int64_t aEndTime,
                            const YCbCrBuffer &aBuffer,
                            bool aKeyframe,
                            int64_t aTimecode,
                            nsIntRect aPicture);
 
+  // Variant that always makes a copy of aBuffer
+  static VideoData* Create(VideoInfo& aInfo,
+                           ImageContainer* aContainer,
+                           int64_t aOffset,
+                           int64_t aTime,
+                           int64_t aEndTime,
+                           const YCbCrBuffer &aBuffer,
+                           bool aKeyframe,
+                           int64_t aTimecode,
+                           nsIntRect aPicture);
+
+  // Variant to create a VideoData instance given an existing aImage
+  static VideoData* Create(VideoInfo& aInfo,
+                           Image* aImage,
+                           int64_t aOffset,
+                           int64_t aTime,
+                           int64_t aEndTime,
@@ -153,7 +178,7 @@ public:
                            int64_t aOffset,
                            int64_t aTime,
                            int64_t aEndTime,
-                           layers::GraphicBufferLocked *aBuffer,
+                           layers::GraphicBufferLocked* aBuffer,
                            bool aKeyframe,
                            int64_t aTimecode,
                            nsIntRect aPicture);
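Aside (not part of the commit): the overloads declared above split along copy vs. zero-copy lines. A hedged sketch of how a caller chooses between them; the call site and variable names are hypothetical, the signatures are from the diff:

// Copying variant: a new Image is allocated inside Create() and the YCbCr
// planes described by 'buffer' are copied into it.
VideoData* copied = VideoData::Create(info, container, offset, time, endTime,
                                      buffer, isKeyframe, timecode, picture);

// Zero-copy variant: 'image' already owns the pixel data and 'buffer' is
// assumed to point into it, so Create() just stores the image.
VideoData* wrapped = VideoData::Create(info, image, offset, time, endTime,
                                       buffer, isKeyframe, timecode, picture);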
@@ -27,12 +27,19 @@ extern PRLogModuleInfo* gMediaDecoderLog;
 #define LOG(type, msg)
 #endif
 
+extern bool
+IsYV12Format(const VideoData::YCbCrBuffer::Plane& aYPlane,
+             const VideoData::YCbCrBuffer::Plane& aCbPlane,
+             const VideoData::YCbCrBuffer::Plane& aCrPlane);
+
 static const int MAX_CHANNELS = 4;
 // Let the demuxer work in pull mode for short files
 static const int SHORT_FILE_SIZE = 1024 * 1024;
 // The default resource->Read() size when working in push mode
 static const int DEFAULT_SOURCE_READ_SIZE = 50 * 1024;
 
+G_DEFINE_BOXED_TYPE(BufferData, buffer_data, BufferData::Copy, BufferData::Free);
+
 typedef enum {
   GST_PLAY_FLAG_VIDEO = (1 << 0),
   GST_PLAY_FLAG_AUDIO = (1 << 1),
@@ -49,13 +56,13 @@ typedef enum {
 
 GStreamerReader::GStreamerReader(AbstractMediaDecoder* aDecoder)
   : MediaDecoderReader(aDecoder),
-  mPlayBin(NULL),
-  mBus(NULL),
-  mSource(NULL),
-  mVideoSink(NULL),
-  mVideoAppSink(NULL),
-  mAudioSink(NULL),
-  mAudioAppSink(NULL),
+  mPlayBin(nullptr),
+  mBus(nullptr),
+  mSource(nullptr),
+  mVideoSink(nullptr),
+  mVideoAppSink(nullptr),
+  mAudioSink(nullptr),
+  mAudioAppSink(nullptr),
   mFormat(GST_VIDEO_FORMAT_UNKNOWN),
   mVideoSinkBufferCount(0),
   mAudioSinkBufferCount(0),
@@ -75,7 +82,7 @@ GStreamerReader::GStreamerReader(AbstractMediaDecoder* aDecoder)
   mSinkCallbacks.eos = GStreamerReader::EosCb;
   mSinkCallbacks.new_preroll = GStreamerReader::NewPrerollCb;
   mSinkCallbacks.new_buffer = GStreamerReader::NewBufferCb;
-  mSinkCallbacks.new_buffer_list = NULL;
+  mSinkCallbacks.new_buffer_list = nullptr;
 
   gst_segment_init(&mVideoSegment, GST_FORMAT_UNDEFINED);
   gst_segment_init(&mAudioSegment, GST_FORMAT_UNDEFINED);
@@ -92,97 +99,101 @@ GStreamerReader::~GStreamerReader()
     gst_object_unref(mSource);
     gst_element_set_state(mPlayBin, GST_STATE_NULL);
     gst_object_unref(mPlayBin);
-    mPlayBin = NULL;
-    mVideoSink = NULL;
-    mVideoAppSink = NULL;
-    mAudioSink = NULL;
-    mAudioAppSink = NULL;
+    mPlayBin = nullptr;
+    mVideoSink = nullptr;
+    mVideoAppSink = nullptr;
+    mAudioSink = nullptr;
+    mAudioAppSink = nullptr;
     gst_object_unref(mBus);
-    mBus = NULL;
+    mBus = nullptr;
   }
 }
 
 nsresult GStreamerReader::Init(MediaDecoderReader* aCloneDonor)
 {
-  GError *error = NULL;
+  GError* error = nullptr;
   if (!gst_init_check(0, 0, &error)) {
     LOG(PR_LOG_ERROR, ("gst initialization failed: %s", error->message));
     g_error_free(error);
     return NS_ERROR_FAILURE;
   }
 
-  mPlayBin = gst_element_factory_make("playbin2", NULL);
-  if (mPlayBin == NULL) {
+  mPlayBin = gst_element_factory_make("playbin2", nullptr);
+  if (!mPlayBin) {
     LOG(PR_LOG_ERROR, ("couldn't create playbin2"));
     return NS_ERROR_FAILURE;
   }
-  g_object_set(mPlayBin, "buffer-size", 0, NULL);
+  g_object_set(mPlayBin, "buffer-size", 0, nullptr);
   mBus = gst_pipeline_get_bus(GST_PIPELINE(mPlayBin));
 
   mVideoSink = gst_parse_bin_from_description("capsfilter name=filter ! "
       "appsink name=videosink sync=true max-buffers=1 "
      "caps=video/x-raw-yuv,format=(fourcc)I420"
-      , TRUE, NULL);
+      , TRUE, nullptr);
   mVideoAppSink = GST_APP_SINK(gst_bin_get_by_name(GST_BIN(mVideoSink),
       "videosink"));
   gst_app_sink_set_callbacks(mVideoAppSink, &mSinkCallbacks,
-      (gpointer) this, NULL);
-  GstPad *sinkpad = gst_element_get_pad(GST_ELEMENT(mVideoAppSink), "sink");
+      (gpointer) this, nullptr);
+  GstPad* sinkpad = gst_element_get_pad(GST_ELEMENT(mVideoAppSink), "sink");
   gst_pad_add_event_probe(sinkpad,
      G_CALLBACK(&GStreamerReader::EventProbeCb), this);
   gst_object_unref(sinkpad);
+#if GST_VERSION_MICRO >= 36
+  gst_pad_set_bufferalloc_function(sinkpad, GStreamerReader::AllocateVideoBufferCb);
+#endif
+  gst_pad_set_element_private(sinkpad, this);
 
   mAudioSink = gst_parse_bin_from_description("capsfilter name=filter ! "
 #ifdef MOZ_SAMPLE_TYPE_FLOAT32
       "appsink name=audiosink sync=true caps=audio/x-raw-float,"
 #ifdef IS_LITTLE_ENDIAN
-      "channels={1,2},width=32,endianness=1234", TRUE, NULL);
+      "channels={1,2},width=32,endianness=1234", TRUE, nullptr);
 #else
-      "channels={1,2},width=32,endianness=4321", TRUE, NULL);
+      "channels={1,2},width=32,endianness=4321", TRUE, nullptr);
 #endif
 #else
      "appsink name=audiosink sync=true caps=audio/x-raw-int,"
 #ifdef IS_LITTLE_ENDIAN
-      "channels={1,2},width=16,endianness=1234", TRUE, NULL);
+      "channels={1,2},width=16,endianness=1234", TRUE, nullptr);
 #else
-      "channels={1,2},width=16,endianness=4321", TRUE, NULL);
+      "channels={1,2},width=16,endianness=4321", TRUE, nullptr);
 #endif
 #endif
   mAudioAppSink = GST_APP_SINK(gst_bin_get_by_name(GST_BIN(mAudioSink),
-      "audiosink"));
+                                                   "audiosink"));
   gst_app_sink_set_callbacks(mAudioAppSink, &mSinkCallbacks,
-      (gpointer) this, NULL);
+                             (gpointer) this, nullptr);
   sinkpad = gst_element_get_pad(GST_ELEMENT(mAudioAppSink), "sink");
   gst_pad_add_event_probe(sinkpad,
-      G_CALLBACK(&GStreamerReader::EventProbeCb), this);
+                          G_CALLBACK(&GStreamerReader::EventProbeCb), this);
   gst_object_unref(sinkpad);
 
   g_object_set(mPlayBin, "uri", "appsrc://",
-      "video-sink", mVideoSink,
-      "audio-sink", mAudioSink,
-      NULL);
+               "video-sink", mVideoSink,
+               "audio-sink", mAudioSink,
+               nullptr);
 
-  g_signal_connect(G_OBJECT(mPlayBin), "notify::source",
-      G_CALLBACK(GStreamerReader::PlayBinSourceSetupCb), this);
+  g_object_connect(mPlayBin, "signal::source-setup",
+                   GStreamerReader::PlayBinSourceSetupCb, this, nullptr);
 
   return NS_OK;
 }
 
-void GStreamerReader::PlayBinSourceSetupCb(GstElement *aPlayBin,
-                                           GParamSpec *pspec,
-                                           gpointer aUserData)
+void GStreamerReader::PlayBinSourceSetupCb(GstElement* aPlayBin,
+                                           GParamSpec* pspec,
+                                           gpointer aUserData)
 {
   GstElement *source;
-  GStreamerReader *reader = reinterpret_cast<GStreamerReader*>(aUserData);
+  GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(aUserData);
 
   g_object_get(aPlayBin, "source", &source, NULL);
   reader->PlayBinSourceSetup(GST_APP_SRC(source));
 }
 
-void GStreamerReader::PlayBinSourceSetup(GstAppSrc *aSource)
+void GStreamerReader::PlayBinSourceSetup(GstAppSrc* aSource)
 {
   mSource = GST_APP_SRC(aSource);
-  gst_app_src_set_callbacks(mSource, &mSrcCallbacks, (gpointer) this, NULL);
+  gst_app_src_set_callbacks(mSource, &mSrcCallbacks, (gpointer) this, nullptr);
   MediaResource* resource = mDecoder->GetResource();
 
   /* do a short read to trigger a network request so that GetLength() below
@@ -194,25 +205,25 @@ void GStreamerReader::PlayBinSourceSetup(GstAppSrc *aSource)
   resource->Seek(SEEK_SET, 0);
 
   /* now we should have a length */
-  int64_t len = resource->GetLength();
-  gst_app_src_set_size(mSource, len);
+  int64_t resourceLength = resource->GetLength();
+  gst_app_src_set_size(mSource, resourceLength);
   if (resource->IsDataCachedToEndOfResource(0) ||
-      (len != -1 && len <= SHORT_FILE_SIZE)) {
+      (resourceLength != -1 && resourceLength <= SHORT_FILE_SIZE)) {
     /* let the demuxer work in pull mode for local files (or very short files)
      * so that we get optimal seeking accuracy/performance
      */
-    LOG(PR_LOG_DEBUG, ("configuring random access, len %lld", len));
+    LOG(PR_LOG_DEBUG, ("configuring random access, len %lld", resourceLength));
     gst_app_src_set_stream_type(mSource, GST_APP_STREAM_TYPE_RANDOM_ACCESS);
   } else {
     /* make the demuxer work in push mode so that seeking is kept to a minimum
     */
-    LOG(PR_LOG_DEBUG, ("configuring push mode, len %lld", len));
+    LOG(PR_LOG_DEBUG, ("configuring push mode, len %lld", resourceLength));
     gst_app_src_set_stream_type(mSource, GST_APP_STREAM_TYPE_SEEKABLE);
   }
 }
 
 nsresult GStreamerReader::ReadMetadata(VideoInfo* aInfo,
-    MetadataTags** aTags)
+                                       MetadataTags** aTags)
 {
   NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
   nsresult ret = NS_OK;
@@ -224,24 +235,24 @@ nsresult GStreamerReader::ReadMetadata(VideoInfo* aInfo,
   guint flags[3] = {GST_PLAY_FLAG_VIDEO|GST_PLAY_FLAG_AUDIO,
     static_cast<guint>(~GST_PLAY_FLAG_AUDIO), static_cast<guint>(~GST_PLAY_FLAG_VIDEO)};
   guint default_flags, current_flags;
-  g_object_get(mPlayBin, "flags", &default_flags, NULL);
+  g_object_get(mPlayBin, "flags", &default_flags, nullptr);
 
-  GstMessage *message = NULL;
+  GstMessage* message = nullptr;
   for (unsigned int i = 0; i < G_N_ELEMENTS(flags); i++) {
     current_flags = default_flags & flags[i];
-    g_object_set(G_OBJECT(mPlayBin), "flags", current_flags, NULL);
+    g_object_set(G_OBJECT(mPlayBin), "flags", current_flags, nullptr);
 
     /* reset filter caps to ANY */
-    GstCaps *caps = gst_caps_new_any();
-    GstElement *filter = gst_bin_get_by_name(GST_BIN(mAudioSink), "filter");
-    g_object_set(filter, "caps", caps, NULL);
+    GstCaps* caps = gst_caps_new_any();
+    GstElement* filter = gst_bin_get_by_name(GST_BIN(mAudioSink), "filter");
+    g_object_set(filter, "caps", caps, nullptr);
     gst_object_unref(filter);
 
     filter = gst_bin_get_by_name(GST_BIN(mVideoSink), "filter");
-    g_object_set(filter, "caps", caps, NULL);
+    g_object_set(filter, "caps", caps, nullptr);
     gst_object_unref(filter);
     gst_caps_unref(caps);
-    filter = NULL;
+    filter = nullptr;
 
     if (!(current_flags & GST_PLAY_FLAG_AUDIO))
       filter = gst_bin_get_by_name(GST_BIN(mAudioSink), "filter");
|
||||
/* Little trick: set the target caps to "skip" so that playbin2 fails to
|
||||
* find a decoder for the stream we want to skip.
|
||||
*/
|
||||
GstCaps *filterCaps = gst_caps_new_simple ("skip", NULL);
|
||||
g_object_set(filter, "caps", filterCaps, NULL);
|
||||
GstCaps* filterCaps = gst_caps_new_simple ("skip", nullptr);
|
||||
g_object_set(filter, "caps", filterCaps, nullptr);
|
||||
gst_caps_unref(filterCaps);
|
||||
gst_object_unref(filter);
|
||||
}
|
||||
@ -265,14 +276,14 @@ nsresult GStreamerReader::ReadMetadata(VideoInfo* aInfo,
|
||||
* prerolled and ready to play. Also watch for errors.
|
||||
*/
|
||||
message = gst_bus_timed_pop_filtered(mBus, GST_CLOCK_TIME_NONE,
|
||||
(GstMessageType)(GST_MESSAGE_ASYNC_DONE | GST_MESSAGE_ERROR));
|
||||
(GstMessageType)(GST_MESSAGE_ASYNC_DONE | GST_MESSAGE_ERROR));
|
||||
if (GST_MESSAGE_TYPE(message) == GST_MESSAGE_ERROR) {
|
||||
GError *error;
|
||||
gchar *debug;
|
||||
GError* error;
|
||||
gchar* debug;
|
||||
|
||||
gst_message_parse_error(message, &error, &debug);
|
||||
LOG(PR_LOG_ERROR, ("read metadata error: %s: %s", error->message,
|
||||
debug));
|
||||
debug));
|
||||
g_error_free(error);
|
||||
g_free(debug);
|
||||
gst_element_set_state(mPlayBin, GST_STATE_NULL);
|
||||
@ -316,7 +327,7 @@ nsresult GStreamerReader::ReadMetadata(VideoInfo* aInfo,
|
||||
}
|
||||
|
||||
int n_video = 0, n_audio = 0;
|
||||
g_object_get(mPlayBin, "n-video", &n_video, "n-audio", &n_audio, NULL);
|
||||
g_object_get(mPlayBin, "n-video", &n_video, "n-audio", &n_audio, nullptr);
|
||||
mInfo.mHasVideo = n_video != 0;
|
||||
mInfo.mHasAudio = n_audio != 0;
|
||||
|
||||
@@ -359,7 +370,7 @@ void GStreamerReader::NotifyBytesConsumed()
   mLastReportedByteOffset = mByteOffset;
 }
 
-bool GStreamerReader::WaitForDecodedData(int *aCounter)
+bool GStreamerReader::WaitForDecodedData(int* aCounter)
 {
   ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);
 
@@ -386,7 +397,7 @@ bool GStreamerReader::DecodeAudioData()
     return false;
   }
 
-  GstBuffer *buffer = gst_app_sink_pull_buffer(mAudioAppSink);
+  GstBuffer* buffer = gst_app_sink_pull_buffer(mAudioAppSink);
   int64_t timestamp = GST_BUFFER_TIMESTAMP(buffer);
   timestamp = gst_segment_to_stream_time(&mAudioSegment,
       GST_FORMAT_TIME, timestamp);
@@ -401,7 +412,7 @@ bool GStreamerReader::DecodeAudioData()
   ssize_t outSize = static_cast<size_t>(size / sizeof(AudioDataValue));
   nsAutoArrayPtr<AudioDataValue> data(new AudioDataValue[outSize]);
   memcpy(data, GST_BUFFER_DATA(buffer), GST_BUFFER_SIZE(buffer));
-  AudioData *audio = new AudioData(offset, timestamp, duration,
+  AudioData* audio = new AudioData(offset, timestamp, duration,
       frames, data.forget(), mInfo.mAudioChannels);
 
   mAudioQueue.Push(audio);
@@ -415,7 +426,7 @@ bool GStreamerReader::DecodeVideoFrame(bool &aKeyFrameSkip,
 {
   NS_ASSERTION(mDecoder->OnDecodeThread(), "Should be on decode thread.");
 
-  GstBuffer *buffer = NULL;
+  GstBuffer* buffer = nullptr;
   int64_t timestamp, nextTimestamp;
   while (true)
   {
@@ -429,7 +440,7 @@ bool GStreamerReader::DecodeVideoFrame(bool &aKeyFrameSkip,
     bool isKeyframe = !GST_BUFFER_FLAG_IS_SET(buffer, GST_BUFFER_FLAG_DISCONT);
     if ((aKeyFrameSkip && !isKeyframe)) {
       gst_buffer_unref(buffer);
-      buffer = NULL;
+      buffer = nullptr;
      continue;
    }
 
@@ -453,18 +464,45 @@ bool GStreamerReader::DecodeVideoFrame(bool &aKeyFrameSkip,
           " threshold %" GST_TIME_FORMAT,
           GST_TIME_ARGS(timestamp), GST_TIME_ARGS(aTimeThreshold)));
       gst_buffer_unref(buffer);
-      buffer = NULL;
+      buffer = nullptr;
      continue;
     }
 
     break;
   }
 
-  if (buffer == NULL)
+  if (!buffer)
     /* no more frames */
     return false;
 
-  guint8 *data = GST_BUFFER_DATA(buffer);
+  nsRefPtr<PlanarYCbCrImage> image;
+#if GST_VERSION_MICRO >= 36
+  const GstStructure* structure = gst_buffer_get_qdata(buffer,
+      g_quark_from_string("moz-reader-data"));
+  const GValue* value = gst_structure_get_value(structure, "image");
+  if (value) {
+    BufferData* data = reinterpret_cast<BufferData*>(g_value_get_boxed(value));
+    image = data->mImage;
+  }
+#endif
+
+  if (!image) {
+    /* Ugh, upstream is not calling gst_pad_alloc_buffer(). Fallback to
+     * allocating a PlanarYCbCrImage backed GstBuffer here and memcpy.
+     */
+    GstBuffer* tmp = nullptr;
+    AllocateVideoBufferFull(nullptr, GST_BUFFER_OFFSET(buffer),
+        GST_BUFFER_SIZE(buffer), nullptr, &tmp, image);
+
+    /* copy */
+    gst_buffer_copy_metadata(tmp, buffer, GST_BUFFER_COPY_ALL);
+    memcpy(GST_BUFFER_DATA(tmp), GST_BUFFER_DATA(buffer),
+        GST_BUFFER_SIZE(tmp));
+    gst_buffer_unref(buffer);
+    buffer = tmp;
+  }
+
+  guint8* data = GST_BUFFER_DATA(buffer);
 
   int width = mPicture.width;
   int height = mPicture.height;
@@ -487,15 +525,9 @@ bool GStreamerReader::DecodeVideoFrame(bool &aKeyFrameSkip,
                                    GST_BUFFER_FLAG_DELTA_UNIT);
   /* XXX ? */
   int64_t offset = 0;
-  VideoData *video = VideoData::Create(mInfo,
-                                       mDecoder->GetImageContainer(),
-                                       offset,
-                                       timestamp,
-                                       nextTimestamp,
-                                       b,
-                                       isKeyframe,
-                                       -1,
-                                       mPicture);
+  VideoData* video = VideoData::Create(mInfo, image, offset,
+                                       timestamp, nextTimestamp, b,
+                                       isKeyframe, -1, mPicture);
   mVideoQueue.Push(video);
   gst_buffer_unref(buffer);
 
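Aside (not part of the commit): the qdata lookup at the top of DecodeVideoFrame() above is the consumer half of the zero-copy scheme. A minimal standalone sketch of the same lookup, assuming GStreamer 0.10.36+ and the "moz-reader-data" structure attached by AllocateVideoBufferFull(); the helper name is hypothetical:

/* Recover the PlanarYCbCrImage backing a GstBuffer, if any. */
static void
GetReaderImage(GstBuffer* aBuffer, nsRefPtr<PlanarYCbCrImage>& aImage)
{
  const GstStructure* structure =
      gst_buffer_get_qdata(aBuffer, g_quark_from_string("moz-reader-data"));
  if (!structure)
    return;  /* buffer was not allocated through AllocateVideoBufferCb */

  const GValue* value = gst_structure_get_value(structure, "image");
  if (!value)
    return;

  /* the boxed BufferData holds a strong reference to the image */
  BufferData* data = reinterpret_cast<BufferData*>(g_value_get_boxed(value));
  aImage = data->mImage;
}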
@@ -532,7 +564,6 @@ nsresult GStreamerReader::GetBuffered(TimeRanges* aBuffered,
 
   GstFormat format = GST_FORMAT_TIME;
   MediaResource* resource = mDecoder->GetResource();
-  gint64 resourceLength = resource->GetLength();
   nsTArray<MediaByteRange> ranges;
   resource->GetCachedRanges(ranges);
 
@@ -549,7 +580,7 @@ nsresult GStreamerReader::GetBuffered(TimeRanges* aBuffered,
     duration = QueryDuration();
     double end = (double) duration / GST_MSECOND;
     LOG(PR_LOG_DEBUG, ("complete range [0, %f] for [0, %li]",
-          end, resourceLength));
+          end, resource->GetLength()));
     aBuffered->Add(0, end);
     return NS_OK;
   }
@@ -569,7 +600,7 @@ nsresult GStreamerReader::GetBuffered(TimeRanges* aBuffered,
     double start = (double) GST_TIME_AS_USECONDS (startTime) / GST_MSECOND;
     double end = (double) GST_TIME_AS_USECONDS (endTime) / GST_MSECOND;
     LOG(PR_LOG_DEBUG, ("adding range [%f, %f] for [%li %li] size %li",
-          start, end, startOffset, endOffset, resourceLength));
+          start, end, startOffset, endOffset, resource->GetLength()));
     aBuffered->Add(start, end);
   }
 
@@ -582,8 +613,8 @@ void GStreamerReader::ReadAndPushData(guint aLength)
   NS_ASSERTION(resource, "Decoder has no media resource");
   nsresult rv = NS_OK;
 
-  GstBuffer *buffer = gst_buffer_new_and_alloc(aLength);
-  guint8 *data = GST_BUFFER_DATA(buffer);
+  GstBuffer* buffer = gst_buffer_new_and_alloc(aLength);
+  guint8* data = GST_BUFFER_DATA(buffer);
   uint32_t size = 0, bytesRead = 0;
   while(bytesRead < aLength) {
     rv = resource->Read(reinterpret_cast<char*>(data + bytesRead),
@@ -598,12 +629,14 @@ void GStreamerReader::ReadAndPushData(guint aLength)
   mByteOffset += bytesRead;
 
   GstFlowReturn ret = gst_app_src_push_buffer(mSource, gst_buffer_ref(buffer));
-  if (ret != GST_FLOW_OK)
+  if (ret != GST_FLOW_OK) {
     LOG(PR_LOG_ERROR, ("ReadAndPushData push ret %s", gst_flow_get_name(ret)));
+  }
 
-  if (GST_BUFFER_SIZE (buffer) < aLength)
+  if (GST_BUFFER_SIZE (buffer) < aLength) {
     /* If we read less than what we wanted, we reached the end */
     gst_app_src_end_of_stream(mSource);
+  }
 
   gst_buffer_unref(buffer);
 }
@@ -632,73 +665,77 @@ int64_t GStreamerReader::QueryDuration()
   return duration;
 }
 
-void GStreamerReader::NeedDataCb(GstAppSrc *aSrc,
-                                 guint aLength,
-                                 gpointer aUserData)
+void GStreamerReader::NeedDataCb(GstAppSrc* aSrc,
+                                 guint aLength,
+                                 gpointer aUserData)
 {
-  GStreamerReader *reader = (GStreamerReader *) aUserData;
+  GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(aUserData);
   reader->NeedData(aSrc, aLength);
 }
 
-void GStreamerReader::NeedData(GstAppSrc *aSrc, guint aLength)
+void GStreamerReader::NeedData(GstAppSrc* aSrc, guint aLength)
 {
-  if (aLength == -1)
+  if (aLength == static_cast<guint>(-1))
     aLength = DEFAULT_SOURCE_READ_SIZE;
   ReadAndPushData(aLength);
 }
 
-void GStreamerReader::EnoughDataCb(GstAppSrc *aSrc, gpointer aUserData)
+void GStreamerReader::EnoughDataCb(GstAppSrc* aSrc, gpointer aUserData)
 {
-  GStreamerReader *reader = (GStreamerReader *) aUserData;
+  GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(aUserData);
   reader->EnoughData(aSrc);
 }
 
-void GStreamerReader::EnoughData(GstAppSrc *aSrc)
+void GStreamerReader::EnoughData(GstAppSrc* aSrc)
 {
 }
 
-gboolean GStreamerReader::SeekDataCb(GstAppSrc *aSrc,
-                                     guint64 aOffset,
-                                     gpointer aUserData)
+gboolean GStreamerReader::SeekDataCb(GstAppSrc* aSrc,
+                                     guint64 aOffset,
+                                     gpointer aUserData)
 {
-  GStreamerReader *reader = (GStreamerReader *) aUserData;
+  GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(aUserData);
   return reader->SeekData(aSrc, aOffset);
 }
 
-gboolean GStreamerReader::SeekData(GstAppSrc *aSrc, guint64 aOffset)
+gboolean GStreamerReader::SeekData(GstAppSrc* aSrc, guint64 aOffset)
 {
   ReentrantMonitorAutoEnter mon(mGstThreadsMonitor);
   MediaResource* resource = mDecoder->GetResource();
+  int64_t resourceLength = resource->GetLength();
 
-  if (gst_app_src_get_size(mSource) == -1)
+  if (gst_app_src_get_size(mSource) == -1) {
     /* It's possible that we didn't know the length when we initialized mSource
     * but maybe we do now
     */
-    gst_app_src_set_size(mSource, resource->GetLength());
+    gst_app_src_set_size(mSource, resourceLength);
+  }
 
   nsresult rv = NS_ERROR_FAILURE;
-  if (aOffset < resource->GetLength())
+  if (aOffset < static_cast<guint64>(resourceLength)) {
     rv = resource->Seek(SEEK_SET, aOffset);
+  }
 
-  if (NS_SUCCEEDED(rv))
+  if (NS_SUCCEEDED(rv)) {
     mByteOffset = mLastReportedByteOffset = aOffset;
-  else
+  } else {
     LOG(PR_LOG_ERROR, ("seek at %lu failed", aOffset));
+  }
 
   return NS_SUCCEEDED(rv);
 }
 
-gboolean GStreamerReader::EventProbeCb(GstPad *aPad,
-                                       GstEvent *aEvent,
+gboolean GStreamerReader::EventProbeCb(GstPad* aPad,
+                                       GstEvent* aEvent,
                                        gpointer aUserData)
 {
-  GStreamerReader *reader = (GStreamerReader *) aUserData;
+  GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(aUserData);
   return reader->EventProbe(aPad, aEvent);
 }
 
-gboolean GStreamerReader::EventProbe(GstPad *aPad, GstEvent *aEvent)
+gboolean GStreamerReader::EventProbe(GstPad* aPad, GstEvent* aEvent)
 {
-  GstElement *parent = GST_ELEMENT(gst_pad_get_parent(aPad));
+  GstElement* parent = GST_ELEMENT(gst_pad_get_parent(aPad));
   switch(GST_EVENT_TYPE(aEvent)) {
     case GST_EVENT_NEWSEGMENT:
     {
@@ -706,7 +743,7 @@ gboolean GStreamerReader::EventProbe(GstPad *aPad, GstEvent *aEvent)
       gdouble rate;
       GstFormat format;
       gint64 start, stop, position;
-      GstSegment *segment;
+      GstSegment* segment;
 
       /* Store the segments so we can convert timestamps to stream time, which
        * is what the upper layers sync on.
@@ -734,10 +771,72 @@ gboolean GStreamerReader::EventProbe(GstPad *aPad, GstEvent *aEvent)
   return TRUE;
 }
 
-GstFlowReturn GStreamerReader::NewPrerollCb(GstAppSink *aSink,
+GstFlowReturn GStreamerReader::AllocateVideoBufferFull(GstPad* aPad,
+                                                       guint64 aOffset,
+                                                       guint aSize,
+                                                       GstCaps* aCaps,
+                                                       GstBuffer** aBuf,
+                                                       nsRefPtr<PlanarYCbCrImage>& aImage)
+{
+  /* allocate an image using the container */
+  ImageContainer* container = mDecoder->GetImageContainer();
+  ImageFormat format = PLANAR_YCBCR;
+  PlanarYCbCrImage* img = reinterpret_cast<PlanarYCbCrImage*>(container->CreateImage(&format, 1).get());
+  nsRefPtr<PlanarYCbCrImage> image = dont_AddRef(img);
+
+  /* prepare a GstBuffer pointing to the underlying PlanarYCbCrImage buffer */
+  GstBuffer* buf = gst_buffer_new();
+  GST_BUFFER_SIZE(buf) = aSize;
+  /* allocate the actual YUV buffer */
+  GST_BUFFER_DATA(buf) = image->AllocateAndGetNewBuffer(aSize);
+
+  aImage = image;
+
+#if GST_VERSION_MICRO >= 36
+  /* create a GBoxed handle to hold the image */
+  BufferData* data = new BufferData(image);
+
+  /* store it in a GValue so we can put it in a GstStructure */
+  GValue value = {0,};
+  g_value_init(&value, buffer_data_get_type());
+  g_value_take_boxed(&value, data);
+
+  /* store the value in the structure */
+  GstStructure* structure = gst_structure_new("moz-reader-data", nullptr);
+  gst_structure_take_value(structure, "image", &value);
+
+  /* and attach the structure to the buffer */
+  gst_buffer_set_qdata(buf, g_quark_from_string("moz-reader-data"), structure);
+#endif
+
+  *aBuf = buf;
+  return GST_FLOW_OK;
+}
+
+GstFlowReturn GStreamerReader::AllocateVideoBufferCb(GstPad* aPad,
+                                                     guint64 aOffset,
+                                                     guint aSize,
+                                                     GstCaps* aCaps,
+                                                     GstBuffer** aBuf)
+{
+  GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(gst_pad_get_element_private(aPad));
+  return reader->AllocateVideoBuffer(aPad, aOffset, aSize, aCaps, aBuf);
+}
+
+GstFlowReturn GStreamerReader::AllocateVideoBuffer(GstPad* aPad,
+                                                   guint64 aOffset,
+                                                   guint aSize,
+                                                   GstCaps* aCaps,
+                                                   GstBuffer** aBuf)
+{
+  nsRefPtr<PlanarYCbCrImage> image;
+  return AllocateVideoBufferFull(aPad, aOffset, aSize, aCaps, aBuf, image);
+}
+
+GstFlowReturn GStreamerReader::NewPrerollCb(GstAppSink* aSink,
                                             gpointer aUserData)
 {
-  GStreamerReader *reader = (GStreamerReader *) aUserData;
+  GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(aUserData);
 
   if (aSink == reader->mVideoAppSink)
     reader->VideoPreroll();
@@ -750,12 +849,12 @@ void GStreamerReader::AudioPreroll()
 {
   /* The first audio buffer has reached the audio sink. Get rate and channels */
   LOG(PR_LOG_DEBUG, ("Audio preroll"));
-  GstPad *sinkpad = gst_element_get_pad(GST_ELEMENT(mAudioAppSink), "sink");
-  GstCaps *caps = gst_pad_get_negotiated_caps(sinkpad);
-  GstStructure *s = gst_caps_get_structure(caps, 0);
+  GstPad* sinkpad = gst_element_get_pad(GST_ELEMENT(mAudioAppSink), "sink");
+  GstCaps* caps = gst_pad_get_negotiated_caps(sinkpad);
+  GstStructure* s = gst_caps_get_structure(caps, 0);
   mInfo.mAudioRate = mInfo.mAudioChannels = 0;
-  gst_structure_get_int(s, "rate", (gint *) &mInfo.mAudioRate);
-  gst_structure_get_int(s, "channels", (gint *) &mInfo.mAudioChannels);
+  gst_structure_get_int(s, "rate", (gint*) &mInfo.mAudioRate);
+  gst_structure_get_int(s, "channels", (gint*) &mInfo.mAudioChannels);
   NS_ASSERTION(mInfo.mAudioRate != 0, ("audio rate is zero"));
   NS_ASSERTION(mInfo.mAudioChannels != 0, ("audio channels is zero"));
   NS_ASSERTION(mInfo.mAudioChannels > 0 && mInfo.mAudioChannels <= MAX_CHANNELS,
@@ -769,10 +868,10 @@ void GStreamerReader::VideoPreroll()
 {
   /* The first video buffer has reached the video sink. Get width and height */
   LOG(PR_LOG_DEBUG, ("Video preroll"));
-  GstPad *sinkpad = gst_element_get_pad(GST_ELEMENT(mVideoAppSink), "sink");
-  GstCaps *caps = gst_pad_get_negotiated_caps(sinkpad);
+  GstPad* sinkpad = gst_element_get_pad(GST_ELEMENT(mVideoAppSink), "sink");
+  GstCaps* caps = gst_pad_get_negotiated_caps(sinkpad);
   gst_video_format_parse_caps(caps, &mFormat, &mPicture.width, &mPicture.height);
-  GstStructure *structure = gst_caps_get_structure(caps, 0);
+  GstStructure* structure = gst_caps_get_structure(caps, 0);
   gst_structure_get_fraction(structure, "framerate", &fpsNum, &fpsDen);
   NS_ASSERTION(mPicture.width && mPicture.height, "invalid video resolution");
   mInfo.mDisplay = nsIntSize(mPicture.width, mPicture.height);
@@ -781,10 +880,10 @@ void GStreamerReader::VideoPreroll()
   gst_object_unref(sinkpad);
 }
 
-GstFlowReturn GStreamerReader::NewBufferCb(GstAppSink *aSink,
-                                           gpointer aUserData)
+GstFlowReturn GStreamerReader::NewBufferCb(GstAppSink* aSink,
+                                           gpointer aUserData)
 {
-  GStreamerReader *reader = (GStreamerReader *) aUserData;
+  GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(aUserData);
 
   if (aSink == reader->mVideoAppSink)
     reader->NewVideoBuffer();
@@ -815,13 +914,13 @@ void GStreamerReader::NewAudioBuffer()
   mon.NotifyAll();
 }
 
-void GStreamerReader::EosCb(GstAppSink *aSink, gpointer aUserData)
+void GStreamerReader::EosCb(GstAppSink* aSink, gpointer aUserData)
 {
-  GStreamerReader *reader = (GStreamerReader *) aUserData;
+  GStreamerReader* reader = reinterpret_cast<GStreamerReader*>(aUserData);
   reader->Eos(aSink);
 }
 
-void GStreamerReader::Eos(GstAppSink *aSink)
+void GStreamerReader::Eos(GstAppSink* aSink)
 {
   /* We reached the end of the stream */
   {
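Aside (not part of the commit): taken together, the hunks above form the producer half of the zero-copy path. A schematic of the whole flow, as commentary grounded in the diff:

/* 1. Init() registers AllocateVideoBufferCb as the bufferalloc function of
 *    the video appsink's sink pad (GStreamer 0.10.36+ only).
 * 2. When upstream calls gst_pad_alloc_buffer(), AllocateVideoBufferFull()
 *    creates a PlanarYCbCrImage, makes its freshly allocated buffer the
 *    GST_BUFFER_DATA of a new GstBuffer, and attaches a boxed BufferData
 *    (holding a strong reference to the image) as "moz-reader-data" qdata.
 * 3. The decoder then writes its YUV output directly into the image's buffer.
 * 4. DecodeVideoFrame() pulls the buffer, recovers the image from the qdata
 *    and hands it to VideoData::Create(..., image, ...): no memcpy happens.
 * 5. If upstream never called gst_pad_alloc_buffer(), DecodeVideoFrame()
 *    falls back to allocating an image itself and copying the frame. */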
@@ -9,6 +9,7 @@
 #include <gst/app/gstappsrc.h>
 #include <gst/app/gstappsink.h>
 #include <gst/video/video.h>
+#include <map>
 #include "MediaDecoderReader.h"
 
 namespace mozilla {
@@ -49,7 +50,7 @@ public:
 private:
 
   void ReadAndPushData(guint aLength);
-  bool WaitForDecodedData(int *counter);
+  bool WaitForDecodedData(int* counter);
   void NotifyBytesConsumed();
   int64_t QueryDuration();
 
@@ -58,56 +59,68 @@ private:
   /* Called on the source-setup signal emitted by playbin. Used to
   * configure appsrc .
   */
-  static void PlayBinSourceSetupCb(GstElement *aPlayBin,
-                                   GParamSpec *pspec,
+  static void PlayBinSourceSetupCb(GstElement* aPlayBin,
+                                   GParamSpec* pspec,
                                    gpointer aUserData);
-  void PlayBinSourceSetup(GstAppSrc *aSource);
+  void PlayBinSourceSetup(GstAppSrc* aSource);
 
   /* Called from appsrc when we need to read more data from the resource */
-  static void NeedDataCb(GstAppSrc *aSrc, guint aLength, gpointer aUserData);
-  void NeedData(GstAppSrc *aSrc, guint aLength);
+  static void NeedDataCb(GstAppSrc* aSrc, guint aLength, gpointer aUserData);
+  void NeedData(GstAppSrc* aSrc, guint aLength);
 
   /* Called when appsrc has enough data and we can stop reading */
-  static void EnoughDataCb(GstAppSrc *aSrc, gpointer aUserData);
-  void EnoughData(GstAppSrc *aSrc);
+  static void EnoughDataCb(GstAppSrc* aSrc, gpointer aUserData);
+  void EnoughData(GstAppSrc* aSrc);
 
   /* Called when a seek is issued on the pipeline */
-  static gboolean SeekDataCb(GstAppSrc *aSrc,
+  static gboolean SeekDataCb(GstAppSrc* aSrc,
                              guint64 aOffset,
                              gpointer aUserData);
-  gboolean SeekData(GstAppSrc *aSrc, guint64 aOffset);
+  gboolean SeekData(GstAppSrc* aSrc, guint64 aOffset);
 
   /* Called when events reach the sinks. See inline comments */
-  static gboolean EventProbeCb(GstPad *aPad, GstEvent *aEvent, gpointer aUserData);
-  gboolean EventProbe(GstPad *aPad, GstEvent *aEvent);
+  static gboolean EventProbeCb(GstPad* aPad, GstEvent* aEvent, gpointer aUserData);
+  gboolean EventProbe(GstPad* aPad, GstEvent* aEvent);
+
+  /* Called when elements in the video branch of the pipeline call
+   * gst_pad_alloc_buffer(). Used to provide PlanarYCbCrImage backed GstBuffers
+   * to the pipeline so that a memory copy can be avoided when handling YUV
+   * buffers from the pipeline to the gfx side.
+   */
+  static GstFlowReturn AllocateVideoBufferCb(GstPad* aPad, guint64 aOffset, guint aSize,
+                                             GstCaps* aCaps, GstBuffer** aBuf);
+  GstFlowReturn AllocateVideoBufferFull(GstPad* aPad, guint64 aOffset, guint aSize,
+                                        GstCaps* aCaps, GstBuffer** aBuf, nsRefPtr<layers::PlanarYCbCrImage>& aImage);
+  GstFlowReturn AllocateVideoBuffer(GstPad* aPad, guint64 aOffset, guint aSize,
+                                    GstCaps* aCaps, GstBuffer** aBuf);
 
   /* Called when the pipeline is prerolled, that is when at start or after a
    * seek, the first audio and video buffers are queued in the sinks.
   */
-  static GstFlowReturn NewPrerollCb(GstAppSink *aSink, gpointer aUserData);
+  static GstFlowReturn NewPrerollCb(GstAppSink* aSink, gpointer aUserData);
   void VideoPreroll();
   void AudioPreroll();
 
   /* Called when buffers reach the sinks */
-  static GstFlowReturn NewBufferCb(GstAppSink *aSink, gpointer aUserData);
+  static GstFlowReturn NewBufferCb(GstAppSink* aSink, gpointer aUserData);
   void NewVideoBuffer();
   void NewAudioBuffer();
 
   /* Called at end of stream, when decoding has finished */
-  static void EosCb(GstAppSink *aSink, gpointer aUserData);
-  void Eos(GstAppSink *aSink);
+  static void EosCb(GstAppSink* aSink, gpointer aUserData);
+  void Eos(GstAppSink* aSink);
 
-  GstElement *mPlayBin;
-  GstBus *mBus;
-  GstAppSrc *mSource;
+  GstElement* mPlayBin;
+  GstBus* mBus;
+  GstAppSrc* mSource;
   /* video sink bin */
-  GstElement *mVideoSink;
+  GstElement* mVideoSink;
   /* the actual video app sink */
-  GstAppSink *mVideoAppSink;
+  GstAppSink* mVideoAppSink;
   /* audio sink bin */
-  GstElement *mAudioSink;
+  GstElement* mAudioSink;
   /* the actual audio app sink */
-  GstAppSink *mAudioAppSink;
+  GstAppSink* mAudioAppSink;
   GstVideoFormat mFormat;
   nsIntRect mPicture;
   int mVideoSinkBufferCount;
@@ -135,6 +148,21 @@ private:
   int fpsDen;
 };
 
+class BufferData {
+  public:
+    BufferData(layers::PlanarYCbCrImage* aImage) : mImage(aImage) {}
+
+    static void* Copy(void* aData) {
+      return new BufferData(reinterpret_cast<BufferData*>(aData)->mImage);
+    }
+
+    static void Free(void* aData) {
+      delete reinterpret_cast<BufferData*>(aData);
+    }
+
+    nsRefPtr<layers::PlanarYCbCrImage> mImage;
+};
+
 } // namespace mozilla
 
 #endif
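Aside (not part of the commit): BufferData above is paired with the G_DEFINE_BOXED_TYPE(BufferData, buffer_data, BufferData::Copy, BufferData::Free) line in GStreamerReader.cpp, which generates buffer_data_get_type() and registers a boxed GType using those two static methods as the copy and free hooks. A hedged sketch of the standard GLib lifetime behavior this buys; 'image' is a hypothetical layers::PlanarYCbCrImage*:

GValue value = {0,};
g_value_init(&value, buffer_data_get_type());
g_value_take_boxed(&value, new BufferData(image));  /* value now owns the BufferData */

/* g_boxed_copy()/g_value_copy() would call BufferData::Copy, taking another
 * nsRefPtr on the image; g_value_unset() calls BufferData::Free, dropping it.
 * The image therefore stays alive as long as any holder of the boxed value,
 * e.g. the qdata attached to a GstBuffer, is still around. */
g_value_unset(&value);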
@@ -454,7 +454,7 @@ PlanarYCbCrImage::CopyData(const Data& aData)
                 mData.mYStride * mData.mYSize.height;
 
   // get new buffer
-  mBuffer = AllocateBuffer(mBufferSize); 
+  mBuffer = AllocateBuffer(mBufferSize);
   if (!mBuffer)
     return;
 
@@ -486,6 +486,24 @@ PlanarYCbCrImage::GetOffscreenFormat()
          mOffscreenFormat;
 }
 
+void
+PlanarYCbCrImage::SetDataNoCopy(const Data &aData)
+{
+  mData = aData;
+  mSize = aData.mPicSize;
+}
+
+uint8_t*
+PlanarYCbCrImage::AllocateAndGetNewBuffer(uint32_t aSize)
+{
+  // update buffer size
+  mBufferSize = aSize;
+
+  // get new buffer
+  mBuffer = AllocateBuffer(mBufferSize);
+  return mBuffer;
+}
+
 already_AddRefed<gfxASurface>
 PlanarYCbCrImage::GetAsSurface()
 {
@@ -688,6 +688,20 @@ public:
    */
   virtual void SetData(const Data& aData);
 
+  /**
+   * This doesn't make a copy of the data buffers. Can be used when mBuffer is
+   * pre allocated with AllocateAndGetNewBuffer(size) and then SetDataNoCopy is
+   * called to only update the picture size, planes etc. fields in mData.
+   * The GStreamer media backend uses this to decode into PlanarYCbCrImage(s)
+   * directly.
+   */
+  virtual void SetDataNoCopy(const Data &aData);
+
+  /**
+   * This allocates and returns a new buffer
+   */
+  virtual uint8_t* AllocateAndGetNewBuffer(uint32_t aSize);
+
   /**
    * Ask this Image to not convert YUV to RGB during SetData, and make
    * the original data available through GetData. This is optional,
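Aside (not part of the commit): a condensed sketch of the call sequence these two methods are designed for, mirroring the GStreamer backend above; 'container' and 'aSize' are assumed to be in scope and the plane bookkeeping is elided:

ImageFormat format = PLANAR_YCBCR;
nsRefPtr<Image> generic = container->CreateImage(&format, 1);
PlanarYCbCrImage* image = static_cast<PlanarYCbCrImage*>(generic.get());

uint8_t* storage = image->AllocateAndGetNewBuffer(aSize);
// ... the decoder writes the Y/Cb/Cr planes directly into 'storage' ...

PlanarYCbCrImage::Data data;
data.mYChannel = storage;      // plane pointers point into 'storage'
// ... fill in mCbChannel, mCrChannel, strides, sizes and mPicSize ...
image->SetDataNoCopy(data);    // records the description only; no memcpy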