merge mozilla-inbound to mozilla-central a=merge

Carsten "Tomcat" Book 2016-01-25 11:50:09 +01:00
commit cfc0028336
120 changed files with 5564 additions and 621 deletions


@ -1692,15 +1692,6 @@ if test -n "$MOZ_USE_SYSTRACE"; then
AC_DEFINE(MOZ_USE_SYSTRACE)
fi
# For profiling builds keep the symbol information
if test "$MOZ_PROFILING" -a -z "$STRIP_FLAGS"; then
case "$OS_TARGET" in
Linux|DragonFly|FreeBSD|NetBSD|OpenBSD)
STRIP_FLAGS="--strip-debug"
;;
esac
fi
dnl ========================================================
dnl = Use Valgrind
dnl ========================================================
@ -1799,6 +1790,38 @@ if test -n "$MOZ_VTUNE"; then
AC_DEFINE(MOZ_VTUNE)
fi
# For profiling builds keep the symbol information
if test "$MOZ_PROFILING" -a -z "$STRIP_FLAGS"; then
case "$OS_TARGET" in
Linux|DragonFly|FreeBSD|NetBSD|OpenBSD)
STRIP_FLAGS="--strip-debug"
;;
esac
fi
dnl ========================================================
dnl = Enable DMD
dnl ========================================================
MOZ_ARG_ENABLE_BOOL(dmd,
[ --enable-dmd Enable DMD; also enables jemalloc, replace-malloc and profiling],
MOZ_DMD=1,
MOZ_DMD= )
if test "$MOZ_DMD"; then
AC_DEFINE(MOZ_DMD)
if test "${CPU_ARCH}" = "arm"; then
CFLAGS="$CFLAGS -funwind-tables"
CXXFLAGS="$CXXFLAGS -funwind-tables"
fi
MOZ_MEMORY=1 # DMD enables jemalloc
MOZ_REPLACE_MALLOC=1 # DMD enables replace-malloc
MOZ_PROFILING=1 # DMD enables profiling
fi
AC_SUBST(MOZ_DMD)
dnl ========================================================
dnl Profiling
dnl ========================================================
@ -7062,28 +7085,6 @@ if test -n "$MOZ_DEBUG"; then
AC_DEFINE(MOZ_DUMP_PAINTING)
fi
dnl ========================================================
dnl = Enable DMD
dnl ========================================================
MOZ_ARG_ENABLE_BOOL(dmd,
[ --enable-dmd Enable DMD; also enables jemalloc and replace-malloc],
MOZ_DMD=1,
MOZ_DMD= )
if test "$MOZ_DMD"; then
AC_DEFINE(MOZ_DMD)
if test "${CPU_ARCH}" = "arm"; then
CFLAGS="$CFLAGS -funwind-tables"
CXXFLAGS="$CXXFLAGS -funwind-tables"
fi
MOZ_MEMORY=1 # DMD enables jemalloc
MOZ_REPLACE_MALLOC=1 # DMD enables replace-malloc
fi
AC_SUBST(MOZ_DMD)
dnl ========================================================
dnl = Enable jemalloc
dnl ========================================================
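The net effect of moving the profiling hunk above the DMD block is that --enable-dmd now also forces MOZ_PROFILING on. On the C++ side, AC_DEFINE(MOZ_DMD) simply exposes a preprocessor symbol, so DMD-only instrumentation can be compiled conditionally; a minimal sketch (the function is hypothetical, not DMD's actual API):

  #ifdef MOZ_DMD
  // Present only in --enable-dmd builds.
  static void ReportHeapStateForDMD() { /* DMD-only instrumentation */ }
  #else
  static void ReportHeapStateForDMD() {}
  #endif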


@ -5880,7 +5880,6 @@ private:
RefPtr<FullscreenTransitionTask> mTask;
};
static const uint32_t kNextPaintTimeout = 1000; // ms
static const char* const kPaintedTopic;
RefPtr<nsGlobalWindow> mWindow;
@ -5940,8 +5939,14 @@ FullscreenTransitionTask::Run()
// Completely fixing those cases seems to be tricky, and since they
// should rarely happen, it probably isn't worth fixing. Hence we
// simply add a timeout here to ensure we never hang forever.
// In addition, if the page is complicated or the machine is less
// powerful, layout could take a long time, in which case staying
// on a black screen for that long could hurt the user experience even
// more than exposing an intermediate state.
mTimer = do_CreateInstance(NS_TIMER_CONTRACTID);
mTimer->Init(observer, kNextPaintTimeout, nsITimer::TYPE_ONE_SHOT);
uint32_t timeout =
Preferences::GetUint("full-screen-api.transition.timeout", 500);
mTimer->Init(observer, timeout, nsITimer::TYPE_ONE_SHOT);
} else if (stage == eAfterToggle) {
mWidget->PerformFullscreenTransition(nsIWidget::eAfterFullscreenToggle,
mDuration.mFadeOut, mTransitionData,


@ -542,7 +542,42 @@ WebGLContext::FramebufferTexture2D(GLenum target,
return;
}
if (!IsWebGL2() && level != 0) {
if (textarget != LOCAL_GL_TEXTURE_2D &&
(textarget < LOCAL_GL_TEXTURE_CUBE_MAP_POSITIVE_X ||
textarget > LOCAL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Z))
{
return ErrorInvalidEnumInfo("framebufferTexture2D: textarget:",
textarget);
}
if (IsWebGL2()) {
/* GLES 3.0.4 p208:
* If textarget is one of TEXTURE_CUBE_MAP_POSITIVE_X,
* TEXTURE_CUBE_MAP_POSITIVE_Y, TEXTURE_CUBE_MAP_POSITIVE_Z,
* TEXTURE_CUBE_MAP_NEGATIVE_X, TEXTURE_CUBE_MAP_NEGATIVE_Y,
* or TEXTURE_CUBE_MAP_NEGATIVE_Z, then level must be greater
* than or equal to zero and less than or equal to log2 of the
* value of MAX_CUBE_MAP_TEXTURE_SIZE. If textarget is TEXTURE_2D,
* level must be greater than or equal to zero and no larger than
* log2 of the value of MAX_TEXTURE_SIZE. Otherwise, an
* INVALID_VALUE error is generated.
*/
if (textarget == LOCAL_GL_TEXTURE_2D) {
if (uint32_t(level) > FloorLog2(mImplMaxTextureSize)) {
ErrorInvalidValue("framebufferTexture2D: level is too large.");
return;
}
} else {
MOZ_ASSERT(textarget >= LOCAL_GL_TEXTURE_CUBE_MAP_POSITIVE_X &&
textarget <= LOCAL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Z);
if (uint32_t(level) > FloorLog2(mImplMaxCubeMapTextureSize)) {
ErrorInvalidValue("framebufferTexture2D: level is too large.");
return;
}
}
} else if (level != 0) {
ErrorInvalidValue("framebufferTexture2D: level must be 0.");
return;
}
@ -567,14 +602,6 @@ WebGLContext::FramebufferTexture2D(GLenum target,
" framebuffer 0.");
}
if (textarget != LOCAL_GL_TEXTURE_2D &&
(textarget < LOCAL_GL_TEXTURE_CUBE_MAP_POSITIVE_X ||
textarget > LOCAL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Z))
{
return ErrorInvalidEnumInfo("framebufferTexture2D: textarget:",
textarget);
}
if (!ValidateFramebufferAttachment(fb, attachment, "framebufferTexture2D"))
return;
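To make the new level bound concrete: for TEXTURE_2D the patch accepts any level with level <= FloorLog2(MAX_TEXTURE_SIZE). A self-contained sketch of that check (FloorLog2 here is a stand-in for mozilla::FloorLog2, and the size value is only an example):

  #include <cstdint>

  // Position of the highest set bit, e.g. FloorLog2(16384) == 14.
  static uint32_t FloorLog2(uint32_t aValue) {
    uint32_t log = 0;
    while (aValue >>= 1) {
      ++log;
    }
    return log;
  }

  // With a hypothetical MAX_TEXTURE_SIZE of 16384, levels 0..14 pass and
  // level 15 triggers INVALID_VALUE, matching the GLES 3.0.4 rule quoted above.
  static bool IsValidTexture2DLevel(uint32_t aLevel, uint32_t aMaxTextureSize) {
    return aLevel <= FloorLog2(aMaxTextureSize);
  }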


@ -1157,6 +1157,8 @@ WebGLFramebuffer::GetAttachmentParameter(const char* funcName, JSContext* cx,
attachPoint = GetAttachPoint(LOCAL_GL_DEPTH_ATTACHMENT);
}
FinalizeAttachments();
return attachPoint->GetParameter(funcName, mContext, cx, target, attachment, pname,
out_error);
}


@ -737,6 +737,7 @@ void HTMLMediaElement::AbortExistingLoads()
}
mError = nullptr;
mCurrentPlayRangeStart = -1.0;
mLoadedDataFired = false;
mAutoplaying = true;
mIsLoadingFromSourceChildren = false;


@ -96,8 +96,7 @@ MediaFormatReader::Shutdown()
mAudio.RejectPromise(CANCELED, __func__);
}
mAudio.mInitPromise.DisconnectIfExists();
mAudio.mDecoder->Shutdown();
mAudio.mDecoder = nullptr;
mAudio.ShutdownDecoder();
}
if (mAudio.mTrackDemuxer) {
mAudio.ResetDemuxer();
@ -117,8 +116,7 @@ MediaFormatReader::Shutdown()
mVideo.RejectPromise(CANCELED, __func__);
}
mVideo.mInitPromise.DisconnectIfExists();
mVideo.mDecoder->Shutdown();
mVideo.mDecoder = nullptr;
mVideo.ShutdownDecoder();
}
if (mVideo.mTrackDemuxer) {
mVideo.ResetDemuxer();
@ -381,6 +379,8 @@ MediaFormatReader::EnsureDecoderCreated(TrackType aTrack)
decoder.mDecoderInitialized = false;
MonitorAutoLock mon(decoder.mMonitor);
switch (aTrack) {
case TrackType::kAudioTrack:
decoder.mDecoder =
@ -406,6 +406,11 @@ MediaFormatReader::EnsureDecoderCreated(TrackType aTrack)
default:
break;
}
if (decoder.mDecoder) {
decoder.mDescription = decoder.mDecoder->GetDescriptionName();
} else {
decoder.mDescription = "error creating decoder";
}
return decoder.mDecoder != nullptr;
}
@ -429,13 +434,14 @@ MediaFormatReader::EnsureDecoderInitialized(TrackType aTrack)
auto& decoder = self->GetDecoderData(aTrack);
decoder.mInitPromise.Complete();
decoder.mDecoderInitialized = true;
MonitorAutoLock mon(decoder.mMonitor);
decoder.mDescription = decoder.mDecoder->GetDescriptionName();
self->ScheduleUpdate(aTrack);
},
[self, aTrack] (MediaDataDecoder::DecoderFailureReason aResult) {
auto& decoder = self->GetDecoderData(aTrack);
decoder.mInitPromise.Complete();
decoder.mDecoder->Shutdown();
decoder.mDecoder = nullptr;
decoder.ShutdownDecoder();
self->NotifyError(aTrack);
}));
return false;
@ -465,8 +471,7 @@ MediaFormatReader::DisableHardwareAcceleration()
if (HasVideo() && !mHardwareAccelerationDisabled) {
mHardwareAccelerationDisabled = true;
Flush(TrackInfo::kVideoTrack);
mVideo.mDecoder->Shutdown();
mVideo.mDecoder = nullptr;
mVideo.ShutdownDecoder();
if (!EnsureDecoderCreated(TrackType::kVideoTrack)) {
LOG("Unable to re-create decoder, aborting");
NotifyError(TrackInfo::kVideoTrack);
@ -919,8 +924,7 @@ MediaFormatReader::HandleDemuxedSamples(TrackType aTrack,
// Flush will clear our array of queued samples. So make a copy now.
nsTArray<RefPtr<MediaRawData>> samples{decoder.mQueuedSamples};
Flush(aTrack);
decoder.mDecoder->Shutdown();
decoder.mDecoder = nullptr;
decoder.ShutdownDecoder();
if (sample->mKeyframe) {
decoder.mQueuedSamples.AppendElements(Move(samples));
NotifyDecodingRequested(aTrack);
@ -1604,11 +1608,8 @@ void MediaFormatReader::ReleaseMediaResources()
if (mVideoFrameContainer) {
mVideoFrameContainer->ClearCurrentFrame();
}
if (mVideo.mDecoder) {
mVideo.mInitPromise.DisconnectIfExists();
mVideo.mDecoder->Shutdown();
mVideo.mDecoder = nullptr;
}
mVideo.mInitPromise.DisconnectIfExists();
mVideo.ShutdownDecoder();
}
bool
@ -1666,12 +1667,25 @@ void
MediaFormatReader::GetMozDebugReaderData(nsAString& aString)
{
nsAutoCString result;
const char* audioName = "unavailable";
const char* videoName = audioName;
if (HasAudio()) {
MonitorAutoLock mon(mAudio.mMonitor);
audioName = mAudio.mDescription;
}
if (HasVideo()) {
MonitorAutoLock mon(mVideo.mMonitor);
videoName = mVideo.mDescription;
}
result += nsPrintfCString("audio decoder: %s\n", audioName);
result += nsPrintfCString("audio frames decoded: %lld\n",
mAudio.mNumSamplesOutputTotal);
result += nsPrintfCString("video decoder: %s\n", videoName);
result += nsPrintfCString("hardware video decoding: %s\n",
VideoIsHardwareAccelerated() ? "enabled" : "disabled");
result += nsPrintfCString("audio frames decoded: %lld (skipped:%lld)\n"
"video frames decoded: %lld (skipped:%lld)\n",
mAudio.mNumSamplesOutputTotal,
mAudio.mNumSamplesSkippedTotal,
result += nsPrintfCString("video frames decoded: %lld (skipped:%lld)\n",
mVideo.mNumSamplesOutputTotal,
mVideo.mNumSamplesSkippedTotal);
aString += NS_ConvertUTF8toUTF16(result);


@ -10,6 +10,7 @@
#include "mozilla/Atomics.h"
#include "mozilla/Maybe.h"
#include "mozilla/TaskQueue.h"
#include "mozilla/Monitor.h"
#include "MediaDataDemuxer.h"
#include "MediaDecoderReader.h"
@ -213,6 +214,8 @@ private:
uint32_t aDecodeAhead)
: mOwner(aOwner)
, mType(aType)
, mMonitor("DecoderData")
, mDescription("shutdown")
, mDecodeAhead(aDecodeAhead)
, mUpdateScheduled(false)
, mDemuxEOS(false)
@ -240,14 +243,27 @@ private:
// Disambiguate Audio vs Video.
MediaData::Type mType;
RefPtr<MediaTrackDemuxer> mTrackDemuxer;
// The platform decoder.
RefPtr<MediaDataDecoder> mDecoder;
// TaskQueue on which decoder can choose to decode.
// Only non-null up until the decoder is created.
RefPtr<FlushableTaskQueue> mTaskQueue;
// Callback that receives output and error notifications from the decoder.
nsAutoPtr<DecoderCallback> mCallback;
// Monitor protecting mDescription and mDecoder.
Monitor mMonitor;
// The platform decoder.
RefPtr<MediaDataDecoder> mDecoder;
const char* mDescription;
void ShutdownDecoder()
{
MonitorAutoLock mon(mMonitor);
if (mDecoder) {
mDecoder->Shutdown();
}
mDescription = "shutdown";
mDecoder = nullptr;
}
// Only accessed from reader's task queue.
uint32_t mDecodeAhead;
bool mUpdateScheduled;
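The intended discipline for the new mMonitor, reduced to a self-contained sketch (std::mutex stands in for mozilla::Monitor; field names mirror DecoderData):

  #include <mutex>

  struct DecoderDataSketch {
    std::mutex mMonitor;                    // stand-in for mozilla::Monitor
    const char* mDescription = "shutdown";  // always a static string

    // Mirrors ShutdownDecoder(): writers swap the description under the lock.
    void Shutdown() {
      std::lock_guard<std::mutex> lock(mMonitor);
      mDescription = "shutdown";
    }

    // Mirrors the GetMozDebugReaderData() read path: sample under the lock.
    // The returned pointer stays valid because only static strings are stored.
    const char* Description() {
      std::lock_guard<std::mutex> lock(mMonitor);
      return mDescription;
    }
  };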


@ -202,12 +202,18 @@ DecodedAudioDataSink::PopFrames(uint32_t aFrames)
UniquePtr<AudioDataValue[]> mData;
};
if (!mCurrentData) {
while (!mCurrentData) {
// No data in the queue. Return an empty chunk.
if (AudioQueue().GetSize() == 0) {
return MakeUnique<Chunk>();
}
// Ignore the element with 0 frames and try the next.
if (AudioQueue().PeekFront()->mFrames == 0) {
RefPtr<MediaData> releaseMe = AudioQueue().PopFront();
continue;
}
// See if there's a gap in the audio. If there is, push silence into the
// audio hardware, so we can play across the gap.
// Calculate the timestamp of the next chunk of audio in numbers of
@ -239,6 +245,7 @@ DecodedAudioDataSink::PopFrames(uint32_t aFrames)
mCursor = MakeUnique<AudioBufferCursor>(mCurrentData->mAudioData.get(),
mCurrentData->mChannels,
mCurrentData->mFrames);
MOZ_ASSERT(mCurrentData->mFrames > 0);
}
auto framesToPop = std::min(aFrames, mCursor->Available());
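The switch from if to while means zero-frame entries are consumed and skipped rather than handed to the audio stream; the control flow, reduced to a self-contained sketch over a plain queue of frame counts:

  #include <deque>
  #include <optional>

  // Returns the first chunk that actually contains frames, dropping any
  // zero-frame entries on the way; std::nullopt means the queue ran dry
  // (the real code returns an empty Chunk in that case).
  std::optional<int> NextNonEmptyChunk(std::deque<int>& aFrameQueue) {
    while (!aFrameQueue.empty()) {
      int frames = aFrameQueue.front();
      aFrameQueue.pop_front();
      if (frames == 0) {
        continue;  // ignore the element with 0 frames and try the next
      }
      return frames;
    }
    return std::nullopt;
  }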


@ -220,6 +220,11 @@ public:
{
return NS_OK;
}
// Return the name of the MediaDataDecoder; this is only used for
// debugging/logging purposes. Only return a static const string, as the
// information may be accessed in a non-thread-safe fashion.
virtual const char* GetDescriptionName() const = 0;
};
} // namespace mozilla
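The "static const string" requirement exists because the pointer may be read from another thread with no synchronization; a sketch of a conforming and a non-conforming implementation (both class names are hypothetical):

  #include "nsPrintfCString.h"

  struct GoodDecoder {
    // OK: a string literal has static storage duration, so the pointer
    // remains valid regardless of which thread reads it, or when.
    const char* GetDescriptionName() const { return "example audio decoder"; }
  };

  struct BadDecoder {
    int mId = 0;
    // NOT OK: get() points into a temporary nsPrintfCString that is
    // destroyed at the end of the return statement.
    const char* GetDescriptionName() const {
      return nsPrintfCString("decoder %d", mId).get();
    }
  };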


@ -93,6 +93,11 @@ public:
return NS_OK;
}
const char* GetDescriptionName() const override
{
return "blank media data decoder";
}
private:
nsAutoPtr<BlankMediaDataCreator> mCreator;
RefPtr<FlushableTaskQueue> mTaskQueue;


@ -27,6 +27,10 @@ public:
nsresult Flush() override;
nsresult Drain() override;
nsresult Shutdown() override;
const char* GetDescriptionName() const override
{
return "opus audio decoder";
}
// Return true if mimetype is Opus
static bool IsOpus(const nsACString& aMimeType);


@ -33,6 +33,10 @@ public:
nsresult Flush() override;
nsresult Drain() override;
nsresult Shutdown() override;
const char* GetDescriptionName() const override
{
return "libvpx video decoder";
}
// Return true if mimetype is a VPX codec
static bool IsVPX(const nsACString& aMimeType);


@ -30,6 +30,10 @@ public:
nsresult Flush() override;
nsresult Drain() override;
nsresult Shutdown() override;
const char* GetDescriptionName() const override
{
return "vorbis audio decoder";
}
// Return true if mimetype is Vorbis
static bool IsVorbis(const nsACString& aMimeType);


@ -137,6 +137,10 @@ public:
return rv;
}
const char* GetDescriptionName() const override {
return mDecoder->GetDescriptionName();
}
private:
RefPtr<MediaDataDecoder> mDecoder;


@ -74,6 +74,10 @@ public:
nsresult Flush() override;
nsresult Drain() override;
nsresult Shutdown() override;
const char* GetDescriptionName() const override
{
return "GMP audio decoder";
}
protected:
virtual void InitTags(nsTArray<nsCString>& aTags);


@ -89,6 +89,10 @@ public:
nsresult Flush() override;
nsresult Drain() override;
nsresult Shutdown() override;
const char* GetDescriptionName() const override
{
return "GMP video decoder";
}
protected:
virtual void InitTags(nsTArray<nsCString>& aTags);


@ -138,6 +138,11 @@ public:
nsresult Drain() override;
nsresult Shutdown() override;
const char* GetDescriptionName() const override
{
return "GMP proxy data decoder";
}
// Called by MediaDataDecoderCallbackProxy.
void FlushComplete();


@ -85,6 +85,11 @@ public:
}
const char* GetDescriptionName() const override
{
return "android video decoder";
}
RefPtr<InitPromise> Init() override
{
mSurfaceTexture = AndroidSurfaceTexture::Create();
@ -189,6 +194,11 @@ public:
}
}
const char* GetDescriptionName() const override
{
return "android audio decoder";
}
nsresult Output(BufferInfo::Param aInfo, void* aBuffer,
MediaFormat::Param aFormat, const TimeUnit& aDuration)
{


@ -57,6 +57,10 @@ public:
nsresult Drain() override;
nsresult Shutdown() override;
nsresult Input(MediaRawData* aSample) override;
const char* GetDescriptionName() const override
{
return "android decoder";
}
protected:
enum ModuleState {


@ -31,6 +31,11 @@ public:
nsresult Drain() override;
nsresult Shutdown() override;
const char* GetDescriptionName() const override
{
return "apple CoreMedia decoder";
}
// Callbacks also need access to the config.
const AudioInfo& mConfig;


@ -81,6 +81,11 @@ public:
return true;
}
const char* GetDescriptionName() const override
{
return "apple VDA decoder";
}
// Access from the taskqueue and the decoder's thread.
// OutputFrame is thread-safe.
nsresult OutputFrame(CVPixelBufferRef aImage,


@ -27,6 +27,13 @@ public:
return mIsHardwareAccelerated;
}
const char* GetDescriptionName() const override
{
return mIsHardwareAccelerated
? "apple hardware VT decoder"
: "apple software VT decoder";
}
protected:
void ProcessFlush() override;
void ProcessDrain() override;
@ -43,7 +50,7 @@ private:
nsresult WaitForAsynchronousFrames();
CFDictionaryRef CreateDecoderSpecification();
CFDictionaryRef CreateDecoderExtensions();
bool mIsHardwareAccelerated;
Atomic<bool> mIsHardwareAccelerated;
};
} // namespace mozilla
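Promoting the flag to Atomic<bool> is what makes the new GetDescriptionName() safe to call off the decoder's thread; the same idea with std::atomic, as a self-contained sketch:

  #include <atomic>

  struct VTDecoderSketch {
    std::atomic<bool> mIsHardwareAccelerated{false};

    // Callable from any thread: the atomic load needs no lock, and both
    // possible return values are static strings.
    const char* GetDescriptionName() const {
      return mIsHardwareAccelerated ? "apple hardware VT decoder"
                                    : "apple software VT decoder";
    }
  };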


@ -31,6 +31,10 @@ public:
void ProcessDrain() override;
void InitCodecContext() override;
static AVCodecID GetCodecId(const nsACString& aMimeType);
const char* GetDescriptionName() const override
{
return "ffmpeg audio decoder";
}
private:
void DecodePacket(MediaRawData* aSample);


@ -44,6 +44,14 @@ public:
void ProcessDrain() override;
void ProcessFlush() override;
void InitCodecContext() override;
const char* GetDescriptionName() const override
{
#ifdef USING_MOZFFVPX
return "ffvpx video decoder";
#else
return "ffmpeg video decoder";
#endif
}
static AVCodecID GetCodecId(const nsACString& aMimeType);
private:


@ -33,6 +33,11 @@ public:
void ProcessFlush() override;
const char* GetDescriptionName() const override
{
return "gonk audio decoder";
}
private:
bool InitMediaCodecProxy();


@ -28,6 +28,7 @@ public:
virtual ~GonkDecoderManager() {}
virtual RefPtr<InitPromise> Init() = 0;
virtual const char* GetDescriptionName() const = 0;
// Asynchronously send sample into mDecoder. If out of input buffer, aSample
// will be queued for later re-send.
@ -199,6 +200,11 @@ public:
nsresult Shutdown() override;
const char* GetDescriptionName() const override
{
return "gonk decoder";
}
private:
android::sp<GonkDecoderManager> mManager;


@ -50,6 +50,11 @@ public:
nsresult Shutdown() override;
const char* GetDescriptionName() const override
{
return "gonk video decoder";
}
static void RecycleCallback(TextureClient* aClient, void* aClosure);
protected:


@ -71,6 +71,11 @@ public:
nsresult Shutdown() override;
const char* GetDescriptionName() const override
{
return "omx decoder";
}
// Return true if event is handled.
bool Event(OMX_EVENTTYPE aEvent, OMX_U32 aData1, OMX_U32 aData2);


@ -236,7 +236,9 @@ MFTDecoder::Output(RefPtr<IMFSample>* aOutput)
// Treat other errors as unexpected, and warn.
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
MOZ_ASSERT(output.pSample);
if (!output.pSample) {
return S_OK;
}
if (mDiscontinuity) {
output.pSample->SetUINT32(MFSampleExtension_Discontinuity, TRUE);


@ -10,7 +10,7 @@
#include "WMFUtils.h"
#include "nsTArray.h"
#include "TimeUnits.h"
#include "mozilla/Telemetry.h"
#include "mozilla/Logging.h"
extern mozilla::LogModule* GetPDMLog();
@ -226,6 +226,16 @@ WMFAudioMFTManager::Output(int64_t aStreamOffset,
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
if (!sample) {
LOG("Audio MFTDecoder returned success but null output.");
nsCOMPtr<nsIRunnable> task = NS_NewRunnableFunction([]() -> void {
LOG("Reporting telemetry AUDIO_MFT_OUTPUT_NULL_SAMPLES");
Telemetry::Accumulate(Telemetry::ID::AUDIO_MFT_OUTPUT_NULL_SAMPLES, 1);
});
AbstractThread::MainThread()->Dispatch(task.forget());
return E_FAIL;
}
RefPtr<IMFMediaBuffer> buffer;
hr = sample->ConvertToContiguousBuffer(getter_AddRefs(buffer));
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);


@ -38,6 +38,11 @@ public:
return TrackInfo::kAudioTrack;
}
const char* GetDescriptionName() const override
{
return "wmf audio decoder";
}
private:
HRESULT UpdateOutputType();


@ -54,6 +54,8 @@ public:
virtual void ConfigurationChanged(const TrackInfo& aConfig) {}
virtual const char* GetDescriptionName() const = 0;
protected:
// IMFTransform wrapper that performs the decoding.
RefPtr<MFTDecoder> mDecoder;
@ -85,6 +87,11 @@ public:
nsresult ConfigurationChanged(const TrackInfo& aConfig) override;
const char* GetDescriptionName() const override
{
return mMFTManager ? mMFTManager->GetDescriptionName() : "";
}
private:
// Called on the task queue. Inserts the sample into the decoder, and


@ -21,6 +21,7 @@
#include "IMFYCbCrImage.h"
#include "mozilla/WindowsVersion.h"
#include "mozilla/Preferences.h"
#include "mozilla/Telemetry.h"
#include "nsPrintfCString.h"
extern mozilla::LogModule* GetPDMLog();
@ -78,6 +79,9 @@ WMFVideoMFTManager::WMFVideoMFTManager(
, mImageContainer(aImageContainer)
, mDXVAEnabled(aDXVAEnabled)
, mLayersBackend(aLayersBackend)
, mNullOutputCount(0)
, mGotValidOutputAfterNullOutput(false)
, mGotExcessiveNullOutput(false)
// mVideoStride, mVideoWidth, mVideoHeight, mUseHwAccel are initialized in
// Init().
{
@ -103,6 +107,20 @@ WMFVideoMFTManager::~WMFVideoMFTManager()
if (mDXVA2Manager) {
DeleteOnMainThread(mDXVA2Manager);
}
// Record whether the video decoder successfully decoded, or output null
// samples but did/didn't recover.
uint32_t telemetry = (mNullOutputCount == 0) ? 0 :
(mGotValidOutputAfterNullOutput && mGotExcessiveNullOutput) ? 1 :
mGotExcessiveNullOutput ? 2 :
mGotValidOutputAfterNullOutput ? 3 :
4;
nsCOMPtr<nsIRunnable> task = NS_NewRunnableFunction([=]() -> void {
LOG(nsPrintfCString("Reporting telemetry VIDEO_MFT_OUTPUT_NULL_SAMPLES=%d", telemetry).get());
Telemetry::Accumulate(Telemetry::ID::VIDEO_MFT_OUTPUT_NULL_SAMPLES, telemetry);
});
AbstractThread::MainThread()->Dispatch(task.forget());
}
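Spelled out, the nested conditional above encodes five mutually exclusive outcomes into one telemetry value; a sketch of the same mapping (function name hypothetical):

  #include <cstdint>

  // Bucket meanings for VIDEO_MFT_OUTPUT_NULL_SAMPLES:
  //   0 - the decoder never returned a null sample
  //   1 - null samples, recovered with valid output, and the count was excessive
  //   2 - excessive null samples, never recovered
  //   3 - null samples, recovered with valid output, count not excessive
  //   4 - some null samples, never recovered, count not excessive
  static uint32_t NullSampleBucket(uint32_t aNullCount, bool aRecovered,
                                   bool aExcessive) {
    if (aNullCount == 0) return 0;
    if (aRecovered && aExcessive) return 1;
    if (aExcessive) return 2;
    if (aRecovered) return 3;
    return 4;
  }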
const GUID&
@ -575,6 +593,23 @@ WMFVideoMFTManager::Output(int64_t aStreamOffset,
continue;
}
if (SUCCEEDED(hr)) {
if (!sample) {
LOG("Video MFTDecoder returned success but no output!");
// On some machines/input the MFT returns success but doesn't output
// a video frame. If we detect this, try again, but only up to a
// point; after 250 failures, give up. Note we count all failures
// over the life of the decoder, as we may end up exiting with a
// NEED_MORE_INPUT and coming back to hit the same error. So just
// counting with a local variable (like typeChangeCount does) may
// not work in this situation.
++mNullOutputCount;
if (mNullOutputCount > 250) {
LOG("Excessive Video MFTDecoder returning success but no output; giving up");
mGotExcessiveNullOutput = true;
return E_FAIL;
}
continue;
}
break;
}
// Else unexpected error, assert, and bail.
@ -595,6 +630,10 @@ WMFVideoMFTManager::Output(int64_t aStreamOffset,
aOutData = frame;
if (mNullOutputCount) {
mGotValidOutputAfterNullOutput = true;
}
return S_OK;
}
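The retry bookkeeping deliberately lives in member variables rather than a local counter, since Output() can exit on NEED_MORE_INPUT and re-enter later; the policy, reduced to a self-contained sketch (threshold 250 as in the patch):

  #include <cstdint>

  struct NullOutputPolicySketch {
    uint32_t mNullOutputCount = 0;          // counts over the decoder's lifetime
    bool mGotExcessiveNullOutput = false;

    // Returns true to retry after a null output, false to give up (E_FAIL).
    bool ShouldRetryAfterNullOutput() {
      if (++mNullOutputCount > 250) {
        mGotExcessiveNullOutput = true;
        return false;
      }
      return true;
    }
  };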


@ -41,6 +41,13 @@ public:
void ConfigurationChanged(const TrackInfo& aConfig) override;
const char* GetDescriptionName() const override
{
nsCString failureReason;
return IsHardwareAccelerated(failureReason)
? "wmf hardware video decoder" : "wmf software video decoder";
}
private:
bool InitializeDXVA(bool aForceD3D9);
@ -88,6 +95,10 @@ private:
const GUID& GetMFTGUID();
const GUID& GetMediaSubtypeGUID();
uint32_t mNullOutputCount;
bool mGotValidOutputAfterNullOutput;
bool mGotExcessiveNullOutput;
};
} // namespace mozilla


@ -111,6 +111,10 @@ private:
nsresult Shutdown() override;
bool IsHardwareAccelerated(nsACString& aFailureReason) const override;
nsresult ConfigurationChanged(const TrackInfo& aConfig) override;
const char* GetDescriptionName() const override
{
return mDecoder->GetDescriptionName();
}
RefPtr<MediaDataDecoder> mDecoder;
RefPtr<DecoderCallbackFuzzingWrapper> mCallbackWrapper;


@ -35,6 +35,13 @@ public:
nsresult Drain() override;
nsresult Shutdown() override;
bool IsHardwareAccelerated(nsACString& aFailureReason) const override;
const char* GetDescriptionName() const override
{
if (mDecoder) {
return mDecoder->GetDescriptionName();
}
return "H264Converter decoder (pending)";
}
// Return true if mimetype is H.264.
static bool IsH264(const TrackInfo& aConfig);


@ -789,7 +789,13 @@ nsSynthVoiceRegistry::SpeakImpl(VoiceData* aVoice,
aTask->InitDirectAudio();
}
aVoice->mService->Speak(aText, aVoice->mUri, aVolume, aRate, aPitch, aTask);
if (NS_FAILED(aVoice->mService->Speak(aText, aVoice->mUri, aVolume, aRate,
aPitch, aTask))) {
if (serviceType == nsISpeechService::SERVICETYPE_INDIRECT_AUDIO) {
aTask->DispatchError(0, 0);
}
// XXX When using direct audio, no way to dispatch error
}
}
} // namespace dom


@ -268,8 +268,7 @@ FakeIndirectAudioSynth::Speak(const nsAString& aText, const nsAString& aUri,
}
if (flags & eFailAtStart) {
aTask->DispatchError(0, 0);
return NS_OK;
return NS_ERROR_FAILURE;
}
RefPtr<FakeSynthCallback> cb = new FakeSynthCallback(


@ -72,6 +72,7 @@ skip-if = e10s || buildapp == 'mulet' || buildapp == 'b2g' || toolkit == 'androi
[test_for_of.html]
[test_frameElementWrapping.html]
[test_pointerPreserves3D.html]
[test_pointerPreserves3DClip.html]
[test_framedhistoryframes.html]
[test_idleapi_permissions.html]
skip-if = buildapp == 'b2g' || buildapp == 'mulet'


@ -0,0 +1,55 @@
<!DOCTYPE HTML>
<html>
<head>
<title>Test for pointer events with preserve-3d and clips</title>
<script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
<style type="text/css">
.outer {
transform-style: preserve-3d;
}
.container {
overflow-y: scroll;
overflow-x: hidden;
width: 200px;
height: 300px;
}
.content {
width: 200px;
height: 1000px;
transform-style: preserve-3d;
}
#container1 {
background-color: green;
transform: translateZ(2px);
}
#container2 {
height: 100px;
transform: translateY(-200px) translateZ(10px);
background-color: red;
}
</style>
</head>
<body onload="runTest();">
<div class="outer" id="outer">
<div class="container" id="container1">
<div class="content"></div>
</div>
<div class="container" id="container2">
<div class="content"></div>
</div>
</div>
<script class="testbody" type="text/javascript">
function runTest() {
var outer = document.getElementById("outer");
var x = outer.offsetLeft;
var y = outer.offsetTop;
var target = document.elementFromPoint(x + 100, y + 250);
ok(target.parentNode == document.getElementById("container1"), "Find the right target.");
SimpleTest.finish();
}
SimpleTest.waitForExplicitFinish();
</script>
</body>
</html>


@ -1339,7 +1339,13 @@ nsEventStatus AsyncPanZoomController::OnTouchEnd(const MultiTouchInput& aEvent)
if (CurrentTouchBlock()->GetActiveTouchCount() == 0) {
// It's possible we may be overscrolled if the user tapped during a
// previous overscroll pan. Make sure to snap back in this situation.
if (!SnapBackIfOverscrolled()) {
// An ancestor APZC could be overscrolled instead of this APZC, so
// walk the handoff chain as well.
CurrentTouchBlock()->GetOverscrollHandoffChain()->SnapBackOverscrolledApzc(this);
// SnapBackOverscrolledApzc() will put any APZC it causes to snap back
// into the OVERSCROLL_ANIMATION state. If that's not us, then since
// we're done TOUCHING, enter the NOTHING state.
if (mState != OVERSCROLL_ANIMATION) {
SetState(NOTHING);
}
}
@ -3558,6 +3564,11 @@ AsyncPanZoomController::ResetTouchInputState()
listener->HandleInputEvent(cancel);
}
CancelAnimationAndGestureState();
// Clear overscroll along the entire handoff chain, in case an APZC
// later in the chain is overscrolled.
if (TouchBlockState* block = CurrentTouchBlock()) {
block->GetOverscrollHandoffChain()->ClearOverscroll();
}
}
void


@ -32,21 +32,28 @@
// (which expects an untransformed point). We handle both cases by setting both
// the transformed and untransformed fields to the same value.
SingleTouchData
CreateSingleTouchData(int32_t aIdentifier, int aX, int aY)
CreateSingleTouchData(int32_t aIdentifier, const ScreenIntPoint& aPoint)
{
SingleTouchData touch(aIdentifier, ScreenIntPoint(aX, aY), ScreenSize(0, 0), 0, 0);
touch.mLocalScreenPoint = ParentLayerPoint(aX, aY);
SingleTouchData touch(aIdentifier, aPoint, ScreenSize(0, 0), 0, 0);
touch.mLocalScreenPoint = ParentLayerPoint(aPoint.x, aPoint.y);
return touch;
}
// Convenience wrapper for CreateSingleTouchData() that takes loose coordinates.
SingleTouchData
CreateSingleTouchData(int32_t aIdentifier, ScreenIntCoord aX, ScreenIntCoord aY)
{
return CreateSingleTouchData(aIdentifier, ScreenIntPoint(aX, aY));
}
PinchGestureInput
CreatePinchGestureInput(PinchGestureInput::PinchGestureType aType,
int aFocusX, int aFocusY,
const ScreenIntPoint& aFocus,
float aCurrentSpan, float aPreviousSpan)
{
PinchGestureInput result(aType, 0, TimeStamp(), ScreenPoint(aFocusX, aFocusY),
PinchGestureInput result(aType, 0, TimeStamp(), aFocus,
aCurrentSpan, aPreviousSpan, 0);
result.mLocalFocusPoint = ParentLayerPoint(aFocusX, aFocusY);
result.mLocalFocusPoint = ParentLayerPoint(aFocus.x, aFocus.y);
return result;
}
@ -76,34 +83,38 @@ CreateMultiTouchInput(MultiTouchInput::MultiTouchType aType, TimeStamp aTime)
template<class InputReceiver>
nsEventStatus
TouchDown(const RefPtr<InputReceiver>& aTarget, int aX, int aY, TimeStamp aTime, uint64_t* aOutInputBlockId = nullptr)
TouchDown(const RefPtr<InputReceiver>& aTarget, const ScreenIntPoint& aPoint,
TimeStamp aTime, uint64_t* aOutInputBlockId = nullptr)
{
MultiTouchInput mti = CreateMultiTouchInput(MultiTouchInput::MULTITOUCH_START, aTime);
mti.mTouches.AppendElement(CreateSingleTouchData(0, aX, aY));
mti.mTouches.AppendElement(CreateSingleTouchData(0, aPoint));
return aTarget->ReceiveInputEvent(mti, nullptr, aOutInputBlockId);
}
template<class InputReceiver>
nsEventStatus
TouchMove(const RefPtr<InputReceiver>& aTarget, int aX, int aY, TimeStamp aTime)
TouchMove(const RefPtr<InputReceiver>& aTarget, const ScreenIntPoint& aPoint,
TimeStamp aTime)
{
MultiTouchInput mti = CreateMultiTouchInput(MultiTouchInput::MULTITOUCH_MOVE, aTime);
mti.mTouches.AppendElement(CreateSingleTouchData(0, aX, aY));
mti.mTouches.AppendElement(CreateSingleTouchData(0, aPoint));
return aTarget->ReceiveInputEvent(mti, nullptr, nullptr);
}
template<class InputReceiver>
nsEventStatus
TouchUp(const RefPtr<InputReceiver>& aTarget, int aX, int aY, TimeStamp aTime)
TouchUp(const RefPtr<InputReceiver>& aTarget, const ScreenIntPoint& aPoint,
TimeStamp aTime)
{
MultiTouchInput mti = CreateMultiTouchInput(MultiTouchInput::MULTITOUCH_END, aTime);
mti.mTouches.AppendElement(CreateSingleTouchData(0, aX, aY));
mti.mTouches.AppendElement(CreateSingleTouchData(0, aPoint));
return aTarget->ReceiveInputEvent(mti, nullptr, nullptr);
}
template<class InputReceiver>
void
Tap(const RefPtr<InputReceiver>& aTarget, int aX, int aY, MockContentControllerDelayed* aMcc,
Tap(const RefPtr<InputReceiver>& aTarget, const ScreenIntPoint& aPoint,
MockContentControllerDelayed* aMcc,
TimeDuration aTapLength,
nsEventStatus (*aOutEventStatuses)[2] = nullptr,
uint64_t* aOutInputBlockId = nullptr)
@ -115,7 +126,7 @@ Tap(const RefPtr<InputReceiver>& aTarget, int aX, int aY, MockContentControllerD
aOutInputBlockId = &blockId;
}
nsEventStatus status = TouchDown(aTarget, aX, aY, aMcc->Time(), aOutInputBlockId);
nsEventStatus status = TouchDown(aTarget, aPoint, aMcc->Time(), aOutInputBlockId);
if (aOutEventStatuses) {
(*aOutEventStatuses)[0] = status;
}
@ -127,7 +138,7 @@ Tap(const RefPtr<InputReceiver>& aTarget, int aX, int aY, MockContentControllerD
SetDefaultAllowedTouchBehavior(aTarget, *aOutInputBlockId);
}
status = TouchUp(aTarget, aX, aY, aMcc->Time());
status = TouchUp(aTarget, aPoint, aMcc->Time());
if (aOutEventStatuses) {
(*aOutEventStatuses)[1] = status;
}
@ -135,11 +146,12 @@ Tap(const RefPtr<InputReceiver>& aTarget, int aX, int aY, MockContentControllerD
template<class InputReceiver>
void
TapAndCheckStatus(const RefPtr<InputReceiver>& aTarget, int aX, int aY,
MockContentControllerDelayed* aMcc, TimeDuration aTapLength)
TapAndCheckStatus(const RefPtr<InputReceiver>& aTarget,
const ScreenIntPoint& aPoint, MockContentControllerDelayed* aMcc,
TimeDuration aTapLength)
{
nsEventStatus statuses[2];
Tap(aTarget, aX, aY, aMcc, aTapLength, &statuses);
Tap(aTarget, aPoint, aMcc, aTapLength, &statuses);
EXPECT_EQ(nsEventStatus_eConsumeDoDefault, statuses[0]);
EXPECT_EQ(nsEventStatus_eConsumeDoDefault, statuses[1]);
}
@ -148,8 +160,8 @@ template<class InputReceiver>
void
Pan(const RefPtr<InputReceiver>& aTarget,
MockContentControllerDelayed* aMcc,
const ScreenPoint& aTouchStart,
const ScreenPoint& aTouchEnd,
const ScreenIntPoint& aTouchStart,
const ScreenIntPoint& aTouchEnd,
bool aKeepFingerDown = false,
nsTArray<uint32_t>* aAllowedTouchBehaviors = nullptr,
nsEventStatus (*aOutEventStatuses)[4] = nullptr,
@ -173,7 +185,9 @@ Pan(const RefPtr<InputReceiver>& aTarget,
}
// Make sure the move is large enough to not be handled as a tap
nsEventStatus status = TouchDown(aTarget, aTouchStart.x, aTouchStart.y + OVERCOME_TOUCH_TOLERANCE, aMcc->Time(), aOutInputBlockId);
nsEventStatus status = TouchDown(aTarget,
ScreenIntPoint(aTouchStart.x, aTouchStart.y + OVERCOME_TOUCH_TOLERANCE),
aMcc->Time(), aOutInputBlockId);
if (aOutEventStatuses) {
(*aOutEventStatuses)[0] = status;
}
@ -190,14 +204,14 @@ Pan(const RefPtr<InputReceiver>& aTarget,
}
}
status = TouchMove(aTarget, aTouchStart.x, aTouchStart.y, aMcc->Time());
status = TouchMove(aTarget, aTouchStart, aMcc->Time());
if (aOutEventStatuses) {
(*aOutEventStatuses)[1] = status;
}
aMcc->AdvanceBy(TIME_BETWEEN_TOUCH_EVENT);
status = TouchMove(aTarget, aTouchEnd.x, aTouchEnd.y, aMcc->Time());
status = TouchMove(aTarget, aTouchEnd, aMcc->Time());
if (aOutEventStatuses) {
(*aOutEventStatuses)[2] = status;
}
@ -205,7 +219,7 @@ Pan(const RefPtr<InputReceiver>& aTarget,
aMcc->AdvanceBy(TIME_BETWEEN_TOUCH_EVENT);
if (!aKeepFingerDown) {
status = TouchUp(aTarget, aTouchEnd.x, aTouchEnd.y, aMcc->Time());
status = TouchUp(aTarget, aTouchEnd, aMcc->Time());
} else {
status = nsEventStatus_eIgnore;
}
@ -232,7 +246,7 @@ Pan(const RefPtr<InputReceiver>& aTarget,
nsEventStatus (*aOutEventStatuses)[4] = nullptr,
uint64_t* aOutInputBlockId = nullptr)
{
::Pan(aTarget, aMcc, ScreenPoint(10, aTouchStartY), ScreenPoint(10, aTouchEndY),
::Pan(aTarget, aMcc, ScreenIntPoint(10, aTouchStartY), ScreenIntPoint(10, aTouchEndY),
aKeepFingerDown, aAllowedTouchBehaviors, aOutEventStatuses, aOutInputBlockId);
}
@ -279,19 +293,20 @@ ApzcPanNoFling(const RefPtr<TestAsyncPanZoomController>& aApzc,
template<class InputReceiver>
void
PinchWithPinchInput(const RefPtr<InputReceiver>& aTarget,
int aFocusX, int aFocusY, int aSecondFocusX, int aSecondFocusY, float aScale,
const ScreenIntPoint& aFocus,
const ScreenIntPoint& aSecondFocus, float aScale,
nsEventStatus (*aOutEventStatuses)[3] = nullptr)
{
nsEventStatus actualStatus = aTarget->ReceiveInputEvent(
CreatePinchGestureInput(PinchGestureInput::PINCHGESTURE_START,
aFocusX, aFocusY, 10.0, 10.0),
aFocus, 10.0, 10.0),
nullptr);
if (aOutEventStatuses) {
(*aOutEventStatuses)[0] = actualStatus;
}
actualStatus = aTarget->ReceiveInputEvent(
CreatePinchGestureInput(PinchGestureInput::PINCHGESTURE_SCALE,
aSecondFocusX, aSecondFocusY, 10.0 * aScale, 10.0),
aSecondFocus, 10.0 * aScale, 10.0),
nullptr);
if (aOutEventStatuses) {
(*aOutEventStatuses)[1] = actualStatus;
@ -300,7 +315,7 @@ PinchWithPinchInput(const RefPtr<InputReceiver>& aTarget,
CreatePinchGestureInput(PinchGestureInput::PINCHGESTURE_END,
// note: negative values here tell APZC
// not to turn the pinch into a pan
aFocusX, aFocusY, -1.0, -1.0),
aFocus, -1.0, -1.0),
nullptr);
if (aOutEventStatuses) {
(*aOutEventStatuses)[2] = actualStatus;
@ -310,11 +325,11 @@ PinchWithPinchInput(const RefPtr<InputReceiver>& aTarget,
template<class InputReceiver>
void
PinchWithPinchInputAndCheckStatus(const RefPtr<InputReceiver>& aTarget,
int aFocusX, int aFocusY, float aScale,
const ScreenIntPoint& aFocus, float aScale,
bool aShouldTriggerPinch)
{
nsEventStatus statuses[3]; // scalebegin, scale, scaleend
PinchWithPinchInput(aTarget, aFocusX, aFocusY, aFocusX, aFocusY, aScale, &statuses);
PinchWithPinchInput(aTarget, aFocus, aFocus, aScale, &statuses);
nsEventStatus expectedStatus = aShouldTriggerPinch
? nsEventStatus_eConsumeNoDefault
@ -326,7 +341,7 @@ PinchWithPinchInputAndCheckStatus(const RefPtr<InputReceiver>& aTarget,
template<class InputReceiver>
void
PinchWithTouchInput(const RefPtr<InputReceiver>& aTarget,
int aFocusX, int aFocusY, float aScale,
const ScreenIntPoint& aFocus, float aScale,
int& inputId,
nsTArray<uint32_t>* aAllowedTouchBehaviors = nullptr,
nsEventStatus (*aOutEventStatuses)[4] = nullptr,
@ -345,8 +360,8 @@ PinchWithTouchInput(const RefPtr<InputReceiver>& aTarget,
}
MultiTouchInput mtiStart = MultiTouchInput(MultiTouchInput::MULTITOUCH_START, 0, TimeStamp(), 0);
mtiStart.mTouches.AppendElement(CreateSingleTouchData(inputId, aFocusX, aFocusY));
mtiStart.mTouches.AppendElement(CreateSingleTouchData(inputId + 1, aFocusX, aFocusY));
mtiStart.mTouches.AppendElement(CreateSingleTouchData(inputId, aFocus));
mtiStart.mTouches.AppendElement(CreateSingleTouchData(inputId + 1, aFocus));
nsEventStatus status = aTarget->ReceiveInputEvent(mtiStart, aOutInputBlockId);
if (aOutEventStatuses) {
(*aOutEventStatuses)[0] = status;
@ -360,24 +375,24 @@ PinchWithTouchInput(const RefPtr<InputReceiver>& aTarget,
}
MultiTouchInput mtiMove1 = MultiTouchInput(MultiTouchInput::MULTITOUCH_MOVE, 0, TimeStamp(), 0);
mtiMove1.mTouches.AppendElement(CreateSingleTouchData(inputId, aFocusX - pinchLength, aFocusY));
mtiMove1.mTouches.AppendElement(CreateSingleTouchData(inputId + 1, aFocusX + pinchLength, aFocusY));
mtiMove1.mTouches.AppendElement(CreateSingleTouchData(inputId, aFocus.x - pinchLength, aFocus.y));
mtiMove1.mTouches.AppendElement(CreateSingleTouchData(inputId + 1, aFocus.x + pinchLength, aFocus.y));
status = aTarget->ReceiveInputEvent(mtiMove1, nullptr);
if (aOutEventStatuses) {
(*aOutEventStatuses)[1] = status;
}
MultiTouchInput mtiMove2 = MultiTouchInput(MultiTouchInput::MULTITOUCH_MOVE, 0, TimeStamp(), 0);
mtiMove2.mTouches.AppendElement(CreateSingleTouchData(inputId, aFocusX - pinchLengthScaled, aFocusY));
mtiMove2.mTouches.AppendElement(CreateSingleTouchData(inputId + 1, aFocusX + pinchLengthScaled, aFocusY));
mtiMove2.mTouches.AppendElement(CreateSingleTouchData(inputId, aFocus.x - pinchLengthScaled, aFocus.y));
mtiMove2.mTouches.AppendElement(CreateSingleTouchData(inputId + 1, aFocus.x + pinchLengthScaled, aFocus.y));
status = aTarget->ReceiveInputEvent(mtiMove2, nullptr);
if (aOutEventStatuses) {
(*aOutEventStatuses)[2] = status;
}
MultiTouchInput mtiEnd = MultiTouchInput(MultiTouchInput::MULTITOUCH_END, 0, TimeStamp(), 0);
mtiEnd.mTouches.AppendElement(CreateSingleTouchData(inputId, aFocusX - pinchLengthScaled, aFocusY));
mtiEnd.mTouches.AppendElement(CreateSingleTouchData(inputId + 1, aFocusX + pinchLengthScaled, aFocusY));
mtiEnd.mTouches.AppendElement(CreateSingleTouchData(inputId, aFocus.x - pinchLengthScaled, aFocus.y));
mtiEnd.mTouches.AppendElement(CreateSingleTouchData(inputId + 1, aFocus.x + pinchLengthScaled, aFocus.y));
status = aTarget->ReceiveInputEvent(mtiEnd, nullptr);
if (aOutEventStatuses) {
(*aOutEventStatuses)[3] = status;
@ -389,12 +404,12 @@ PinchWithTouchInput(const RefPtr<InputReceiver>& aTarget,
template<class InputReceiver>
void
PinchWithTouchInputAndCheckStatus(const RefPtr<InputReceiver>& aTarget,
int aFocusX, int aFocusY, float aScale,
const ScreenIntPoint& aFocus, float aScale,
int& inputId, bool aShouldTriggerPinch,
nsTArray<uint32_t>* aAllowedTouchBehaviors)
{
nsEventStatus statuses[4]; // down, move, move, up
PinchWithTouchInput(aTarget, aFocusX, aFocusY, aScale, inputId, aAllowedTouchBehaviors, &statuses);
PinchWithTouchInput(aTarget, aFocus, aScale, inputId, aAllowedTouchBehaviors, &statuses);
nsEventStatus expectedMoveStatus = aShouldTriggerPinch
? nsEventStatus_eConsumeDoDefault
@ -406,12 +421,13 @@ PinchWithTouchInputAndCheckStatus(const RefPtr<InputReceiver>& aTarget,
template<class InputReceiver>
void
DoubleTap(const RefPtr<InputReceiver>& aTarget, int aX, int aY, MockContentControllerDelayed* aMcc,
DoubleTap(const RefPtr<InputReceiver>& aTarget, const ScreenIntPoint& aPoint,
MockContentControllerDelayed* aMcc,
nsEventStatus (*aOutEventStatuses)[4] = nullptr,
uint64_t (*aOutInputBlockIds)[2] = nullptr)
{
uint64_t blockId;
nsEventStatus status = TouchDown(aTarget, aX, aY, aMcc->Time(), &blockId);
nsEventStatus status = TouchDown(aTarget, aPoint, aMcc->Time(), &blockId);
if (aOutEventStatuses) {
(*aOutEventStatuses)[0] = status;
}
@ -426,12 +442,12 @@ DoubleTap(const RefPtr<InputReceiver>& aTarget, int aX, int aY, MockContentContr
SetDefaultAllowedTouchBehavior(aTarget, blockId);
}
status = TouchUp(aTarget, aX, aY, aMcc->Time());
status = TouchUp(aTarget, aPoint, aMcc->Time());
if (aOutEventStatuses) {
(*aOutEventStatuses)[1] = status;
}
aMcc->AdvanceByMillis(10);
status = TouchDown(aTarget, aX, aY, aMcc->Time(), &blockId);
status = TouchDown(aTarget, aPoint, aMcc->Time(), &blockId);
if (aOutEventStatuses) {
(*aOutEventStatuses)[2] = status;
}
@ -444,7 +460,7 @@ DoubleTap(const RefPtr<InputReceiver>& aTarget, int aX, int aY, MockContentContr
SetDefaultAllowedTouchBehavior(aTarget, blockId);
}
status = TouchUp(aTarget, aX, aY, aMcc->Time());
status = TouchUp(aTarget, aPoint, aMcc->Time());
if (aOutEventStatuses) {
(*aOutEventStatuses)[3] = status;
}
@ -452,11 +468,12 @@ DoubleTap(const RefPtr<InputReceiver>& aTarget, int aX, int aY, MockContentContr
template<class InputReceiver>
void
DoubleTapAndCheckStatus(const RefPtr<InputReceiver>& aTarget, int aX, int aY,
MockContentControllerDelayed* aMcc, uint64_t (*aOutInputBlockIds)[2] = nullptr)
DoubleTapAndCheckStatus(const RefPtr<InputReceiver>& aTarget,
const ScreenIntPoint& aPoint, MockContentControllerDelayed* aMcc,
uint64_t (*aOutInputBlockIds)[2] = nullptr)
{
nsEventStatus statuses[4];
DoubleTap(aTarget, aX, aY, aMcc, &statuses, aOutInputBlockIds);
DoubleTap(aTarget, aPoint, aMcc, &statuses, aOutInputBlockIds);
EXPECT_EQ(nsEventStatus_eConsumeDoDefault, statuses[0]);
EXPECT_EQ(nsEventStatus_eConsumeDoDefault, statuses[1]);
EXPECT_EQ(nsEventStatus_eConsumeDoDefault, statuses[2]);


@ -22,7 +22,7 @@ TEST_F(APZCBasicTester, Overzoom) {
EXPECT_CALL(*mcc, RequestContentRepaint(_)).Times(1);
PinchWithPinchInputAndCheckStatus(apzc, 50, 50, 0.5, true);
PinchWithPinchInputAndCheckStatus(apzc, ScreenIntPoint(50, 50), 0.5, true);
fm = apzc->GetFrameMetrics();
EXPECT_EQ(0.8f, fm.GetZoom().ToScaleFactor().scale);
@ -295,8 +295,8 @@ TEST_F(APZCBasicTester, OverScroll_Bug1152051b) {
// to schedule a new one since we're still overscrolled. We don't pan because
// panning can trigger functions that clear the overscroll animation state
// in other ways.
TouchDown(apzc, 10, 10, mcc->Time(), nullptr);
TouchUp(apzc, 10, 10, mcc->Time());
TouchDown(apzc, ScreenIntPoint(10, 10), mcc->Time(), nullptr);
TouchUp(apzc, ScreenIntPoint(10, 10), mcc->Time());
// Sample the second overscroll animation to its end.
// If the ending of the first overscroll animation fails to clear state


@ -186,18 +186,18 @@ TEST_F(APZEventRegionsTester, HitRegionImmediateResponse) {
// Tap in the exposed hit regions of each of the layers once and ensure
// the clicks are dispatched right away
Tap(manager, 10, 10, mcc, tapDuration);
Tap(manager, ScreenIntPoint(10, 10), mcc, tapDuration);
mcc->RunThroughDelayedTasks(); // this runs the tap event
check.Call("Tapped on left");
Tap(manager, 110, 110, mcc, tapDuration);
Tap(manager, ScreenIntPoint(110, 110), mcc, tapDuration);
mcc->RunThroughDelayedTasks(); // this runs the tap event
check.Call("Tapped on bottom");
Tap(manager, 110, 10, mcc, tapDuration);
Tap(manager, ScreenIntPoint(110, 10), mcc, tapDuration);
mcc->RunThroughDelayedTasks(); // this runs the tap event
check.Call("Tapped on root");
// Now tap on the dispatch-to-content region where the layers overlap
Tap(manager, 10, 110, mcc, tapDuration);
Tap(manager, ScreenIntPoint(10, 110), mcc, tapDuration);
mcc->RunThroughDelayedTasks(); // this runs the main-thread timeout
check.Call("Tap pending on d-t-c region");
mcc->RunThroughDelayedTasks(); // this runs the tap event
@ -205,7 +205,7 @@ TEST_F(APZEventRegionsTester, HitRegionImmediateResponse) {
// Now let's do that again, but simulate a main-thread response
uint64_t inputBlockId = 0;
Tap(manager, 10, 110, mcc, tapDuration, nullptr, &inputBlockId);
Tap(manager, ScreenIntPoint(10, 110), mcc, tapDuration, nullptr, &inputBlockId);
nsTArray<ScrollableLayerGuid> targets;
targets.AppendElement(left->GetGuid());
manager->SetTargetAPZC(inputBlockId, targets);
@ -221,7 +221,7 @@ TEST_F(APZEventRegionsTester, HitRegionAccumulatesChildren) {
// content controller, which indicates the input events got routed correctly
// to the APZC.
EXPECT_CALL(*mcc, HandleSingleTap(_, _, rootApzc->GetGuid())).Times(1);
Tap(manager, 10, 160, mcc, TimeDuration::FromMilliseconds(100));
Tap(manager, ScreenIntPoint(10, 160), mcc, TimeDuration::FromMilliseconds(100));
}
TEST_F(APZEventRegionsTester, Obscuration) {
@ -260,7 +260,7 @@ TEST_F(APZEventRegionsTester, Bug1117712) {
// These touch events should hit the dispatch-to-content region of layers[3]
// and so get queued with that APZC as the tentative target.
uint64_t inputBlockId = 0;
Tap(manager, 55, 5, mcc, TimeDuration::FromMilliseconds(100), nullptr, &inputBlockId);
Tap(manager, ScreenIntPoint(55, 5), mcc, TimeDuration::FromMilliseconds(100), nullptr, &inputBlockId);
// But now we tell the APZ that really it hit layers[2], and expect the tap
// to be delivered at the correct coordinates.
EXPECT_CALL(*mcc, HandleSingleTap(CSSPoint(55, 5), 0, apzc2->GetGuid())).Times(1);


@ -210,12 +210,12 @@ protected:
// Deliver a tap to abort the fling. Ensure that we get a HandleSingleTap
// call out of it if and only if the fling is slow.
EXPECT_CALL(*mcc, HandleSingleTap(_, 0, apzc->GetGuid())).Times(tapCallsExpected);
Tap(apzc, 10, 10, mcc, 0);
Tap(apzc, ScreenIntPoint(10, 10), mcc, 0);
while (mcc->RunThroughDelayedTasks());
// Deliver another tap, to make sure that taps are flowing properly once
// the fling is aborted.
Tap(apzc, 100, 100, mcc, 0);
Tap(apzc, ScreenIntPoint(100, 100), mcc, 0);
while (mcc->RunThroughDelayedTasks());
// Verify that we didn't advance any further after the fling was aborted, in either case.
@ -247,7 +247,7 @@ protected:
EXPECT_GT(finalPoint.y, point.y);
// Now we put our finger down to stop the fling
TouchDown(apzc, 10, 10, mcc->Time(), &blockId);
TouchDown(apzc, ScreenIntPoint(10, 10), mcc->Time(), &blockId);
// Re-sample to make sure it hasn't moved
apzc->SampleContentTransformForFrame(&viewTransform, point, TimeDuration::FromMilliseconds(10));
@ -264,7 +264,7 @@ protected:
EXPECT_EQ(finalPoint.y, point.y);
// clean up
TouchUp(apzc, 10, 10, mcc->Time());
TouchUp(apzc, ScreenIntPoint(10, 10), mcc->Time());
apzc->AssertStateIsReset();
}
@ -300,7 +300,7 @@ TEST_F(APZCGestureDetectorTester, ShortPress) {
}
check.Call("pre-tap");
TapAndCheckStatus(apzc, 10, 10, mcc, TimeDuration::FromMilliseconds(100));
TapAndCheckStatus(apzc, ScreenIntPoint(10, 10), mcc, TimeDuration::FromMilliseconds(100));
check.Call("post-tap");
apzc->AssertStateIsReset();
@ -320,7 +320,7 @@ TEST_F(APZCGestureDetectorTester, MediumPress) {
}
check.Call("pre-tap");
TapAndCheckStatus(apzc, 10, 10, mcc, TimeDuration::FromMilliseconds(400));
TapAndCheckStatus(apzc, ScreenIntPoint(10, 10), mcc, TimeDuration::FromMilliseconds(400));
check.Call("post-tap");
apzc->AssertStateIsReset();
@ -333,7 +333,7 @@ protected:
uint64_t blockId = 0;
nsEventStatus status = TouchDown(apzc, 10, 10, mcc->Time(), &blockId);
nsEventStatus status = TouchDown(apzc, ScreenIntPoint(10, 10), mcc->Time(), &blockId);
EXPECT_EQ(nsEventStatus_eConsumeDoDefault, status);
if (gfxPrefs::TouchActionEnabled() && status != nsEventStatus_eConsumeNoDefault) {
@ -376,7 +376,7 @@ protected:
// Finally, simulate lifting the finger. Since the long-press wasn't
// prevent-defaulted, we should get a long-tap-up event.
check.Call("preHandleSingleTap");
status = TouchUp(apzc, 10, 10, mcc->Time());
status = TouchUp(apzc, ScreenIntPoint(10, 10), mcc->Time());
mcc->RunThroughDelayedTasks();
EXPECT_EQ(nsEventStatus_eConsumeDoDefault, status);
check.Call("postHandleSingleTap");
@ -394,7 +394,7 @@ protected:
touchEndY = 50;
uint64_t blockId = 0;
nsEventStatus status = TouchDown(apzc, touchX, touchStartY, mcc->Time(), &blockId);
nsEventStatus status = TouchDown(apzc, ScreenIntPoint(touchX, touchStartY), mcc->Time(), &blockId);
EXPECT_EQ(nsEventStatus_eConsumeDoDefault, status);
if (gfxPrefs::TouchActionEnabled() && status != nsEventStatus_eConsumeNoDefault) {
@ -436,7 +436,7 @@ protected:
EXPECT_EQ(nsEventStatus_eConsumeDoDefault, status);
EXPECT_CALL(*mcc, HandleSingleTap(CSSPoint(touchX, touchEndY), 0, apzc->GetGuid())).Times(0);
status = TouchUp(apzc, touchX, touchEndY, mcc->Time());
status = TouchUp(apzc, ScreenIntPoint(touchX, touchEndY), mcc->Time());
EXPECT_EQ(nsEventStatus_eConsumeDoDefault, status);
ParentLayerPoint pointOut;
@ -482,7 +482,7 @@ TEST_F(APZCGestureDetectorTester, DoubleTap) {
EXPECT_CALL(*mcc, HandleDoubleTap(CSSPoint(10, 10), 0, apzc->GetGuid())).Times(1);
uint64_t blockIds[2];
DoubleTapAndCheckStatus(apzc, 10, 10, mcc, &blockIds);
DoubleTapAndCheckStatus(apzc, ScreenIntPoint(10, 10), mcc, &blockIds);
// responses to the two touchstarts
apzc->ContentReceivedInputBlock(blockIds[0], false);
@ -499,7 +499,7 @@ TEST_F(APZCGestureDetectorTester, DoubleTapNotZoomable) {
EXPECT_CALL(*mcc, HandleDoubleTap(CSSPoint(10, 10), 0, apzc->GetGuid())).Times(0);
uint64_t blockIds[2];
DoubleTapAndCheckStatus(apzc, 10, 10, mcc, &blockIds);
DoubleTapAndCheckStatus(apzc, ScreenIntPoint(10, 10), mcc, &blockIds);
// responses to the two touchstarts
apzc->ContentReceivedInputBlock(blockIds[0], false);
@ -516,7 +516,7 @@ TEST_F(APZCGestureDetectorTester, DoubleTapPreventDefaultFirstOnly) {
EXPECT_CALL(*mcc, HandleDoubleTap(CSSPoint(10, 10), 0, apzc->GetGuid())).Times(0);
uint64_t blockIds[2];
DoubleTapAndCheckStatus(apzc, 10, 10, mcc, &blockIds);
DoubleTapAndCheckStatus(apzc, ScreenIntPoint(10, 10), mcc, &blockIds);
// responses to the two touchstarts
apzc->ContentReceivedInputBlock(blockIds[0], true);
@ -533,7 +533,7 @@ TEST_F(APZCGestureDetectorTester, DoubleTapPreventDefaultBoth) {
EXPECT_CALL(*mcc, HandleDoubleTap(CSSPoint(10, 10), 0, apzc->GetGuid())).Times(0);
uint64_t blockIds[2];
DoubleTapAndCheckStatus(apzc, 10, 10, mcc, &blockIds);
DoubleTapAndCheckStatus(apzc, ScreenIntPoint(10, 10), mcc, &blockIds);
// responses to the two touchstarts
apzc->ContentReceivedInputBlock(blockIds[0], true);
@ -549,7 +549,7 @@ TEST_F(APZCGestureDetectorTester, TapFollowedByPinch) {
EXPECT_CALL(*mcc, HandleSingleTap(CSSPoint(10, 10), 0, apzc->GetGuid())).Times(1);
Tap(apzc, 10, 10, mcc, TimeDuration::FromMilliseconds(100));
Tap(apzc, ScreenIntPoint(10, 10), mcc, TimeDuration::FromMilliseconds(100));
int inputId = 0;
MultiTouchInput mti;
@ -571,7 +571,7 @@ TEST_F(APZCGestureDetectorTester, TapFollowedByMultipleTouches) {
EXPECT_CALL(*mcc, HandleSingleTap(CSSPoint(10, 10), 0, apzc->GetGuid())).Times(1);
Tap(apzc, 10, 10, mcc, TimeDuration::FromMilliseconds(100));
Tap(apzc, ScreenIntPoint(10, 10), mcc, TimeDuration::FromMilliseconds(100));
int inputId = 0;
MultiTouchInput mti;


@ -465,12 +465,12 @@ TEST_F(APZHitTestingTester, Bug1148350) {
EXPECT_CALL(check, Call("Tapped with interleaved transform"));
}
Tap(manager, 100, 100, mcc, TimeDuration::FromMilliseconds(100));
Tap(manager, ScreenIntPoint(100, 100), mcc, TimeDuration::FromMilliseconds(100));
mcc->RunThroughDelayedTasks();
check.Call("Tapped without transform");
uint64_t blockId;
TouchDown(manager, 100, 100, mcc->Time(), &blockId);
TouchDown(manager, ScreenIntPoint(100, 100), mcc->Time(), &blockId);
if (gfxPrefs::TouchActionEnabled()) {
SetDefaultAllowedTouchBehavior(manager, blockId);
}
@ -480,7 +480,7 @@ TEST_F(APZHitTestingTester, Bug1148350) {
layers[0]->SetBaseTransform(Matrix4x4::Translation(0, 50, 0));
manager->UpdateHitTestingTree(nullptr, root, false, 0, 0);
TouchUp(manager, 100, 100, mcc->Time());
TouchUp(manager, ScreenIntPoint(100, 100), mcc->Time());
mcc->RunThroughDelayedTasks();
check.Call("Tapped with interleaved transform");
}


@ -44,9 +44,11 @@ protected:
int touchInputId = 0;
if (mGestureBehavior == AsyncPanZoomController::USE_GESTURE_DETECTOR) {
PinchWithTouchInputAndCheckStatus(apzc, 250, 300, 1.25, touchInputId, aShouldTriggerPinch, aAllowedTouchBehaviors);
PinchWithTouchInputAndCheckStatus(apzc, ScreenIntPoint(250, 300), 1.25,
touchInputId, aShouldTriggerPinch, aAllowedTouchBehaviors);
} else {
PinchWithPinchInputAndCheckStatus(apzc, 250, 300, 1.25, aShouldTriggerPinch);
PinchWithPinchInputAndCheckStatus(apzc, ScreenIntPoint(250, 300), 1.25,
aShouldTriggerPinch);
}
FrameMetrics fm = apzc->GetFrameMetrics();
@ -72,9 +74,11 @@ protected:
// the visible area of the document in CSS pixels is x=930 y=5 w=50 h=100
if (mGestureBehavior == AsyncPanZoomController::USE_GESTURE_DETECTOR) {
PinchWithTouchInputAndCheckStatus(apzc, 250, 300, 0.5, touchInputId, aShouldTriggerPinch, aAllowedTouchBehaviors);
PinchWithTouchInputAndCheckStatus(apzc, ScreenIntPoint(250, 300), 0.5,
touchInputId, aShouldTriggerPinch, aAllowedTouchBehaviors);
} else {
PinchWithPinchInputAndCheckStatus(apzc, 250, 300, 0.5, aShouldTriggerPinch);
PinchWithPinchInputAndCheckStatus(apzc, ScreenIntPoint(250, 300), 0.5,
aShouldTriggerPinch);
}
fm = apzc->GetFrameMetrics();
@ -142,7 +146,8 @@ TEST_F(APZCPinchGestureDetectorTester, Pinch_PreventDefault) {
int touchInputId = 0;
uint64_t blockId = 0;
PinchWithTouchInput(apzc, 250, 300, 1.25, touchInputId, nullptr, nullptr, &blockId);
PinchWithTouchInput(apzc, ScreenIntPoint(250, 300), 1.25, touchInputId,
nullptr, nullptr, &blockId);
// Send the prevent-default notification for the touch block
apzc->ContentReceivedInputBlock(blockId, true);
@ -162,7 +167,8 @@ TEST_F(APZCPinchTester, Panning_TwoFinger_ZoomDisabled) {
MakeApzcUnzoomable();
nsEventStatus statuses[3]; // scalebegin, scale, scaleend
PinchWithPinchInput(apzc, 250, 350, 200, 300, 10, &statuses);
PinchWithPinchInput(apzc, ScreenIntPoint(250, 350), ScreenIntPoint(200, 300),
10, &statuses);
FrameMetrics fm = apzc->GetFrameMetrics();


@ -8,12 +8,12 @@
#include "APZTestCommon.h"
#include "InputUtils.h"
class APZOverscrollHandoffTester : public APZCTreeManagerTester {
class APZScrollHandoffTester : public APZCTreeManagerTester {
protected:
UniquePtr<ScopedLayerTreeRegistration> registration;
TestAsyncPanZoomController* rootApzc;
void CreateOverscrollHandoffLayerTree1() {
void CreateScrollHandoffLayerTree1() {
const char* layerTreeSyntax = "c(t)";
nsIntRegion layerVisibleRegion[] = {
nsIntRegion(IntRect(0, 0, 100, 100)),
@ -26,9 +26,10 @@ protected:
registration = MakeUnique<ScopedLayerTreeRegistration>(manager, 0, root, mcc);
manager->UpdateHitTestingTree(nullptr, root, false, 0, 0);
rootApzc = ApzcOf(root);
rootApzc->GetFrameMetrics().SetIsRootContent(true); // make root APZC zoomable
}
void CreateOverscrollHandoffLayerTree2() {
void CreateScrollHandoffLayerTree2() {
const char* layerTreeSyntax = "c(c(t))";
nsIntRegion layerVisibleRegion[] = {
nsIntRegion(IntRect(0, 0, 100, 100)),
@ -48,7 +49,7 @@ protected:
rootApzc = ApzcOf(root);
}
void CreateOverscrollHandoffLayerTree3() {
void CreateScrollHandoffLayerTree3() {
const char* layerTreeSyntax = "c(c(t)c(t))";
nsIntRegion layerVisibleRegion[] = {
nsIntRegion(IntRect(0, 0, 100, 100)), // root
@ -126,9 +127,9 @@ protected:
// Here we test that if the processing of a touch block is deferred while we
// wait for content to send a prevent-default message, overscroll is still
// handed off correctly when the block is processed.
TEST_F(APZOverscrollHandoffTester, DeferredInputEventProcessing) {
TEST_F(APZScrollHandoffTester, DeferredInputEventProcessing) {
// Set up the APZC tree.
CreateOverscrollHandoffLayerTree1();
CreateScrollHandoffLayerTree1();
TestAsyncPanZoomController* childApzc = ApzcOf(layers[1]);
@ -154,9 +155,9 @@ TEST_F(APZOverscrollHandoffTester, DeferredInputEventProcessing) {
// one has been queued, overscroll handoff for the first block follows
// the original layer structure while overscroll handoff for the second block
// follows the new layer structure.
TEST_F(APZOverscrollHandoffTester, LayerStructureChangesWhileEventsArePending) {
TEST_F(APZScrollHandoffTester, LayerStructureChangesWhileEventsArePending) {
// Set up an initial APZC tree.
CreateOverscrollHandoffLayerTree1();
CreateScrollHandoffLayerTree1();
TestAsyncPanZoomController* childApzc = ApzcOf(layers[1]);
@ -170,7 +171,7 @@ TEST_F(APZOverscrollHandoffTester, LayerStructureChangesWhileEventsArePending) {
// Modify the APZC tree to insert a new APZC 'middle' into the handoff chain
// between the child and the root.
CreateOverscrollHandoffLayerTree2();
CreateScrollHandoffLayerTree2();
RefPtr<Layer> middle = layers[1];
childApzc->SetWaitForMainThread();
TestAsyncPanZoomController* middleApzc = ApzcOf(middle);
@ -202,11 +203,11 @@ TEST_F(APZOverscrollHandoffTester, LayerStructureChangesWhileEventsArePending) {
// Test that putting a second finger down on an APZC while a down-chain APZC
// is overscrolled doesn't result in being stuck in overscroll.
TEST_F(APZOverscrollHandoffTester, StuckInOverscroll_Bug1073250) {
TEST_F(APZScrollHandoffTester, StuckInOverscroll_Bug1073250) {
// Enable overscrolling.
SCOPED_GFX_PREF(APZOverscrollEnabled, bool, true);
CreateOverscrollHandoffLayerTree1();
CreateScrollHandoffLayerTree1();
TestAsyncPanZoomController* child = ApzcOf(layers[1]);
@ -239,11 +240,11 @@ TEST_F(APZOverscrollHandoffTester, StuckInOverscroll_Bug1073250) {
// This is almost exactly like StuckInOverscroll_Bug1073250, except the
// APZC receiving the input events for the first touch block is the child
// (and thus not the same APZC that overscrolls, which is the parent).
TEST_F(APZOverscrollHandoffTester, StuckInOverscroll_Bug1231228) {
TEST_F(APZScrollHandoffTester, StuckInOverscroll_Bug1231228) {
// Enable overscrolling.
SCOPED_GFX_PREF(APZOverscrollEnabled, bool, true);
CreateOverscrollHandoffLayerTree1();
CreateScrollHandoffLayerTree1();
TestAsyncPanZoomController* child = ApzcOf(layers[1]);
@ -273,17 +274,95 @@ TEST_F(APZOverscrollHandoffTester, StuckInOverscroll_Bug1231228) {
EXPECT_FALSE(rootApzc->IsOverscrolled());
}
TEST_F(APZScrollHandoffTester, StuckInOverscroll_Bug1240202a) {
// Enable overscrolling.
SCOPED_GFX_PREF(APZOverscrollEnabled, bool, true);
CreateScrollHandoffLayerTree1();
TestAsyncPanZoomController* child = ApzcOf(layers[1]);
// Pan, causing the parent APZC to overscroll.
Pan(manager, mcc, 60, 90, true /* keep finger down */);
EXPECT_FALSE(child->IsOverscrolled());
EXPECT_TRUE(rootApzc->IsOverscrolled());
// Lift the finger, triggering an overscroll animation
// (but don't allow it to run).
TouchUp(manager, ScreenIntPoint(10, 90), mcc->Time());
// Put the finger down again, interrupting the animation
// and entering the TOUCHING state.
TouchDown(manager, ScreenIntPoint(10, 90), mcc->Time());
// Lift the finger once again.
TouchUp(manager, ScreenIntPoint(10, 90), mcc->Time());
// Allow any animations to run their course.
child->AdvanceAnimationsUntilEnd();
rootApzc->AdvanceAnimationsUntilEnd();
// Make sure nothing is overscrolled.
EXPECT_FALSE(child->IsOverscrolled());
EXPECT_FALSE(rootApzc->IsOverscrolled());
}
TEST_F(APZScrollHandoffTester, StuckInOverscroll_Bug1240202b) {
// Enable overscrolling.
SCOPED_GFX_PREF(APZOverscrollEnabled, bool, true);
CreateScrollHandoffLayerTree1();
TestAsyncPanZoomController* child = ApzcOf(layers[1]);
// Pan, causing the parent APZC to overscroll.
Pan(manager, mcc, 60, 90, true /* keep finger down */);
EXPECT_FALSE(child->IsOverscrolled());
EXPECT_TRUE(rootApzc->IsOverscrolled());
// Lift the finger, triggering an overscroll animation
// (but don't allow it to run).
TouchUp(manager, ScreenIntPoint(10, 90), mcc->Time());
// Put the finger down again, interrupting the animation
// and entering the TOUCHING state.
TouchDown(manager, ScreenIntPoint(10, 90), mcc->Time());
// Put a second finger down. Since we're in the TOUCHING state,
// the "are we panned into overscroll" check will fail and we
// will not ignore the second finger, instead entering the
// PINCHING state.
MultiTouchInput secondFingerDown(MultiTouchInput::MULTITOUCH_START, 0, TimeStamp(), 0);
// Use the same touch identifier for the first touch (0) as TouchDown(). (A bit hacky.)
secondFingerDown.mTouches.AppendElement(SingleTouchData(0, ScreenIntPoint(10, 90), ScreenSize(0, 0), 0, 0));
secondFingerDown.mTouches.AppendElement(SingleTouchData(1, ScreenIntPoint(10, 80), ScreenSize(0, 0), 0, 0));
manager->ReceiveInputEvent(secondFingerDown, nullptr, nullptr);
// Release the fingers.
MultiTouchInput fingersUp = secondFingerDown;
fingersUp.mType = MultiTouchInput::MULTITOUCH_END;
manager->ReceiveInputEvent(fingersUp, nullptr, nullptr);
// Allow any animations to run their course.
child->AdvanceAnimationsUntilEnd();
rootApzc->AdvanceAnimationsUntilEnd();
// Make sure nothing is overscrolled.
EXPECT_FALSE(child->IsOverscrolled());
EXPECT_FALSE(rootApzc->IsOverscrolled());
}
// Test that flinging in a direction where one component of the fling goes into
// overscroll but the other doesn't, results in just the one component being
// handed off to the parent, while the original APZC continues flinging in the
// other direction.
TEST_F(APZOverscrollHandoffTester, PartialFlingHandoff) {
CreateOverscrollHandoffLayerTree1();
TEST_F(APZScrollHandoffTester, PartialFlingHandoff) {
CreateScrollHandoffLayerTree1();
// Fling up and to the left. The child APZC has room to scroll up, but not
// to the left, so the horizontal component of the fling should be handed
// off to the parent APZC.
Pan(manager, mcc, ScreenPoint(90, 90), ScreenPoint(55, 55));
Pan(manager, mcc, ScreenIntPoint(90, 90), ScreenIntPoint(55, 55));
RefPtr<TestAsyncPanZoomController> parent = ApzcOf(root);
RefPtr<TestAsyncPanZoomController> child = ApzcOf(layers[1]);
@ -300,9 +379,9 @@ TEST_F(APZOverscrollHandoffTester, PartialFlingHandoff) {
// Here we test that if two flings are happening simultaneously, overscroll
// is handed off correctly for each.
TEST_F(APZOverscrollHandoffTester, SimultaneousFlings) {
TEST_F(APZScrollHandoffTester, SimultaneousFlings) {
// Set up an initial APZC tree.
CreateOverscrollHandoffLayerTree3();
CreateScrollHandoffLayerTree3();
RefPtr<TestAsyncPanZoomController> parent1 = ApzcOf(layers[1]);
RefPtr<TestAsyncPanZoomController> child1 = ApzcOf(layers[2]);
@ -330,7 +409,7 @@ TEST_F(APZOverscrollHandoffTester, SimultaneousFlings) {
parent2->AssertStateIsFling();
}
TEST_F(APZOverscrollHandoffTester, Scrollgrab) {
TEST_F(APZScrollHandoffTester, Scrollgrab) {
// Set up the layer tree
CreateScrollgrabLayerTree();
@ -345,7 +424,7 @@ TEST_F(APZOverscrollHandoffTester, Scrollgrab) {
EXPECT_EQ(15, childApzc->GetFrameMetrics().GetScrollOffset().y);
}
TEST_F(APZOverscrollHandoffTester, ScrollgrabFling) {
TEST_F(APZScrollHandoffTester, ScrollgrabFling) {
// Set up the layer tree
CreateScrollgrabLayerTree();
@ -359,20 +438,20 @@ TEST_F(APZOverscrollHandoffTester, ScrollgrabFling) {
childApzc->AssertStateIsReset();
}
TEST_F(APZOverscrollHandoffTester, ScrollgrabFlingAcceleration1) {
TEST_F(APZScrollHandoffTester, ScrollgrabFlingAcceleration1) {
CreateScrollgrabLayerTree(true /* make parent scrollable */);
TestFlingAcceleration();
}
TEST_F(APZOverscrollHandoffTester, ScrollgrabFlingAcceleration2) {
TEST_F(APZScrollHandoffTester, ScrollgrabFlingAcceleration2) {
CreateScrollgrabLayerTree(false /* do not make parent scrollable */);
TestFlingAcceleration();
}
TEST_F(APZOverscrollHandoffTester, ImmediateHandoffDisallowed_Pan) {
TEST_F(APZScrollHandoffTester, ImmediateHandoffDisallowed_Pan) {
SCOPED_GFX_PREF(APZAllowImmediateHandoff, bool, false);
CreateOverscrollHandoffLayerTree1();
CreateScrollHandoffLayerTree1();
RefPtr<TestAsyncPanZoomController> parentApzc = ApzcOf(root);
RefPtr<TestAsyncPanZoomController> childApzc = ApzcOf(layers[1]);
@ -394,10 +473,10 @@ TEST_F(APZOverscrollHandoffTester, ImmediateHandoffDisallowed_Pan) {
EXPECT_EQ(10, parentApzc->GetFrameMetrics().GetScrollOffset().y);
}
TEST_F(APZOverscrollHandoffTester, ImmediateHandoffDisallowed_Fling) {
TEST_F(APZScrollHandoffTester, ImmediateHandoffDisallowed_Fling) {
SCOPED_GFX_PREF(APZAllowImmediateHandoff, bool, false);
CreateOverscrollHandoffLayerTree1();
CreateScrollHandoffLayerTree1();
RefPtr<TestAsyncPanZoomController> parentApzc = ApzcOf(root);
RefPtr<TestAsyncPanZoomController> childApzc = ApzcOf(layers[1]);

View File

@ -9,9 +9,9 @@ UNIFIED_SOURCES += [
'TestEventRegions.cpp',
'TestGestureDetector.cpp',
'TestHitTesting.cpp',
'TestOverscrollHandoff.cpp',
'TestPanning.cpp',
'TestPinching.cpp',
'TestScrollHandoff.cpp',
'TestTreeManager.cpp',
]

View File

@ -324,6 +324,7 @@ MessageChannel::MessageChannel(MessageListener *aListener)
mDispatchingAsyncMessagePriority(0),
mCurrentTransaction(0),
mTimedOutMessageSeqno(0),
mTimedOutMessagePriority(0),
mRecvdErrors(0),
mRemoteStackDepthGuess(false),
mSawInterruptOutMsg(false),
@ -1039,6 +1040,7 @@ MessageChannel::Send(Message* aMsg, Message* aReply)
}
mTimedOutMessageSeqno = seqno;
mTimedOutMessagePriority = prio;
return false;
}
}
@ -1408,7 +1410,22 @@ MessageChannel::DispatchSyncMessage(const Message& aMsg, Message*& aReply)
MessageChannel*& blockingVar = ShouldBlockScripts() ? gParentProcessBlocker : dummy;
Result rv;
{
if (mTimedOutMessageSeqno && mTimedOutMessagePriority >= prio) {
// If the other side sends a message in response to one of our messages
// that we've timed out, then we reply with an error.
//
// We do this because we want to avoid a situation where we process an
// incoming message from the child here while it simultaneously starts
// processing our timed-out CPOW. It's very bad for both sides to
// be processing sync messages concurrently.
//
// The only exception is if the incoming message has urgent priority and
// our timed-out message had only high priority. In that case it's safe
// to process the incoming message because we know that the child won't
// process anything (the child will defer incoming messages when waiting
// for a response to its urgent message).
rv = MsgNotAllowed;
} else {
AutoSetValue<MessageChannel*> blocked(blockingVar, this);
AutoSetValue<bool> sync(mDispatchingSyncMessage, true);
AutoSetValue<int> prioSet(mDispatchingSyncMessagePriority, prio);
@ -2117,8 +2134,10 @@ MessageChannel::CancelCurrentTransaction()
{
MonitorAutoLock lock(*mMonitor);
if (mCurrentTransaction) {
CancelMessage *cancel = new CancelMessage();
cancel->set_transaction_id(mCurrentTransaction);
mLink->SendMessage(cancel);
CancelCurrentTransactionInternal();
mLink->SendMessage(new CancelMessage());
}
}

View File

@ -630,6 +630,7 @@ class MessageChannel : HasResultCodes
// hitting a lot of corner cases with message nesting that we don't really
// care about.
int32_t mTimedOutMessageSeqno;
int mTimedOutMessagePriority;
// If waiting for the reply to a sync out-message, it will be saved here
// on the I/O thread and then read and cleared by the worker thread.

View File

@ -195,6 +195,36 @@ function ArrayStaticSome(list, callbackfn/*, thisArg*/) {
return callFunction(ArraySome, list, callbackfn, T);
}
/* ES6 draft 2016-1-15 22.1.3.25 Array.prototype.sort (comparefn) */
function ArraySort(comparefn) {
/* Step 1. */
var O = ToObject(this);
/* Step 2. */
var len = TO_UINT32(O.length);
/* 22.1.3.25.1 Runtime Semantics: SortCompare( x, y ) */
var wrappedCompareFn = comparefn;
comparefn = function(x, y) {
/* Steps 1-3. */
if (x === undefined) {
if (y === undefined)
return 0;
return 1;
}
if (y === undefined)
return -1;
/* Step 4.a. */
var v = ToNumber(wrappedCompareFn(x, y));
/* Step 4.b-c. */
return v !== v ? 0 : v;
}
return MergeSort(O, len, comparefn);
}
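Two consequences of the wrapper above are worth spelling out (an illustrative sketch; the multiplication below only serves to defeat the native comparator fast path, the same trick the patch's own tests use):
// undefined elements always sort to the end, before the user comparator runs
[3, undefined, 1].sort((x, y) => (1*x - 1*y));  // [1, 3, undefined]
// a NaN result from the comparator is coerced to 0, and the stable
// merge sort then leaves the elements in their original order
[2, 1].sort(() => NaN);                         // [2, 1]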
/* ES5 15.4.4.18. */
function ArrayForEach(callbackfn/*, thisArg*/) {
/* Step 1. */

js/src/builtin/Sorting.js Normal file
View File

@ -0,0 +1,185 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// We use varying sorts across the self-hosted codebase. All sorts are
// consolidated here to avoid confusion and re-implementation of existing
// algorithms.
// For sorting small arrays.
function InsertionSort(array, from, to, comparefn) {
var item, swap;
for (var i = from + 1; i <= to; i++) {
item = array[i];
for (var j = i - 1; j >= from; j--) {
swap = array[j];
if (comparefn(swap, item) <= 0)
break;
array[j + 1] = swap;
}
array[j + 1] = item;
}
}
function SwapArrayElements(array, i, j) {
var swap = array[i];
array[i] = array[j];
array[j] = swap;
}
// A helper function for MergeSort.
function Merge(array, start, mid, end, lBuffer, rBuffer, comparefn) {
var i, j, k;
var sizeLeft = mid - start + 1;
var sizeRight = end - mid;
// Copy our virtual arrays into separate buffers.
for (i = 0; i < sizeLeft; i++)
lBuffer[i] = array[start + i];
for (j = 0; j < sizeRight; j++)
rBuffer[j] = array[mid + 1 + j];
i = 0;
j = 0;
k = start;
while (i < sizeLeft && j < sizeRight) {
if (comparefn(lBuffer[i], rBuffer[j]) <= 0) {
array[k] = lBuffer[i];
i++;
} else {
array[k] = rBuffer[j];
j++;
}
k++;
}
// Empty out any remaining elements in the buffer.
while (i < sizeLeft) {
array[k] = lBuffer[i];
i++;
k++;
}
while (j < sizeRight) {
array[k] = rBuffer[j];
j++;
k++;
}
}
// Iterative, bottom up, mergesort.
function MergeSort(array, len, comparefn) {
// Insertion sort for small arrays, where "small" is defined by performance
// testing.
if (len < 24) {
InsertionSort(array, 0, len - 1, comparefn);
return array;
}
// We do all of our allocating up front
var lBuffer = new List();
var rBuffer = new List();
var mid, end, endOne, endTwo;
for (var windowSize = 1; windowSize < len; windowSize = 2*windowSize) {
for (var start = 0; start < len - 1; start += 2*windowSize) {
assert(windowSize < len, "The window size is larger than the array length!");
// The midpoint between the two subarrays.
mid = start + windowSize - 1;
// To keep from going over the edge.
end = start + 2 * windowSize - 1;
end = end < len - 1 ? end : len - 1;
// Skip lopsided runs to avoid doing useless work
if (mid > end)
continue;
Merge(array, start, mid, end, lBuffer, rBuffer, comparefn);
}
}
return array;
}
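As a sanity sketch (not part of the patch), the merge path can be exercised in a standalone shell by substituting plain equivalents for the self-hosting List and assert intrinsics; note the input must have at least 24 elements, or the insertion-sort fast path bypasses the merging entirely:
// Hypothetical stand-ins for the self-hosting intrinsics, for illustration only.
var List = Array;
function assert(cond, msg) { if (!cond) throw new Error(msg); }
var data = [];
for (var i = 0; i < 100; i++)
  data.push((i * 37) % 100);  // 37 is coprime to 100, so this is a permutation of 0..99
MergeSort(data, data.length, function(x, y) { return x - y; });
for (var i = 0; i < 100; i++)
  assert(data[i] === i, "mis-sorted at index " + i);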
// Rearranges the elements in array[from:to + 1] and returns an index j such that:
// - from < j < to
// - each element in array[from:j] is less than or equal to array[j]
// - each element in array[j + 1:to + 1] is greater than or equal to array[j].
function Partition(array, from, to, comparefn) {
assert(to - from >= 3, "Partition will not work with less than three elements");
var medianIndex = (from + to) >> 1;
var i = from + 1;
var j = to;
SwapArrayElements(array, medianIndex, i);
// Median of three pivot selection.
if (comparefn(array[from], array[to]) > 0)
SwapArrayElements(array, from, to);
if (comparefn(array[i], array[to]) > 0)
SwapArrayElements(array, i, to);
if (comparefn(array[from], array[i]) > 0)
SwapArrayElements(array, from, i);
var pivotIndex = i;
// Hoare partition method.
for(;;) {
do i++; while (comparefn(array[i], array[pivotIndex]) < 0);
do j--; while (comparefn(array[j], array[pivotIndex]) > 0);
if (i > j)
break;
SwapArrayElements(array, i, j);
}
SwapArrayElements(array, pivotIndex, j);
return j;
}
// In-place QuickSort.
function QuickSort(array, len, comparefn) {
// Managing the stack ourselves seems to provide a small performance boost.
var stack = new List();
var top = 0;
var start = 0;
var end = len - 1;
var pivotIndex, i, j, leftLen, rightLen;
for (;;) {
// Insertion sort for the first N elements where N is some value
// determined by performance testing.
if (end - start <= 23) {
InsertionSort(array, start, end, comparefn);
if (top < 1)
break;
end = stack[--top];
start = stack[--top];
} else {
pivotIndex = Partition(array, start, end, comparefn);
// Calculate the left and right sub-array lengths and save
// stack space by directly modifying start/end so that
// we sort the longest of the two during the next iteration.
// This reduces the maximum stack size to log2(len).
leftLen = (pivotIndex - 1) - start;
rightLen = end - (pivotIndex + 1);
if (rightLen > leftLen) {
stack[top++] = start;
stack[top++] = pivotIndex - 1;
start = pivotIndex + 1;
} else {
stack[top++] = pivotIndex + 1;
stack[top++] = end;
end = pivotIndex - 1;
}
}
}
return array;
}
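Because each partition pushes only the shorter side and loops on the longer one, every stacked range is smaller than half of its parent, which is where the log2(len) bound above comes from. Reusing the hypothetical stand-ins from the MergeSort sketch:
var data2 = [];
for (var i = 0; i < 100; i++)
  data2.push((i * 61) % 100);  // another permutation of 0..99
QuickSort(data2, data2.length, function(x, y) { return x - y; });
for (var i = 0; i < 100; i++)
  assert(data2[i] === i, "mis-sorted at index " + i);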

View File

@ -939,112 +939,6 @@ function TypedArraySome(callbackfn, thisArg = undefined) {
return false;
}
// For sorting small arrays
function InsertionSort(array, from, to, comparefn) {
var item, swap;
for (var i = from + 1; i <= to; i++) {
item = array[i];
for (var j = i - 1; j >= from; j--) {
swap = array[j];
if (comparefn(swap, item) <= 0)
break
array[j + 1] = swap;
}
array[j + 1] = item;
}
}
function SwapArrayElements(array, i, j) {
var swap = array[i];
array[i] = array[j];
array[j] = swap;
}
// Rearranges the elements in array[from:to + 1] and returns an index j such that:
// - from < j < to
// - each element in array[from:j] is less than or equal to array[j]
// - each element in array[j + 1:to + 1] is greater than or equal to array[j].
function Partition(array, from, to, comparefn) {
assert(to - from >= 3,
"Partition will not work with less than three elements");
var median_i = (from + to) >> 1;
var i = from + 1;
var j = to;
SwapArrayElements(array, median_i, i);
// Median of three pivot selection
if (comparefn(array[from], array[to]) > 0)
SwapArrayElements(array, from, to);
if (comparefn(array[i], array[to]) > 0)
SwapArrayElements(array, i, to);
if (comparefn(array[from], array[i]) > 0)
SwapArrayElements(array, from, i);
var pivot_i = i;
// Hoare partition method
for(;;) {
do i++; while (comparefn(array[i], array[pivot_i]) < 0);
do j--; while (comparefn(array[j], array[pivot_i]) > 0);
if (i > j)
break;
SwapArrayElements(array, i, j);
}
SwapArrayElements(array, pivot_i, j);
return j;
}
// In-place QuickSort
function QuickSort(array, len, comparefn) {
// Managing the stack ourselves seems to provide a small performance boost
var stack = new List();
var top = 0;
var start = 0;
var end = len - 1;
var pivot_i, i, j, l_len, r_len;
for (;;) {
// Insertion sort for the first N elements where N is some value
// determined by performance testing.
if (end - start <= 23) {
InsertionSort(array, start, end, comparefn);
if (top < 1)
break;
end = stack[--top];
start = stack[--top];
} else {
pivot_i = Partition(array, start, end, comparefn);
// Calculate the left and right sub-array lengths and save
// stack space by directly modifying start/end so that
// we sort the longest of the two during the next iteration.
// This reduces the maximum stack size to log2(len)
l_len = (pivot_i - 1) - start;
r_len = end - (pivot_i + 1);
if (r_len > l_len) {
stack[top++] = start;
stack[top++] = pivot_i - 1;
start = pivot_i + 1;
} else {
stack[top++] = pivot_i + 1;
stack[top++] = end;
end = pivot_i - 1;
}
}
}
return array;
}
// ES6 draft 20151210 22.2.3.26
// Cases are ordered according to likelihood of occurrence
// as opposed to the ordering in the spec.

View File

@ -2901,24 +2901,6 @@ elif test "$GNU_CC"; then
fi
fi
dnl ========================================================
dnl = Enable DMD
dnl ========================================================
MOZ_ARG_ENABLE_BOOL(dmd,
[ --enable-dmd Enable DMD; also enables jemalloc and replace-malloc],
MOZ_DMD=1,
MOZ_DMD= )
if test "$MOZ_DMD"; then
AC_DEFINE(MOZ_DMD)
if test "${CPU_ARCH}" = "arm"; then
CFLAGS="$CFLAGS -funwind-tables"
CXXFLAGS="$CXXFLAGS -funwind-tables"
fi
fi
dnl ========================================================
dnl = Enable jemalloc
dnl ========================================================

View File

@ -18,11 +18,6 @@
#include "gc/StoreBuffer.h"
#include "gc/Tracer.h"
/* Perform validation of incremental marking in debug builds but not on B2G. */
#if defined(DEBUG) && !defined(MOZ_B2G)
#define JS_GC_MARKING_VALIDATION
#endif
namespace js {
class AutoLockGC;
@ -1182,7 +1177,7 @@ class GCRuntime
js::gc::ZoneList zonesToMaybeCompact;
ArenaHeader* relocatedArenasToRelease;
#ifdef JS_GC_MARKING_VALIDATION
#ifdef JS_GC_ZEAL
js::gc::MarkingValidator* markingValidator;
#endif

View File

@ -2,6 +2,6 @@ try {
[0,0].sort(Array.some)
"".replace(RegExp(), Array.reduce)
} catch (error) {
if (!(error instanceof TypeError && error.message == "0 is not a function"))
if (!(error instanceof TypeError && /^\w is not a function$/.test(error.message)))
throw error;
}
}

View File

@ -1817,6 +1817,43 @@ js::array_sort(JSContext* cx, unsigned argc, Value* vp)
if (!obj)
return false;
ComparatorMatchResult comp = MatchNumericComparator(cx, fval);
if (comp == Match_Failure)
return false;
if (!fval.isNull() && comp == Match_None) {
/*
* Non-optimized user supplied comparators perform much better when
* called from within a self-hosted sorting function.
*/
RootedAtom selfHostedSortAtom(cx, Atomize(cx, "ArraySort", 9));
RootedPropertyName selfHostedSortName(cx, selfHostedSortAtom->asPropertyName());
RootedValue selfHostedSortValue(cx);
if (!GlobalObject::getIntrinsicValue(cx, cx->global(), selfHostedSortName,
&selfHostedSortValue)) {
return false;
}
MOZ_ASSERT(selfHostedSortValue.isObject());
MOZ_ASSERT(selfHostedSortValue.toObject().is<JSFunction>());
InvokeArgs iargs(cx);
if (!iargs.init(1))
return false;
iargs.setCallee(selfHostedSortValue);
iargs.setThis(args.thisv());
iargs[0].set(fval);
if (!Invoke(cx, iargs))
return false;
args.rval().set(iargs.rval());
return true;
}
uint32_t len;
if (!GetLengthProperty(cx, obj, &len))
return false;
@ -1917,27 +1954,13 @@ js::array_sort(JSContext* cx, unsigned argc, Value* vp)
return false;
}
} else {
ComparatorMatchResult comp = MatchNumericComparator(cx, fval);
if (comp == Match_Failure)
return false;
if (comp != Match_None) {
if (allInts) {
JS_ALWAYS_TRUE(vec.resize(n * 2));
if (!MergeSort(vec.begin(), n, vec.begin() + n, SortComparatorInt32s[comp]))
return false;
} else {
if (!SortNumerically(cx, &vec, n, comp))
return false;
}
} else {
FastInvokeGuard fig(cx, fval);
if (allInts) {
JS_ALWAYS_TRUE(vec.resize(n * 2));
if (!MergeSort(vec.begin(), n, vec.begin() + n,
SortComparatorFunction(cx, fval, fig)))
{
if (!MergeSort(vec.begin(), n, vec.begin() + n, SortComparatorInt32s[comp]))
return false;
} else {
if (!SortNumerically(cx, &vec, n, comp))
return false;
}
}
}
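From script, the dividing line is whether MatchNumericComparator recognizes the comparator; anything it cannot classify is now dispatched to the self-hosted ArraySort. An illustrative pair (the multiplication trick comes from the patch's own tests):
// Recognized numeric comparator: stays on the optimized native path.
[3, 1, 2].sort((x, y) => x - y);
// Multiplication defeats the matcher, so this goes through ArraySort.
[3, 1, 2].sort((x, y) => (1*x - 1*y));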

View File

@ -1150,7 +1150,7 @@ GCRuntime::GCRuntime(JSRuntime* rt) :
arenasAllocatedDuringSweep(nullptr),
startedCompacting(false),
relocatedArenasToRelease(nullptr),
#ifdef JS_GC_MARKING_VALIDATION
#ifdef JS_GC_ZEAL
markingValidator(nullptr),
#endif
interFrameGC(false),

View File

@ -711,6 +711,7 @@ selfhosted.inputs = [
'builtin/RegExp.js',
'builtin/String.js',
'builtin/Set.js',
'builtin/Sorting.js',
'builtin/TypedArray.js',
'builtin/TypedObject.js',
'builtin/WeakSet.js'

View File

@ -0,0 +1,34 @@
// Note: failed runs should include their "SEED" value in error messages,
// setting "const SEED" to that value will recreate the data from any such run.
const SEED = (Math.random() * 10) + 1;
// Create an array filled with random values, 'size' is the desired length of
// the array and 'seed' is an initial value supplied to a pseudo-random number
// generator.
function genRandomArray(size, seed) {
return Array.from(XorShiftGenerator(seed, size));
}
function SortTest(size, seed) {
let arrOne = genRandomArray(size, seed);
let arrTwo = Array.from(arrOne);
let arrThree = Array.from(arrOne);
// Test numeric comparators against typed array sort.
assertDeepEq(Array.from((Int32Array.from(arrOne)).sort()),
arrTwo.sort((x, y) => (x - y)),
`The arr is not properly sorted! seed: ${SEED}`);
// Use multiplication to kill comparator optimization and trigger
// self-hosted sorting.
assertDeepEq(Array.from((Int32Array.from(arrOne)).sort()),
arrThree.sort((x, y) => (1*x - 1*y)),
`The arr is not properly sorted! seed: ${SEED}`);
}
SortTest(2048, SEED);
SortTest(16, SEED);
SortTest(0, SEED);
if (typeof reportCompare === "function")
reportCompare(true, true);

View File

@ -0,0 +1,33 @@
// Sort every possible permutation of some arrays.
function sortAllPermutations(data, comparefn) {
for (let permutation of Permutations(Array.from(data))) {
let sorted = (Array.from(permutation)).sort(comparefn);
for (let i in sorted) {
assertEq(sorted[i], data[i],
[`[${permutation}].sort(${comparefn})`,
`returned ${sorted}, expected ${data}`].join(' '));
}
}
}
let lex = [2112, "bob", "is", "my", "name"];
let nans = [1/undefined, NaN, Number.NaN]
let num1 = [-11, 0, 0, 100, 101];
let num2 = [-11, 100, 201234.23, undefined, undefined];
sortAllPermutations(lex);
sortAllPermutations(nans);
sortAllPermutations(nans, (x, y) => x - y);
// Multiplication kills comparator optimization.
sortAllPermutations(nans, (x, y) => (1*x - 1*y));
sortAllPermutations(num1, (x, y) => x - y);
sortAllPermutations(num1, (x, y) => (1*x - 1*y));
sortAllPermutations(num2, (x, y) => x - y);
sortAllPermutations(num2, (x, y) => (1*x - 1*y));
if (typeof reportCompare === "function")
reportCompare(true, true);

View File

@ -2,20 +2,6 @@
// setting "const SEED" to that value will recreate the data from any such run.
const SEED = (Math.random() * 10) + 1;
// An xorshift pseudo-random number generator see:
// https://en.wikipedia.org/wiki/Xorshift#xorshift.2A
// This generator will always produce a value, n, where
// 0 <= n <= 255
function *xorShiftGenerator(seed, size) {
let x = seed;
for (let i = 0; i < size; i++) {
x ^= x >> 12;
x ^= x << 25;
x ^= x >> 27;
yield x % 256;
}
}
// Fill up an array buffer with random values and return it in raw form.
// 'size' is the desired length of the view we will place atop the buffer,
// 'width' is the bit-width of the view we plan on placing atop the buffer,
@ -26,7 +12,7 @@ function genRandomArrayBuffer(size, width, seed) {
let len = 0;
// We generate a random number, n, where 0 <= n <= 255 for every space
// available in our buffer.
for (let n of xorShiftGenerator(seed, buf.byteLength))
for (let n of XorShiftGenerator(seed, buf.byteLength))
arr[len++] = n;
return buf;
}

View File

@ -1,22 +1,3 @@
function swapElements(arr, i, j) {
var swap = arr[i];
arr[i] = arr[j];
arr[j] = swap;
}
// Yield every permutation of the elements in some iterable.
function *permutations(items) {
if (items.length == 0) {
yield [];
} else {
for (let i = 0; i < items.length; i++) {
swapElements(items, 0, i);
for (let e of permutations(items.slice(1, items.length)))
yield [items[0]].concat(e);
}
}
}
// Pre-sorted test data; it's important that these arrays remain in ascending order.
let i32 = [-2147483648, -320000, -244000, 2147483647]
let u32 = [0, 987632, 4294967295]
@ -35,7 +16,7 @@ let nans = [1/undefined, NaN, Number.NaN]
// Sort every possible permutation of the arrays
function sortAllPermutations(dataType, testData) {
let reference = new dataType(testData);
for (let permutation of permutations(testData))
for (let permutation of Permutations(testData))
assertDeepEq((new dataType(permutation)).sort(), reference);
}

View File

@ -372,6 +372,40 @@ function enterFunc (funcName)
callStack.push(funcName);
}
/*
* An xorshift pseudo-random number generator see:
* https://en.wikipedia.org/wiki/Xorshift#xorshift.2A
* This generator will always produce a value, n, where
* 0 <= n <= 255
*/
function *XorShiftGenerator(seed, size) {
let x = seed;
for (let i = 0; i < size; i++) {
x ^= x >> 12;
x ^= x << 25;
x ^= x >> 27;
yield x % 256;
}
}
/*
* Yield every permutation of the elements in some iterable.
*/
function *Permutations(items) {
if (items.length == 0) {
yield [];
} else {
let swap;
for (let i = 0; i < items.length; i++) {
swap = items[0];
items[0] = items[i];
items[i] = swap;
for (let e of Permutations(items.slice(1, items.length)))
yield [items[0]].concat(e);
}
}
}
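For illustration, the generator yields each of the n! orderings exactly once, e.g. for a three-element array (a sketch; print is the usual JS shell global):
// [1,2,3], [1,3,2], [2,1,3], [2,3,1], [3,1,2], [3,2,1]
for (let p of Permutations([1, 2, 3]))
  print(p.join(","));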
/*
* Pops the top funcName off the call stack. funcName is optional, and can be
* used to check push-pop balance.

View File

@ -1066,7 +1066,8 @@ SavedStacks::clear()
size_t
SavedStacks::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf)
{
return frames.sizeOfExcludingThis(mallocSizeOf);
return frames.sizeOfExcludingThis(mallocSizeOf) +
pcLocationMap.sizeOfExcludingThis(mallocSizeOf);
}
bool

View File

@ -1856,7 +1856,7 @@ struct FramesWithDepth
{}
bool operator<(const FramesWithDepth& aOther) const {
if (mDepth != aOther.mDepth) {
if (!FuzzyEqual(mDepth, aOther.mDepth, 0.1f)) {
// We want to sort so that the shallowest item (highest depth value) is first
return mDepth > aOther.mDepth;
}
@ -1919,13 +1919,16 @@ void nsDisplayList::HitTest(nsDisplayListBuilder* aBuilder, const nsRect& aRect,
bool snap;
nsRect r = item->GetBounds(aBuilder, &snap).Intersect(aRect);
auto itemType = item->GetType();
bool alwaysIntersect =
bool same3DContext =
(itemType == nsDisplayItem::TYPE_TRANSFORM &&
static_cast<nsDisplayTransform*>(item)->IsParticipating3DContext()) ||
(itemType == nsDisplayItem::TYPE_PERSPECTIVE &&
static_cast<nsDisplayPerspective*>(item)->Frame()->Extend3DContext());
if (alwaysIntersect &&
if (same3DContext &&
!static_cast<nsDisplayTransform*>(item)->IsLeafOf3DContext()) {
if (!item->GetClip().MayIntersect(aRect)) {
continue;
}
nsAutoTArray<nsIFrame*, 1> neverUsed;
// Start gathering leaves of the 3D rendering context, and
// append leaves at the end of mItemBuffer. Leaves are
@ -1936,7 +1939,7 @@ void nsDisplayList::HitTest(nsDisplayListBuilder* aBuilder, const nsRect& aRect,
i = aState->mItemBuffer.Length();
continue;
}
if (alwaysIntersect || item->GetClip().MayIntersect(r)) {
if (same3DContext || item->GetClip().MayIntersect(r)) {
nsAutoTArray<nsIFrame*, 16> outFrames;
item->HitTest(aBuilder, aRect, aState, &outFrames);

View File

@ -5,6 +5,7 @@ support-files =
Ahem.ttf
border_radius_hit_testing_iframe.html
preserve3d_sorting_hit_testing_iframe.html
preserve3d_sorting_hit_testing2_iframe.html
image_rgrg-256x256.png
image_rrgg-256x256.png
bug369950-subframe.xml
@ -41,6 +42,7 @@ support-files =
multi-range-script-select-ref.html
[test_preserve3d_sorting_hit_testing.html]
[test_preserve3d_sorting_hit_testing2.html]
[test_after_paint_pref.html]
[test_bug993936.html]
skip-if = e10s

View File

@ -0,0 +1,97 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<style>
body {
background: #333;
overflow: hidden;
}
::-webkit-scrollbar {
display: none;
}
div {
margin: 0;
padding: 0;
-webkit-transform-style: preserve-3d;
transform-style: preserve-3d;
position: absolute;
}
#container {
font-family: UnifrakturMaguntia;
width: 350px;
height: 70%;
max-height: 500px;
-webkit-perspective: 5000px;
perspective: 5000px;
transform: translate(-50%, -50%) rotateY(20deg);
}
#container p {
padding: 0 5px 0 5px;
}
#container hr {
margin: 0 20px 0 20px;
}
#content {
-ms-overflow-style: none;
overflow: -moz-scrollbars-none;
overflow-y: scroll;
height: 100%;
background: #fefee0;
}
#lorem {
font-size: 7em;
float: left;
color: red;
border: 1px solid black;
margin-right: 5px;
}
#tree {
float: right;
width: 10em;
height: 10em;
border: 1px solid black;
margin: 0 5px 0 2px;
}
</style>
</head>
<body>
<div id="container">
<div id="content">
<p>
<span id="lorem">L</span>orem ipsum dolor sit amet, consectetur adipiscing elit. Integer sagittis nisi urna, a ultrices est facilisis a. Morbi porttitor vulputate odio, eu lacinia nisi. Suspendisse felis sapien, facilisis nec ex in, blandit tincidunt tellus. Sed at commodo nunc. In nibh lectus, facilisis nec magna nec, bibendum egestas nunc. Nam varius lorem in fringilla cursus. Integer dignissim, lectus vitae sodales molestie, libero purus malesuada arcu, vitae facilisis nunc dolor non mi. In nunc tortor, tempor non pharetra vitae, mattis a purus. Nulla rhoncus vitae metus vel ornare. Nunc augue dui, suscipit ac urna vel, consectetur volutpat ipsum. Nunc ac nulla ut enim laoreet placerat. Sed luctus aliquam purus, sollicitudin blandit dui blandit id. Aenean venenatis risus dolor, at viverra urna aliquam non. Morbi sit amet pellentesque justo, eget viverra augue.
</p>
<p>&nbsp;&nbsp;&nbsp;&nbsp;
Praesent posuere ultricies orci sit amet lacinia. Suspendisse lacinia scelerisque risus, sodales egestas turpis cursus sed. Proin sed mollis mauris, vitae ultricies nibh. Nulla bibendum leo a mauris luctus, sit amet iaculis arcu blandit. Etiam pulvinar, odio et rutrum egestas, elit mi maximus ex, id elementum est tortor id turpis. Duis rhoncus et lorem vel maximus. Aenean at justo sagittis, aliquet eros eget, iaculis magna. Nam non orci congue, dapibus dui eget, sagittis nisl. Phasellus venenatis id est et tempor. Aenean condimentum tristique nibh sit amet varius. Vestibulum et lectus quis eros dapibus consectetur nec auctor dolor. Sed euismod eu felis aliquam fermentum. Donec lacinia fringilla erat, at eleifend velit tempus at.
</p>
<hr>
<p>&nbsp;&nbsp;&nbsp;&nbsp;
Cras justo turpis, vulputate eget venenatis sit amet, bibendum quis dolor. Cras at interdum libero. Quisque convallis rutrum magna in ultrices. Donec ut magna dolor. Mauris pulvinar ut sapien a posuere. Sed nisi elit, tincidunt vitae magna eu, dapibus suscipit purus. Maecenas tincidunt mollis eros et dictum. Duis est nulla, rhoncus tincidunt velit at, venenatis elementum velit. Phasellus lobortis sem tellus, id sodales quam dignissim nec. Phasellus pulvinar metus ex, nec gravida nunc elementum vel. Ut mattis varius fringilla. Phasellus imperdiet sit amet risus a elementum. Donec pulvinar ante sit amet massa blandit ullamcorper. Donec vitae malesuada nisl, et laoreet sem.
</p>
<p>&nbsp;&nbsp;&nbsp;&nbsp;
Suspendisse bibendum elit blandit arcu vulputate, nec hendrerit dui vehicula. Vestibulum porta finibus odio vitae maximus. Duis in vulputate risus. Donec mattis turpis ex, vitae semper sem ultrices eu. Aliquam in ex blandit erat ultrices sollicitudin. Vestibulum porta nisl in porttitor rutrum. Integer consectetur porttitor ligula facilisis malesuada. Proin placerat enim sed lacus commodo mollis nec eu arcu. In hac habitasse platea dictumst. Curabitur luctus est risus, sit amet fringilla nunc condimentum vel. Integer mauris lorem, molestie ut nisl sit amet, pellentesque mollis quam. Aliquam posuere purus non nisi molestie semper.
</p>
<hr>
<p>&nbsp;&nbsp;&nbsp;&nbsp;
Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Mauris facilisis nisi diam, eu pulvinar ex sollicitudin sed. Maecenas sed eros id quam suscipit ultricies ut tincidunt quam. Donec iaculis, justo at fringilla laoreet, quam sem dapibus urna, ut eleifend odio eros et ligula. Proin urna ante, condimentum vitae sollicitudin sit amet, egestas ac nunc. Aenean sapien velit, porta a eros quis, iaculis dignissim felis. Suspendisse mollis vulputate metus vel interdum. Aliquam hendrerit elementum erat, sit amet commodo velit suscipit et. Sed semper sem at mauris rhoncus, id efficitur arcu molestie. Nam feugiat lorem pretium, consectetur felis et, fringilla dolor. Nunc dui velit, elementum non hendrerit nec, sagittis vitae odio. Curabitur nec leo tincidunt, pellentesque metus at, condimentum risus.
</p>
</div>
</div>
</body>
<script type="application/javascript">
window.onload = function() {
opener.child_opened(document);
};
</script>
</html>

View File

@ -0,0 +1,40 @@
<!DOCTYPE HTML>
<html>
<!--
https://bugzilla.mozilla.org/show_bug.cgi?id=1241394
-->
<head>
<title>Test for Bug 1241394</title>
<script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
</head>
<body onload="run()">
<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1241394">Mozilla Bug 1241394</a>
<pre id="test">
<script type="application/javascript">
/** Test for Bug 1241394 **/
SimpleTest.waitForExplicitFinish();
function run() {
var win;
window.child_opened = function(doc) {
var container= doc.getElementById("container");
isnot(doc.elementFromPoint(60, 50).id, container.id,
"point (50, 50): should not hit background");
win.close();
SimpleTest.finish();
}
win = window.open("preserve3d_sorting_hit_testing2_iframe.html");
}
</script>
</pre>
</body>
</html>

View File

@ -65,7 +65,7 @@ static void nr_ice_socket_readable_cb(NR_SOCKET s, int how, void *cb_arg)
NR_ASYNC_WAIT(s,how,nr_ice_socket_readable_cb,cb_arg);
if(r=nr_socket_recvfrom(sock->sock,buf,sizeof(buf),&len_s,0,&addr)){
if (r != R_WOULDBLOCK && (sock->type == NR_ICE_SOCKET_TYPE_STREAM_TURN)) {
if (r != R_WOULDBLOCK && (sock->type != NR_ICE_SOCKET_TYPE_DGRAM)) {
/* Report this error upward. Bug 946423 */
r_log(LOG_ICE,LOG_ERR,"ICE(%s): Error %d on reliable socket. Abandoning.",sock->ctx->label, r);
NR_ASYNC_CANCEL(s, NR_ASYNC_WAIT_READ);

View File

@ -47,13 +47,19 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#endif
#define END_HEADERS CRLF CRLF
typedef enum {
PROXY_TUNNEL_NONE=0,
PROXY_TUNNEL_REQUESTED,
PROXY_TUNNEL_CONNECTED,
PROXY_TUNNEL_CLOSED,
PROXY_TUNNEL_FAILED
} nr_socket_proxy_tunnel_state;
typedef struct nr_socket_proxy_tunnel_ {
nr_proxy_tunnel_config *config;
nr_socket *inner;
nr_transport_addr remote_addr;
int connect_requested;
int connect_answered;
int connect_failed;
nr_socket_proxy_tunnel_state state;
char buffer[MAX_HTTP_CONNECT_BUFFER_SIZE];
size_t buffered_bytes;
void *resolver_handle;
@ -143,7 +149,7 @@ static int send_http_connect(nr_socket_proxy_tunnel *sock)
ABORT(R_IO_ERROR);
}
sock->connect_requested = 1;
sock->state = PROXY_TUNNEL_REQUESTED;
_status = 0;
abort:
@ -173,6 +179,9 @@ static int parse_http_response(char *begin, char *end, unsigned int *status)
// len should *never* be greater than nr_socket_proxy_tunnel::buffered_bytes.
// Which in turn should never be greater than the size of nr_socket_proxy_tunnel::buffer.
assert(len <= MAX_HTTP_CONNECT_BUFFER_SIZE);
if (len > MAX_HTTP_CONNECT_BUFFER_SIZE) {
return R_BAD_DATA;
}
memcpy(response, begin, len);
response[len] = '\0';
@ -249,6 +258,10 @@ static int nr_socket_proxy_tunnel_resolved_cb(void *obj, nr_transport_addr *prox
else {
r_log(LOG_GENERIC,LOG_WARNING,"Failed to resolve proxy %s",
sock->config->proxy_host);
/* TODO: Mozilla bug 1241758: because of the callback the return value goes
* nowhere, so we can't mark the candidate as failed, so everything depends
* on the overall timeouts in this case. */
sock->state = PROXY_TUNNEL_FAILED;
ABORT(R_NOT_FOUND);
}
@ -336,13 +349,20 @@ int nr_socket_proxy_tunnel_write(void *obj, const void *msg, size_t len,
r_log(LOG_GENERIC,LOG_DEBUG,"nr_socket_proxy_tunnel_write");
if (!sock->connect_requested) {
if (sock->state >= PROXY_TUNNEL_CLOSED) {
return R_FAILED;
}
if (sock->state == PROXY_TUNNEL_NONE) {
if ((r=send_http_connect(sock))) {
ABORT(r);
}
}
/* TODO (bug 1117984): we cannot assume it's safe to write until we receive a response. */
if (sock->state != PROXY_TUNNEL_CONNECTED) {
return R_WOULDBLOCK;
}
if ((r=nr_socket_write(sock->inner, msg, len, written, 0))) {
ABORT(r);
}
@ -366,11 +386,11 @@ int nr_socket_proxy_tunnel_read(void *obj, void * restrict buf, size_t maxlen,
*len = 0;
if (sock->connect_failed) {
if (sock->state >= PROXY_TUNNEL_CLOSED) {
return R_FAILED;
}
if (sock->connect_answered) {
if (sock->state == PROXY_TUNNEL_CONNECTED) {
return nr_socket_read(sock->inner, buf, maxlen, len, 0);
}
@ -391,8 +411,6 @@ int nr_socket_proxy_tunnel_read(void *obj, void * restrict buf, size_t maxlen,
sock->buffered_bytes += bytes_read;
if (http_term = find_http_terminator(sock->buffer, sock->buffered_bytes)) {
sock->connect_answered = 1;
if ((r = parse_http_response(sock->buffer, http_term, &http_status))) {
ABORT(r);
}
@ -404,6 +422,8 @@ int nr_socket_proxy_tunnel_read(void *obj, void * restrict buf, size_t maxlen,
ABORT(R_FAILED);
}
sock->state = PROXY_TUNNEL_CONNECTED;
ptr = http_term + strlen(END_HEADERS);
pending = sock->buffered_bytes - (ptr - sock->buffer);
@ -420,7 +440,7 @@ int nr_socket_proxy_tunnel_read(void *obj, void * restrict buf, size_t maxlen,
_status=0;
abort:
if (_status && _status != R_WOULDBLOCK) {
sock->connect_failed = 1;
sock->state = PROXY_TUNNEL_FAILED;
}
return(_status);
}
@ -436,6 +456,8 @@ int nr_socket_proxy_tunnel_close(void *obj)
sock->resolver_handle = 0;
}
sock->state = PROXY_TUNNEL_CLOSED;
return nr_socket_close(sock->inner);
}

View File

@ -524,6 +524,10 @@ static void nr_socket_buffered_stun_writable_cb(NR_SOCKET s, int how, void *arg)
int r,_status;
nr_p_buf *n1, *n2;
if (sock->read_state == NR_ICE_SOCKET_READ_FAILED) {
ABORT(R_FAILED);
}
/* Try to flush */
STAILQ_FOREACH_SAFE(n1, &sock->pending_writes, entry, n2) {
size_t written = 0;

View File

@ -49,19 +49,4 @@ void TransportLayer::SetState(State state, const char *file, unsigned line) {
}
}
nsresult TransportLayer::RunOnThread(nsIRunnable *event) {
if (target_) {
nsIThread *thr;
DebugOnly<nsresult> rv = NS_GetCurrentThread(&thr);
MOZ_ASSERT(NS_SUCCEEDED(rv));
if (target_ != thr) {
return target_->Dispatch(event, NS_DISPATCH_SYNC);
}
}
return event->Run();
}
} // close namespace

View File

@ -57,10 +57,6 @@ class TransportLayer : public sigslot::has_slots<> {
// Downward interface
TransportLayer *downward() { return downward_; }
// Dispatch a call onto our thread (or run on the same thread if
// thread is not set). This is always synchronous.
nsresult RunOnThread(nsIRunnable *event);
// Get the state
State state() const { return state_; }
// Must be implemented by derived classes

View File

@ -2088,16 +2088,7 @@ JsepSessionImpl::SetupDefaultCodecs()
));
// Supported video codecs.
JsepVideoCodecDescription* vp8 = new JsepVideoCodecDescription(
"120",
"VP8",
90000
);
// Defaults for mandatory params
vp8->mConstraints.maxFs = 12288; // Enough for 2048x1536
vp8->mConstraints.maxFps = 60;
mSupportedCodecs.values.push_back(vp8);
// Note: order here implies priority for building offers!
JsepVideoCodecDescription* vp9 = new JsepVideoCodecDescription(
"121",
"VP9",
@ -2108,6 +2099,16 @@ JsepSessionImpl::SetupDefaultCodecs()
vp9->mConstraints.maxFps = 60;
mSupportedCodecs.values.push_back(vp9);
JsepVideoCodecDescription* vp8 = new JsepVideoCodecDescription(
"120",
"VP8",
90000
);
// Defaults for mandatory params
vp8->mConstraints.maxFs = 12288; // Enough for 2048x1536
vp8->mConstraints.maxFps = 60;
mSupportedCodecs.values.push_back(vp8);
JsepVideoCodecDescription* h264_1 = new JsepVideoCodecDescription(
"126",
"H264",

View File

@ -2656,8 +2656,8 @@ TEST_F(JsepSessionTest, ValidateOfferedCodecParams)
ASSERT_EQ(SdpDirectionAttribute::kSendrecv, video_attrs.GetDirection());
ASSERT_EQ(4U, video_section.GetFormats().size());
ASSERT_EQ("120", video_section.GetFormats()[0]);
ASSERT_EQ("121", video_section.GetFormats()[1]);
ASSERT_EQ("121", video_section.GetFormats()[0]);
ASSERT_EQ("120", video_section.GetFormats()[1]);
ASSERT_EQ("126", video_section.GetFormats()[2]);
ASSERT_EQ("97", video_section.GetFormats()[3]);
@ -2790,22 +2790,24 @@ TEST_F(JsepSessionTest, ValidateAnsweredCodecParams)
// TODO(bug 1099351): Once fixed, this stuff will need to be updated.
ASSERT_EQ(1U, video_section.GetFormats().size());
// ASSERT_EQ(3U, video_section.GetFormats().size());
ASSERT_EQ("120", video_section.GetFormats()[0]);
ASSERT_EQ("121", video_section.GetFormats()[0]);
// ASSERT_EQ("126", video_section.GetFormats()[1]);
// ASSERT_EQ("97", video_section.GetFormats()[2]);
// Validate rtpmap
ASSERT_TRUE(video_attrs.HasAttribute(SdpAttribute::kRtpmapAttribute));
auto& rtpmaps = video_attrs.GetRtpmap();
ASSERT_TRUE(rtpmaps.HasEntry("120"));
ASSERT_TRUE(rtpmaps.HasEntry("121"));
// ASSERT_TRUE(rtpmaps.HasEntry("126"));
// ASSERT_TRUE(rtpmaps.HasEntry("97"));
auto& vp8_entry = rtpmaps.GetEntry("120");
//auto& vp8_entry = rtpmaps.GetEntry("120");
auto& vp9_entry = rtpmaps.GetEntry("121");
// auto& h264_1_entry = rtpmaps.GetEntry("126");
// auto& h264_0_entry = rtpmaps.GetEntry("97");
ASSERT_EQ("VP8", vp8_entry.name);
//ASSERT_EQ("VP8", vp8_entry.name);
ASSERT_EQ("VP9", vp9_entry.name);
// ASSERT_EQ("H264", h264_1_entry.name);
// ASSERT_EQ("H264", h264_0_entry.name);
@ -2816,17 +2818,17 @@ TEST_F(JsepSessionTest, ValidateAnsweredCodecParams)
ASSERT_EQ(1U, fmtps.size());
// ASSERT_EQ(3U, fmtps.size());
// VP8
ASSERT_EQ("120", fmtps[0].format);
// VP9
ASSERT_EQ("121", fmtps[0].format);
ASSERT_TRUE(!!fmtps[0].parameters);
ASSERT_EQ(SdpRtpmapAttributeList::kVP8, fmtps[0].parameters->codec_type);
ASSERT_EQ(SdpRtpmapAttributeList::kVP9, fmtps[0].parameters->codec_type);
auto& parsed_vp8_params =
auto& parsed_vp9_params =
*static_cast<const SdpFmtpAttributeList::VP8Parameters*>(
fmtps[0].parameters.get());
ASSERT_EQ((uint32_t)12288, parsed_vp8_params.max_fs);
ASSERT_EQ((uint32_t)60, parsed_vp8_params.max_fr);
ASSERT_EQ((uint32_t)12288, parsed_vp9_params.max_fs);
ASSERT_EQ((uint32_t)60, parsed_vp9_params.max_fr);
SetLocalAnswer(answer);

View File

@ -29,8 +29,13 @@
'target_name': 'rtc_base_approved',
'type': 'static_library',
'sources': [
'bitbuffer.cc',
'bitbuffer.h',
'buffer.cc',
'buffer.h',
'checks.cc',
'checks.h',
'constructormagic.h',
'event.cc',
'event.h',
'event_tracer.cc',

View File

@ -0,0 +1,296 @@
/*
* Copyright 2015 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/base/bitbuffer.h"
#include <algorithm>
#include <limits>
#include "webrtc/base/checks.h"
namespace {
// Returns the lowest (right-most) |bit_count| bits in |byte|.
uint8_t LowestBits(uint8_t byte, size_t bit_count) {
RTC_DCHECK_LE(bit_count, 8u);
return byte & ((1 << bit_count) - 1);
}
// Returns the highest (left-most) |bit_count| bits in |byte|, shifted to the
// lowest bits (to the right).
uint8_t HighestBits(uint8_t byte, size_t bit_count) {
RTC_DCHECK_LE(bit_count, 8u);
uint8_t shift = 8 - static_cast<uint8_t>(bit_count);
uint8_t mask = 0xFF << shift;
return (byte & mask) >> shift;
}
// Returns the highest byte of |val| in a uint8_t.
uint8_t HighestByte(uint64_t val) {
return static_cast<uint8_t>(val >> 56);
}
// Returns the result of writing partial data from |source|, of
// |source_bit_count| size in the highest bits, to |target| at
// |target_bit_offset| from the highest bit.
uint8_t WritePartialByte(uint8_t source,
size_t source_bit_count,
uint8_t target,
size_t target_bit_offset) {
RTC_DCHECK(target_bit_offset < 8);
RTC_DCHECK(source_bit_count < 9);
RTC_DCHECK(source_bit_count <= (8 - target_bit_offset));
// Generate a mask for just the bits we're going to overwrite, so:
uint8_t mask =
// The number of bits we want, in the most significant bits...
static_cast<uint8_t>(0xFF << (8 - source_bit_count))
// ...shifted over to the target offset from the most significant bit.
>> target_bit_offset;
// We want the target, with the bits we'll overwrite masked off, or'ed with
// the bits from the source we want.
return (target & ~mask) | (source >> target_bit_offset);
}
// Counts the number of bits used in the binary representation of val.
size_t CountBits(uint64_t val) {
size_t bit_count = 0;
while (val != 0) {
bit_count++;
val >>= 1;
}
return bit_count;
}
} // namespace
namespace rtc {
BitBuffer::BitBuffer(const uint8_t* bytes, size_t byte_count)
: bytes_(bytes), byte_count_(byte_count), byte_offset_(), bit_offset_() {
RTC_DCHECK(static_cast<uint64_t>(byte_count_) <=
std::numeric_limits<uint32_t>::max());
}
uint64_t BitBuffer::RemainingBitCount() const {
return (static_cast<uint64_t>(byte_count_) - byte_offset_) * 8 - bit_offset_;
}
bool BitBuffer::ReadUInt8(uint8_t* val) {
uint32_t bit_val;
if (!ReadBits(&bit_val, sizeof(uint8_t) * 8)) {
return false;
}
RTC_DCHECK(bit_val <= std::numeric_limits<uint8_t>::max());
*val = static_cast<uint8_t>(bit_val);
return true;
}
bool BitBuffer::ReadUInt16(uint16_t* val) {
uint32_t bit_val;
if (!ReadBits(&bit_val, sizeof(uint16_t) * 8)) {
return false;
}
RTC_DCHECK(bit_val <= std::numeric_limits<uint16_t>::max());
*val = static_cast<uint16_t>(bit_val);
return true;
}
bool BitBuffer::ReadUInt32(uint32_t* val) {
return ReadBits(val, sizeof(uint32_t) * 8);
}
bool BitBuffer::PeekBits(uint32_t* val, size_t bit_count) {
if (!val || bit_count > RemainingBitCount() || bit_count > 32) {
return false;
}
const uint8_t* bytes = bytes_ + byte_offset_;
size_t remaining_bits_in_current_byte = 8 - bit_offset_;
uint32_t bits = LowestBits(*bytes++, remaining_bits_in_current_byte);
// If we're reading fewer bits than what's left in the current byte, just
// return the portion of this byte that we need.
if (bit_count < remaining_bits_in_current_byte) {
*val = HighestBits(bits, bit_offset_ + bit_count);
return true;
}
// Otherwise, subtract what we've read from the bit count and read as many
// full bytes as we can into bits.
bit_count -= remaining_bits_in_current_byte;
while (bit_count >= 8) {
bits = (bits << 8) | *bytes++;
bit_count -= 8;
}
// Whatever we have left is smaller than a byte, so grab just the bits we need
// and shift them into the lowest bits.
if (bit_count > 0) {
bits <<= bit_count;
bits |= HighestBits(*bytes, bit_count);
}
*val = bits;
return true;
}
bool BitBuffer::ReadBits(uint32_t* val, size_t bit_count) {
return PeekBits(val, bit_count) && ConsumeBits(bit_count);
}
bool BitBuffer::ConsumeBytes(size_t byte_count) {
return ConsumeBits(byte_count * 8);
}
bool BitBuffer::ConsumeBits(size_t bit_count) {
if (bit_count > RemainingBitCount()) {
return false;
}
byte_offset_ += (bit_offset_ + bit_count) / 8;
bit_offset_ = (bit_offset_ + bit_count) % 8;
return true;
}
bool BitBuffer::ReadExponentialGolomb(uint32_t* val) {
if (!val) {
return false;
}
// Store off the current byte/bit offset, in case we want to restore them due
// to a failed parse.
size_t original_byte_offset = byte_offset_;
size_t original_bit_offset = bit_offset_;
// Count the number of leading 0 bits by peeking/consuming them one at a time.
size_t zero_bit_count = 0;
uint32_t peeked_bit;
while (PeekBits(&peeked_bit, 1) && peeked_bit == 0) {
zero_bit_count++;
ConsumeBits(1);
}
// We should either be at the end of the stream, or the next bit should be 1.
RTC_DCHECK(!PeekBits(&peeked_bit, 1) || peeked_bit == 1);
// The bit count of the value is the number of zeros + 1. Make sure that many
// bits fit in a uint32_t and that we have enough bits left for it, and then
// read the value.
size_t value_bit_count = zero_bit_count + 1;
if (value_bit_count > 32 || !ReadBits(val, value_bit_count)) {
RTC_CHECK(Seek(original_byte_offset, original_bit_offset));
return false;
}
*val -= 1;
return true;
}
bool BitBuffer::ReadSignedExponentialGolomb(int32_t* val) {
uint32_t unsigned_val;
if (!ReadExponentialGolomb(&unsigned_val)) {
return false;
}
if ((unsigned_val & 1) == 0) {
*val = -static_cast<int32_t>(unsigned_val / 2);
} else {
*val = (unsigned_val + 1) / 2;
}
return true;
}
void BitBuffer::GetCurrentOffset(
size_t* out_byte_offset, size_t* out_bit_offset) {
RTC_CHECK(out_byte_offset != NULL);
RTC_CHECK(out_bit_offset != NULL);
*out_byte_offset = byte_offset_;
*out_bit_offset = bit_offset_;
}
bool BitBuffer::Seek(size_t byte_offset, size_t bit_offset) {
if (byte_offset > byte_count_ || bit_offset > 7 ||
(byte_offset == byte_count_ && bit_offset > 0)) {
return false;
}
byte_offset_ = byte_offset;
bit_offset_ = bit_offset;
return true;
}
BitBufferWriter::BitBufferWriter(uint8_t* bytes, size_t byte_count)
: BitBuffer(bytes, byte_count), writable_bytes_(bytes) {
}
bool BitBufferWriter::WriteUInt8(uint8_t val) {
return WriteBits(val, sizeof(uint8_t) * 8);
}
bool BitBufferWriter::WriteUInt16(uint16_t val) {
return WriteBits(val, sizeof(uint16_t) * 8);
}
bool BitBufferWriter::WriteUInt32(uint32_t val) {
return WriteBits(val, sizeof(uint32_t) * 8);
}
bool BitBufferWriter::WriteBits(uint64_t val, size_t bit_count) {
if (bit_count > RemainingBitCount()) {
return false;
}
size_t total_bits = bit_count;
// For simplicity, push the bits we want to read from val to the highest bits.
val <<= (sizeof(uint64_t) * 8 - bit_count);
uint8_t* bytes = writable_bytes_ + byte_offset_;
// The first byte is relatively special; the bit offset to write to may put us
// in the middle of the byte, and the total bit count to write may require we
// save the bits at the end of the byte.
size_t remaining_bits_in_current_byte = 8 - bit_offset_;
size_t bits_in_first_byte =
std::min(bit_count, remaining_bits_in_current_byte);
*bytes = WritePartialByte(
HighestByte(val), bits_in_first_byte, *bytes, bit_offset_);
if (bit_count <= remaining_bits_in_current_byte) {
// Nothing left to write, so quit early.
return ConsumeBits(total_bits);
}
// Subtract what we've written from the bit count, shift it off the value, and
// write the remaining full bytes.
val <<= bits_in_first_byte;
bytes++;
bit_count -= bits_in_first_byte;
while (bit_count >= 8) {
*bytes++ = HighestByte(val);
val <<= 8;
bit_count -= 8;
}
// Last byte may also be partial, so write the remaining bits from the top of
// val.
if (bit_count > 0) {
*bytes = WritePartialByte(HighestByte(val), bit_count, *bytes, 0);
}
// All done! Consume the bits we've written.
return ConsumeBits(total_bits);
}
bool BitBufferWriter::WriteExponentialGolomb(uint32_t val) {
// We don't support reading UINT32_MAX, because it doesn't fit in a uint32_t
// when encoded, so don't support writing it either.
if (val == std::numeric_limits<uint32_t>::max()) {
return false;
}
uint64_t val_to_encode = static_cast<uint64_t>(val) + 1;
// We need to write CountBits(val+1) 0s and then val+1. Since val (as a
// uint64_t) has leading zeros, we can just write the total golomb encoded
// size worth of bits, knowing the value will appear last.
return WriteBits(val_to_encode, CountBits(val_to_encode) * 2 - 1);
}
} // namespace rtc

View File

@ -0,0 +1,122 @@
/*
* Copyright 2015 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_BASE_BITBUFFER_H_
#define WEBRTC_BASE_BITBUFFER_H_
#include <stdint.h> // For integer types.
#include <stddef.h> // For size_t.
#include "webrtc/base/constructormagic.h"
namespace rtc {
// A class, similar to ByteBuffer, that can parse bit-sized data out of a set of
// bytes. Has a similar API to ByteBuffer, plus methods for reading bit-sized
// and exponential golomb encoded data. For a writable version, use
// BitBufferWriter. Unlike ByteBuffer, this class doesn't make a copy of the
// source bytes, so it can be used on read-only data.
// Sizes/counts specify bits/bytes, for clarity.
// Byte order is assumed big-endian/network.
class BitBuffer {
public:
BitBuffer(const uint8_t* bytes, size_t byte_count);
// Gets the current offset, in bytes/bits, from the start of the buffer. The
// bit offset is the offset into the current byte, in the range [0,7].
void GetCurrentOffset(size_t* out_byte_offset, size_t* out_bit_offset);
// The remaining bits in the byte buffer.
uint64_t RemainingBitCount() const;
// Reads byte-sized values from the buffer. Returns false if there isn't
// enough data left for the specified type.
bool ReadUInt8(uint8_t* val);
bool ReadUInt16(uint16_t* val);
bool ReadUInt32(uint32_t* val);
// Reads bit-sized values from the buffer. Returns false if there isn't enough
// data left for the specified bit count.
bool ReadBits(uint32_t* val, size_t bit_count);
// Peeks bit-sized values from the buffer. Returns false if there isn't enough
// data left for the specified number of bits. Doesn't move the current
// offset.
bool PeekBits(uint32_t* val, size_t bit_count);
// Reads the exponential golomb encoded value at the current offset.
// Exponential golomb values are encoded as:
// 1) x = source val + 1
// 2) In binary, write [countbits(x) - 1] 0s, then x
// To decode, we count the number of leading 0 bits, read that many + 1 bits,
// and increment the result by 1.
// Returns false if there isn't enough data left for the specified type, or if
// the value wouldn't fit in a uint32_t.
bool ReadExponentialGolomb(uint32_t* val);
// Reads signed exponential golomb values at the current offset. Signed
// exponential golomb values are just the unsigned values mapped to the
// sequence 0, 1, -1, 2, -2, etc. in order.
bool ReadSignedExponentialGolomb(int32_t* val);
// Moves current position |byte_count| bytes forward. Returns false if
// there aren't enough bytes left in the buffer.
bool ConsumeBytes(size_t byte_count);
// Moves current position |bit_count| bits forward. Returns false if
// there aren't enough bits left in the buffer.
bool ConsumeBits(size_t bit_count);
// Sets the current offset to the provided byte/bit offsets. The bit
// offset is from the given byte, in the range [0,7].
bool Seek(size_t byte_offset, size_t bit_offset);
protected:
const uint8_t* const bytes_;
// The total size of |bytes_|.
size_t byte_count_;
// The current offset, in bytes, from the start of |bytes_|.
size_t byte_offset_;
// The current offset, in bits, into the current byte.
size_t bit_offset_;
RTC_DISALLOW_COPY_AND_ASSIGN(BitBuffer);
};
// A BitBuffer API for write operations. Supports symmetric write APIs to the
// reading APIs of BitBuffer. Note that the read/write offset is shared with the
// BitBuffer API, so both reading and writing will consume bytes/bits.
class BitBufferWriter : public BitBuffer {
public:
// Constructs a bit buffer for the writable buffer of |bytes|.
BitBufferWriter(uint8_t* bytes, size_t byte_count);
// Writes byte-sized values from the buffer. Returns false if there isn't
// enough data left for the specified type.
bool WriteUInt8(uint8_t val);
bool WriteUInt16(uint16_t val);
bool WriteUInt32(uint32_t val);
// Writes bit-sized values to the buffer. Returns false if there isn't enough
// room left for the specified number of bits.
bool WriteBits(uint64_t val, size_t bit_count);
// Writes the exponential golomb encoded version of the supplied value.
// Returns false if there isn't enough room left for the value.
bool WriteExponentialGolomb(uint32_t val);
private:
// The buffer, as a writable array.
uint8_t* const writable_bytes_;
RTC_DISALLOW_COPY_AND_ASSIGN(BitBufferWriter);
};
} // namespace rtc
#endif // WEBRTC_BASE_BITBUFFER_H_
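To make the exponential golomb scheme described above concrete, here is a small bit-string model in JS (illustrative only; the patch itself is C++). A value v is written as countbits(v+1) - 1 zero bits followed by v+1 in binary, so 0 encodes as "1", 1 as "010", and 3 as "00100":
// Hypothetical sketch of the encoding behind
// ReadExponentialGolomb / WriteExponentialGolomb.
function expGolombEncode(v) {
  var x = (v + 1).toString(2);          // step 1: x = v + 1, in binary
  return "0".repeat(x.length - 1) + x;  // step 2: countbits(x) - 1 zeros, then x
}
function expGolombDecode(bits) {
  var zeros = bits.indexOf("1");        // count the leading zero bits
  var x = parseInt(bits.substr(zeros, zeros + 1), 2);  // read zeros + 1 value bits
  return x - 1;
}
// Round-trips for every v below UINT32_MAX; UINT32_MAX itself is
// rejected by the writer above because v + 1 no longer fits.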

View File

@ -0,0 +1,330 @@
/*
* Copyright 2015 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/base/arraysize.h"
#include "webrtc/base/bitbuffer.h"
#include "webrtc/base/bytebuffer.h"
#include "webrtc/base/common.h"
#include "webrtc/base/gunit.h"
namespace rtc {
TEST(BitBufferTest, ConsumeBits) {
const uint8_t bytes[64] = {0};
BitBuffer buffer(bytes, 32);
uint64_t total_bits = 32 * 8;
EXPECT_EQ(total_bits, buffer.RemainingBitCount());
EXPECT_TRUE(buffer.ConsumeBits(3));
total_bits -= 3;
EXPECT_EQ(total_bits, buffer.RemainingBitCount());
EXPECT_TRUE(buffer.ConsumeBits(3));
total_bits -= 3;
EXPECT_EQ(total_bits, buffer.RemainingBitCount());
EXPECT_TRUE(buffer.ConsumeBits(15));
total_bits -= 15;
EXPECT_EQ(total_bits, buffer.RemainingBitCount());
EXPECT_TRUE(buffer.ConsumeBits(37));
total_bits -= 37;
EXPECT_EQ(total_bits, buffer.RemainingBitCount());
EXPECT_FALSE(buffer.ConsumeBits(32 * 8));
EXPECT_EQ(total_bits, buffer.RemainingBitCount());
}
TEST(BitBufferTest, ReadBytesAligned) {
const uint8_t bytes[] = {0x0A, 0xBC, 0xDE, 0xF1, 0x23, 0x45, 0x67, 0x89};
uint8_t val8;
uint16_t val16;
uint32_t val32;
BitBuffer buffer(bytes, 8);
EXPECT_TRUE(buffer.ReadUInt8(&val8));
EXPECT_EQ(0x0Au, val8);
EXPECT_TRUE(buffer.ReadUInt8(&val8));
EXPECT_EQ(0xBCu, val8);
EXPECT_TRUE(buffer.ReadUInt16(&val16));
EXPECT_EQ(0xDEF1u, val16);
EXPECT_TRUE(buffer.ReadUInt32(&val32));
EXPECT_EQ(0x23456789u, val32);
}
TEST(BitBufferTest, ReadBytesOffset4) {
const uint8_t bytes[] = {0x0A, 0xBC, 0xDE, 0xF1, 0x23,
0x45, 0x67, 0x89, 0x0A};
uint8_t val8;
uint16_t val16;
uint32_t val32;
BitBuffer buffer(bytes, 9);
EXPECT_TRUE(buffer.ConsumeBits(4));
EXPECT_TRUE(buffer.ReadUInt8(&val8));
EXPECT_EQ(0xABu, val8);
EXPECT_TRUE(buffer.ReadUInt8(&val8));
EXPECT_EQ(0xCDu, val8);
EXPECT_TRUE(buffer.ReadUInt16(&val16));
EXPECT_EQ(0xEF12u, val16);
EXPECT_TRUE(buffer.ReadUInt32(&val32));
EXPECT_EQ(0x34567890u, val32);
}
TEST(BitBufferTest, ReadBytesOffset3) {
// The pattern we'll check against is counting down from 0b1111. It looks
// weird here because it's all offset by 3.
// Byte pattern is:
// 56701234
// 0b00011111,
// 0b11011011,
// 0b10010111,
// 0b01010011,
// 0b00001110,
// 0b11001010,
// 0b10000110,
// 0b01000010
// xxxxx <-- last 5 bits unused.
// The bytes. It almost looks like counting down by two at a time, except the
// jump at 5->3->0, since that's when the high bit is turned off.
const uint8_t bytes[] = {0x1F, 0xDB, 0x97, 0x53, 0x0E, 0xCA, 0x86, 0x42};
uint8_t val8;
uint16_t val16;
uint32_t val32;
BitBuffer buffer(bytes, 8);
EXPECT_TRUE(buffer.ConsumeBits(3));
EXPECT_TRUE(buffer.ReadUInt8(&val8));
EXPECT_EQ(0xFEu, val8);
EXPECT_TRUE(buffer.ReadUInt16(&val16));
EXPECT_EQ(0xDCBAu, val16);
EXPECT_TRUE(buffer.ReadUInt32(&val32));
EXPECT_EQ(0x98765432u, val32);
// 5 bits left unread. Not enough to read a uint8_t.
EXPECT_EQ(5u, buffer.RemainingBitCount());
EXPECT_FALSE(buffer.ReadUInt8(&val8));
}
TEST(BitBufferTest, ReadBits) {
// Bit values are:
// 0b01001101,
// 0b00110010
const uint8_t bytes[] = {0x4D, 0x32};
uint32_t val;
BitBuffer buffer(bytes, 2);
EXPECT_TRUE(buffer.ReadBits(&val, 3));
// 0b010
EXPECT_EQ(0x2u, val);
EXPECT_TRUE(buffer.ReadBits(&val, 2));
// 0b01
EXPECT_EQ(0x1u, val);
EXPECT_TRUE(buffer.ReadBits(&val, 7));
// 0b1010011
EXPECT_EQ(0x53u, val);
EXPECT_TRUE(buffer.ReadBits(&val, 2));
// 0b00
EXPECT_EQ(0x0u, val);
EXPECT_TRUE(buffer.ReadBits(&val, 1));
// 0b1
EXPECT_EQ(0x1u, val);
EXPECT_TRUE(buffer.ReadBits(&val, 1));
// 0b0
EXPECT_EQ(0x0u, val);
EXPECT_FALSE(buffer.ReadBits(&val, 1));
}
TEST(BitBufferTest, SetOffsetValues) {
uint8_t bytes[4] = {0};
BitBufferWriter buffer(bytes, 4);
size_t byte_offset, bit_offset;
// Bit offsets are [0,7].
EXPECT_TRUE(buffer.Seek(0, 0));
EXPECT_TRUE(buffer.Seek(0, 7));
buffer.GetCurrentOffset(&byte_offset, &bit_offset);
EXPECT_EQ(0u, byte_offset);
EXPECT_EQ(7u, bit_offset);
EXPECT_FALSE(buffer.Seek(0, 8));
buffer.GetCurrentOffset(&byte_offset, &bit_offset);
EXPECT_EQ(0u, byte_offset);
EXPECT_EQ(7u, bit_offset);
// Byte offsets are [0,length]. At byte offset length, the bit offset must be
// 0.
EXPECT_TRUE(buffer.Seek(0, 0));
EXPECT_TRUE(buffer.Seek(2, 4));
buffer.GetCurrentOffset(&byte_offset, &bit_offset);
EXPECT_EQ(2u, byte_offset);
EXPECT_EQ(4u, bit_offset);
EXPECT_TRUE(buffer.Seek(4, 0));
EXPECT_FALSE(buffer.Seek(5, 0));
buffer.GetCurrentOffset(&byte_offset, &bit_offset);
EXPECT_EQ(4u, byte_offset);
EXPECT_EQ(0u, bit_offset);
EXPECT_FALSE(buffer.Seek(4, 1));
// Disable the death test on Android because it relies on fork(), which
// doesn't play nicely there.
#if defined(GTEST_HAS_DEATH_TEST)
#if !defined(WEBRTC_ANDROID)
// Passing a NULL out parameter is death.
EXPECT_DEATH(buffer.GetCurrentOffset(&byte_offset, NULL), "");
#endif
#endif
}
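// Encodes |val| + 1 as an exponential golomb codeword and left-aligns it in a
// uint64_t, so the big-endian WriteUInt64 below places the codeword at the
// start of the buffer.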
uint64_t GolombEncoded(uint32_t val) {
val++;
uint32_t bit_counter = val;
uint64_t bit_count = 0;
while (bit_counter > 0) {
bit_count++;
bit_counter >>= 1;
}
return static_cast<uint64_t>(val) << (64 - (bit_count * 2 - 1));
}
TEST(BitBufferTest, GolombUint32Values) {
ByteBuffer byteBuffer;
byteBuffer.Resize(16);
BitBuffer buffer(reinterpret_cast<const uint8_t*>(byteBuffer.Data()),
byteBuffer.Capacity());
// Test over the uint32_t range with a large enough step that the test doesn't
// take forever. Around 20,000 iterations should do.
const int kStep = std::numeric_limits<uint32_t>::max() / 20000;
for (uint32_t i = 0; i < std::numeric_limits<uint32_t>::max() - kStep;
i += kStep) {
uint64_t encoded_val = GolombEncoded(i);
byteBuffer.Clear();
byteBuffer.WriteUInt64(encoded_val);
uint32_t decoded_val;
EXPECT_TRUE(buffer.Seek(0, 0));
EXPECT_TRUE(buffer.ReadExponentialGolomb(&decoded_val));
EXPECT_EQ(i, decoded_val);
}
}
TEST(BitBufferTest, SignedGolombValues) {
uint8_t golomb_bits[] = {
0x80, // 1
0x40, // 010
0x60, // 011
0x20, // 00100
0x38, // 00111
};
int32_t expected[] = {0, 1, -1, 2, -3};
for (size_t i = 0; i < sizeof(golomb_bits); ++i) {
BitBuffer buffer(&golomb_bits[i], 1);
int32_t decoded_val;
ASSERT_TRUE(buffer.ReadSignedExponentialGolomb(&decoded_val));
EXPECT_EQ(expected[i], decoded_val)
<< "Mismatch in expected/decoded value for golomb_bits[" << i
<< "]: " << static_cast<int>(golomb_bits[i]);
}
}
TEST(BitBufferTest, NoGolombOverread) {
const uint8_t bytes[] = {0x00, 0xFF, 0xFF};
// Make sure the bit buffer correctly enforces byte length on golomb reads.
// If it didn't, the above buffer would be valid at 3 bytes.
BitBuffer buffer(bytes, 1);
uint32_t decoded_val;
EXPECT_FALSE(buffer.ReadExponentialGolomb(&decoded_val));
BitBuffer longer_buffer(bytes, 2);
EXPECT_FALSE(longer_buffer.ReadExponentialGolomb(&decoded_val));
BitBuffer longest_buffer(bytes, 3);
EXPECT_TRUE(longest_buffer.ReadExponentialGolomb(&decoded_val));
// Golomb should have read 9 bits, so 0x01FF, and since it is golomb, the
// result is 0x01FF - 1 = 0x01FE.
EXPECT_EQ(0x01FEu, decoded_val);
}
TEST(BitBufferWriterTest, SymmetricReadWrite) {
uint8_t bytes[16] = {0};
BitBufferWriter buffer(bytes, 4);
// Write some bit data at various sizes.
EXPECT_TRUE(buffer.WriteBits(0x2u, 3));
EXPECT_TRUE(buffer.WriteBits(0x1u, 2));
EXPECT_TRUE(buffer.WriteBits(0x53u, 7));
EXPECT_TRUE(buffer.WriteBits(0x0u, 2));
EXPECT_TRUE(buffer.WriteBits(0x1u, 1));
EXPECT_TRUE(buffer.WriteBits(0x1ABCDu, 17));
// That should be all that fits in the buffer.
EXPECT_FALSE(buffer.WriteBits(1, 1));
EXPECT_TRUE(buffer.Seek(0, 0));
uint32_t val;
EXPECT_TRUE(buffer.ReadBits(&val, 3));
EXPECT_EQ(0x2u, val);
EXPECT_TRUE(buffer.ReadBits(&val, 2));
EXPECT_EQ(0x1u, val);
EXPECT_TRUE(buffer.ReadBits(&val, 7));
EXPECT_EQ(0x53u, val);
EXPECT_TRUE(buffer.ReadBits(&val, 2));
EXPECT_EQ(0x0u, val);
EXPECT_TRUE(buffer.ReadBits(&val, 1));
EXPECT_EQ(0x1u, val);
EXPECT_TRUE(buffer.ReadBits(&val, 17));
EXPECT_EQ(0x1ABCDu, val);
// And there should be nothing left.
EXPECT_FALSE(buffer.ReadBits(&val, 1));
}
TEST(BitBufferWriterTest, SymmetricBytesMisaligned) {
uint8_t bytes[16] = {0};
BitBufferWriter buffer(bytes, 16);
// Offset 3, to get things misaligned.
EXPECT_TRUE(buffer.ConsumeBits(3));
EXPECT_TRUE(buffer.WriteUInt8(0x12u));
EXPECT_TRUE(buffer.WriteUInt16(0x3456u));
EXPECT_TRUE(buffer.WriteUInt32(0x789ABCDEu));
buffer.Seek(0, 3);
uint8_t val8;
uint16_t val16;
uint32_t val32;
EXPECT_TRUE(buffer.ReadUInt8(&val8));
EXPECT_EQ(0x12u, val8);
EXPECT_TRUE(buffer.ReadUInt16(&val16));
EXPECT_EQ(0x3456u, val16);
EXPECT_TRUE(buffer.ReadUInt32(&val32));
EXPECT_EQ(0x789ABCDEu, val32);
}
TEST(BitBufferWriterTest, SymmetricGolomb) {
char test_string[] = "my precious";
uint8_t bytes[64] = {0};
BitBufferWriter buffer(bytes, 64);
for (size_t i = 0; i < arraysize(test_string); ++i) {
EXPECT_TRUE(buffer.WriteExponentialGolomb(test_string[i]));
}
buffer.Seek(0, 0);
for (size_t i = 0; i < arraysize(test_string); ++i) {
uint32_t val;
EXPECT_TRUE(buffer.ReadExponentialGolomb(&val));
EXPECT_LE(val, std::numeric_limits<uint8_t>::max());
EXPECT_EQ(test_string[i], static_cast<char>(val));
}
}
TEST(BitBufferWriterTest, WriteClearsBits) {
uint8_t bytes[] = {0xFF, 0xFF};
BitBufferWriter buffer(bytes, 2);
EXPECT_TRUE(buffer.ConsumeBits(3));
EXPECT_TRUE(buffer.WriteBits(0, 1));
EXPECT_EQ(0xEFu, bytes[0]);
EXPECT_TRUE(buffer.WriteBits(0, 3));
EXPECT_EQ(0xE1u, bytes[0]);
EXPECT_TRUE(buffer.WriteBits(0, 2));
EXPECT_EQ(0xE0u, bytes[0]);
EXPECT_EQ(0x7F, bytes[1]);
}
} // namespace rtc

View File

@ -13,7 +13,8 @@
#include <string.h>
#include "webrtc/base/common.h"
// common.h isn't in the rtc_approved list
//#include "webrtc/base/common.h"
#include "webrtc/base/scoped_ptr.h"
namespace rtc {
@ -52,12 +53,12 @@ class Buffer {
}
void SetData(const void* data, size_t size) {
ASSERT(data != NULL || size == 0);
assert(data != NULL || size == 0);
SetSize(size);
memcpy(data_.get(), data, size);
}
void AppendData(const void* data, size_t size) {
ASSERT(data != NULL || size == 0);
assert(data != NULL || size == 0);
size_t old_size = size_;
SetSize(size_ + size);
memcpy(data_.get() + old_size, data, size);
@ -76,7 +77,7 @@ class Buffer {
}
void TransferTo(Buffer* buf) {
ASSERT(buf != NULL);
assert(buf != NULL);
buf->data_.reset(data_.release());
buf->size_ = size_;
buf->capacity_ = capacity_;

View File

@ -91,6 +91,8 @@ namespace rtc {
LAZY_STREAM(rtc::FatalMessage(__FILE__, __LINE__).stream(), !(condition)) \
<< "Check failed: " #condition << std::endl << "# "
#define RTC_CHECK(condition) CHECK(condition)
// Helper macro for binary operators.
// Don't use this macro directly in your code, use CHECK_EQ et al below.
//
@ -185,6 +187,36 @@ DEFINE_CHECK_OP_IMPL(GT, > )
#define DCHECK_GT(v1, v2) EAT_STREAM_PARAMETERS((v1) > (v2))
#endif
#define RTC_CHECK_EQ(val1, val2) CHECK_OP(EQ, ==, val1, val2)
#define RTC_CHECK_NE(val1, val2) CHECK_OP(NE, !=, val1, val2)
#define RTC_CHECK_LE(val1, val2) CHECK_OP(LE, <=, val1, val2)
#define RTC_CHECK_LT(val1, val2) CHECK_OP(LT, < , val1, val2)
#define RTC_CHECK_GE(val1, val2) CHECK_OP(GE, >=, val1, val2)
#define RTC_CHECK_GT(val1, val2) CHECK_OP(GT, > , val1, val2)
// The RTC_DCHECK macro is equivalent to RTC_CHECK except that it only generates
// code in debug builds. It does reference the condition parameter in all cases,
// though, so callers won't risk getting warnings about unused variables.
#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON))
#define RTC_DCHECK_IS_ON 1
#define RTC_DCHECK(condition) CHECK(condition)
#define RTC_DCHECK_EQ(v1, v2) CHECK_EQ(v1, v2)
#define RTC_DCHECK_NE(v1, v2) CHECK_NE(v1, v2)
#define RTC_DCHECK_LE(v1, v2) CHECK_LE(v1, v2)
#define RTC_DCHECK_LT(v1, v2) CHECK_LT(v1, v2)
#define RTC_DCHECK_GE(v1, v2) CHECK_GE(v1, v2)
#define RTC_DCHECK_GT(v1, v2) CHECK_GT(v1, v2)
#else
#define RTC_DCHECK_IS_ON 0
#define RTC_DCHECK(condition) EAT_STREAM_PARAMETERS(condition)
#define RTC_DCHECK_EQ(v1, v2) EAT_STREAM_PARAMETERS((v1) == (v2))
#define RTC_DCHECK_NE(v1, v2) EAT_STREAM_PARAMETERS((v1) != (v2))
#define RTC_DCHECK_LE(v1, v2) EAT_STREAM_PARAMETERS((v1) <= (v2))
#define RTC_DCHECK_LT(v1, v2) EAT_STREAM_PARAMETERS((v1) < (v2))
#define RTC_DCHECK_GE(v1, v2) EAT_STREAM_PARAMETERS((v1) >= (v2))
#define RTC_DCHECK_GT(v1, v2) EAT_STREAM_PARAMETERS((v1) > (v2))
#endif
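// For example, RTC_DCHECK_GE(a, b) performs the actual comparison only when
// RTC_DCHECK_IS_ON; otherwise it merely references both operands through
// EAT_STREAM_PARAMETERS, avoiding unused-variable warnings.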
// This is identical to LogMessageVoidify except in name.
class FatalMessageVoidify {
public:

View File

@ -17,6 +17,8 @@
#undef DISALLOW_ASSIGN
#define DISALLOW_ASSIGN(TypeName) \
void operator=(const TypeName&)
#define RTC_DISALLOW_ASSIGN(TypeName) \
void operator=(const TypeName&) = delete
// A macro to disallow the evil copy constructor and operator= functions
// This should be used in the private: declarations for a class.
@ -24,6 +26,9 @@
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&); \
DISALLOW_ASSIGN(TypeName)
#define RTC_DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&) = delete; \
RTC_DISALLOW_ASSIGN(TypeName)
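// For example:
//   class Foo {
//    public:
//     Foo();
//    private:
//     RTC_DISALLOW_COPY_AND_ASSIGN(Foo);
//   };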
// Alternative, less-accurate legacy name.
#undef DISALLOW_EVIL_CONSTRUCTORS
@ -40,6 +45,9 @@
#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
TypeName(); \
DISALLOW_EVIL_CONSTRUCTORS(TypeName)
#define RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
TypeName() = delete; \
RTC_DISALLOW_COPY_AND_ASSIGN(TypeName)
#endif // WEBRTC_BASE_CONSTRUCTORMAGIC_H_

View File

@ -639,6 +639,9 @@ struct VideoCodecVP9 {
bool frameDroppingOn;
int keyFrameInterval;
bool adaptiveQpMode;
bool automaticResizeOn;
unsigned char numberOfSpatialLayers;
bool flexibleMode;
};
// H264 specific.

View File

@ -15,6 +15,7 @@
#include <string.h> // memcpy
#include <algorithm>
#include <limits>
#include "webrtc/base/constructormagic.h"
#include "webrtc/common_types.h"
@ -31,8 +32,16 @@ struct RTPAudioHeader {
};
const int16_t kNoPictureId = -1;
const int16_t kMaxOneBytePictureId = 0x7F; // 7 bits
const int16_t kMaxTwoBytePictureId = 0x7FFF; // 15 bits
const int16_t kNoTl0PicIdx = -1;
const uint8_t kNoTemporalIdx = 0xFF;
const uint8_t kNoSpatialIdx = 0xFF;
const uint8_t kNoGofIdx = 0xFF;
const uint8_t kNumVp9Buffers = 8;
const size_t kMaxVp9RefPics = 3;
const size_t kMaxVp9FramesInGof = 0xFF; // 8 bits
const size_t kMaxVp9NumberOfSpatialLayers = 8;
const int kNoKeyIdx = -1;
struct RTPVideoHeaderVP8 {
@ -61,37 +70,164 @@ struct RTPVideoHeaderVP8 {
// in a VP8 partition. Otherwise false
};
enum TemporalStructureMode {
kTemporalStructureMode1, // 1 temporal layer structure - i.e., IPPP...
kTemporalStructureMode2, // 2 temporal layers 0-1-0-1...
kTemporalStructureMode3 // 3 temporal layers 0-2-1-2-0-2-1-2...
};
struct GofInfoVP9 {
void SetGofInfoVP9(TemporalStructureMode tm) {
switch (tm) {
case kTemporalStructureMode1:
num_frames_in_gof = 1;
temporal_idx[0] = 0;
temporal_up_switch[0] = false;
num_ref_pics[0] = 1;
pid_diff[0][0] = 1;
break;
case kTemporalStructureMode2:
num_frames_in_gof = 2;
temporal_idx[0] = 0;
temporal_up_switch[0] = false;
num_ref_pics[0] = 1;
pid_diff[0][0] = 2;
temporal_idx[1] = 1;
temporal_up_switch[1] = true;
num_ref_pics[1] = 1;
pid_diff[1][0] = 1;
break;
case kTemporalStructureMode3:
num_frames_in_gof = 4;
temporal_idx[0] = 0;
temporal_up_switch[0] = false;
num_ref_pics[0] = 1;
pid_diff[0][0] = 4;
temporal_idx[1] = 2;
temporal_up_switch[1] = true;
num_ref_pics[1] = 1;
pid_diff[1][0] = 1;
temporal_idx[2] = 1;
temporal_up_switch[2] = true;
num_ref_pics[2] = 1;
pid_diff[2][0] = 2;
temporal_idx[3] = 2;
temporal_up_switch[3] = false;
num_ref_pics[3] = 2;
pid_diff[3][0] = 1;
pid_diff[3][1] = 2;
break;
default:
assert(false);
}
}
void CopyGofInfoVP9(const GofInfoVP9& src) {
num_frames_in_gof = src.num_frames_in_gof;
for (size_t i = 0; i < num_frames_in_gof; ++i) {
temporal_idx[i] = src.temporal_idx[i];
temporal_up_switch[i] = src.temporal_up_switch[i];
num_ref_pics[i] = src.num_ref_pics[i];
for (uint8_t r = 0; r < num_ref_pics[i]; ++r) {
pid_diff[i][r] = src.pid_diff[i][r];
}
}
}
size_t num_frames_in_gof;
uint8_t temporal_idx[kMaxVp9FramesInGof];
bool temporal_up_switch[kMaxVp9FramesInGof];
uint8_t num_ref_pics[kMaxVp9FramesInGof];
uint8_t pid_diff[kMaxVp9FramesInGof][kMaxVp9RefPics];
};
struct RTPVideoHeaderVP9 {
void InitRTPVideoHeaderVP9() {
inter_pic_predicted = false;
flexible_mode = false;
beginning_of_frame = false;
end_of_frame = false;
ss_data_available = false;
picture_id = kNoPictureId;
max_picture_id = kMaxTwoBytePictureId;
tl0_pic_idx = kNoTl0PicIdx;
temporal_idx = kNoTemporalIdx;
spatial_idx = kNoSpatialIdx;
temporal_up_switch = false;
inter_layer_predicted = false;
gof_idx = kNoGofIdx;
num_ref_pics = 0;
num_spatial_layers = 1;
}
bool inter_pic_predicted; // This layer frame is dependent on previously
// coded frame(s).
bool flexible_mode; // This frame is in flexible mode.
bool beginning_of_frame; // True if this packet is the first in a VP9 layer
// frame.
bool end_of_frame; // True if this packet is the last in a VP9 layer frame.
bool ss_data_available; // True if SS data is available in this payload
// descriptor.
int16_t picture_id; // PictureID index, 15 bits;
// kNoPictureId if PictureID does not exist.
int16_t max_picture_id; // Maximum picture ID index; either 0x7F or 0x7FFF;
int16_t tl0_pic_idx; // TL0PIC_IDX, 8 bits;
// kNoTl0PicIdx means no value provided.
uint8_t temporal_idx; // Temporal layer index, or kNoTemporalIdx.
uint8_t spatial_idx; // Spatial layer index, or kNoSpatialIdx.
bool temporal_up_switch; // True if upswitch to higher frame rate is possible
// starting from this frame.
bool inter_layer_predicted; // Frame is dependent on directly lower spatial
// layer frame.
uint8_t gof_idx; // Index to predefined temporal frame info in SS data.
uint8_t num_ref_pics; // Number of reference pictures used by this layer
// frame.
uint8_t pid_diff[kMaxVp9RefPics]; // P_DIFF signaled to derive the PictureID
// of the reference pictures.
int16_t ref_picture_id[kMaxVp9RefPics]; // PictureID of reference pictures.
// SS data.
size_t num_spatial_layers; // Always populated.
bool spatial_layer_resolution_present;
uint16_t width[kMaxVp9NumberOfSpatialLayers];
uint16_t height[kMaxVp9NumberOfSpatialLayers];
GofInfoVP9 gof;
};
#if WEBRTC_48_H264_IMPL
// The packetization types that we support: single, aggregated, and fragmented.
enum H264PacketizationTypes {
kH264SingleNalu, // This packet contains a single NAL unit.
kH264StapA, // This packet contains STAP-A (single time
// aggregation) packets. If this packet has an
// associated NAL unit type, it'll be for the
// first such aggregated packet.
kH264FuA, // This packet contains a FU-A (fragmentation
// unit) packet, meaning it is a part of a frame
// that was too large to fit into a single packet.
};
struct RTPVideoHeaderH264 {
uint8_t nalu_type; // The NAL unit type. If this is a header for a
// fragmented packet, it's the NAL unit type of
// the original data. If this is the header for an
// aggregated packet, it's the NAL unit type of
// the first NAL unit in the packet.
H264PacketizationTypes packetization_type;
};
#else
// Mozilla's OpenH264 implementation
struct RTPVideoHeaderH264 {
bool stap_a;
bool single_nalu;
};
// XXX fix vp9 (bug 1138629)
struct RTPVideoHeaderVP9 {
void InitRTPVideoHeaderVP9() {
nonReference = false;
pictureId = kNoPictureId;
tl0PicIdx = kNoTl0PicIdx;
temporalIdx = kNoTemporalIdx;
layerSync = false;
keyIdx = kNoKeyIdx;
partitionId = 0;
beginningOfPartition = false;
}
bool nonReference; // Frame is discardable.
int16_t pictureId; // Picture ID index, 15 bits;
// kNoPictureId if PictureID does not exist.
int16_t tl0PicIdx; // TL0PIC_IDX, 8 bits;
// kNoTl0PicIdx means no value provided.
uint8_t temporalIdx; // Temporal layer index, or kNoTemporalIdx.
bool layerSync; // This frame is a layer sync frame.
// Disabled if temporalIdx == kNoTemporalIdx.
int keyIdx; // 5 bits; kNoKeyIdx means not used.
int partitionId; // VP9 partition ID
bool beginningOfPartition; // True if this packet is the first
// in a VP9 partition. Otherwise false
};
#endif
union RTPVideoTypeHeader {
RTPVideoHeaderVP8 VP8;
@ -611,6 +747,18 @@ inline AudioFrame& AudioFrame::Append(const AudioFrame& rhs) {
return *this;
}
namespace {
inline int16_t ClampToInt16(int32_t input) {
if (input < -0x00008000) {
return -0x8000;
} else if (input > 0x00007FFF) {
return 0x7FFF;
} else {
return static_cast<int16_t>(input);
}
}
}
inline AudioFrame& AudioFrame::operator+=(const AudioFrame& rhs) {
// Sanity check
assert((num_channels_ > 0) && (num_channels_ < 3));
@ -643,15 +791,9 @@ inline AudioFrame& AudioFrame::operator+=(const AudioFrame& rhs) {
} else {
// IMPROVEMENT this can be done very fast in assembly
for (int i = 0; i < samples_per_channel_ * num_channels_; i++) {
int32_t wrapGuard =
int32_t wrap_guard =
static_cast<int32_t>(data_[i]) + static_cast<int32_t>(rhs.data_[i]);
if (wrapGuard < -32768) {
data_[i] = -32768;
} else if (wrapGuard > 32767) {
data_[i] = 32767;
} else {
data_[i] = (int16_t)wrapGuard;
}
data_[i] = ClampToInt16(wrap_guard);
}
}
energy_ = 0xffffffff;
@ -674,15 +816,9 @@ inline AudioFrame& AudioFrame::operator-=(const AudioFrame& rhs) {
speech_type_ = kUndefined;
for (int i = 0; i < samples_per_channel_ * num_channels_; i++) {
int32_t wrapGuard =
int32_t wrap_guard =
static_cast<int32_t>(data_[i]) - static_cast<int32_t>(rhs.data_[i]);
if (wrapGuard < -32768) {
data_[i] = -32768;
} else if (wrapGuard > 32767) {
data_[i] = 32767;
} else {
data_[i] = (int16_t)wrapGuard;
}
data_[i] = ClampToInt16(wrap_guard);
}
energy_ = 0xffffffff;
return *this;
@ -690,11 +826,24 @@ inline AudioFrame& AudioFrame::operator-=(const AudioFrame& rhs) {
inline bool IsNewerSequenceNumber(uint16_t sequence_number,
uint16_t prev_sequence_number) {
// Distinguish between elements that are exactly 0x8000 apart.
// If s1>s2 and |s1-s2| = 0x8000: IsNewer(s1,s2)=true, IsNewer(s2,s1)=false
// rather than having IsNewer(s1,s2) = IsNewer(s2,s1) = false.
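// For example, IsNewerSequenceNumber(0x8000, 0x0000) is true while
// IsNewerSequenceNumber(0x0000, 0x8000) is false.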
if (static_cast<uint16_t>(sequence_number - prev_sequence_number) == 0x8000) {
return sequence_number > prev_sequence_number;
}
return sequence_number != prev_sequence_number &&
static_cast<uint16_t>(sequence_number - prev_sequence_number) < 0x8000;
}
inline bool IsNewerTimestamp(uint32_t timestamp, uint32_t prev_timestamp) {
// Distinguish between elements that are exactly 0x80000000 apart.
// If t1>t2 and |t1-t2| = 0x80000000: IsNewer(t1,t2)=true,
// IsNewer(t2,t1)=false
// rather than having IsNewer(t1,t2) = IsNewer(t2,t1) = false.
if (static_cast<uint32_t>(timestamp - prev_timestamp) == 0x80000000) {
return timestamp > prev_timestamp;
}
return timestamp != prev_timestamp &&
static_cast<uint32_t>(timestamp - prev_timestamp) < 0x80000000;
}
@ -715,6 +864,46 @@ inline uint32_t LatestTimestamp(uint32_t timestamp1, uint32_t timestamp2) {
return IsNewerTimestamp(timestamp1, timestamp2) ? timestamp1 : timestamp2;
}
// Utility class to unwrap a sequence number to a larger type, for easier
// handling of large ranges. Note that sequence numbers will never be unwrapped
// to a negative value.
class SequenceNumberUnwrapper {
public:
SequenceNumberUnwrapper() : last_seq_(-1) {}
// Get the unwrapped sequence, but don't update the internal state.
int64_t UnwrapWithoutUpdate(uint16_t sequence_number) {
if (last_seq_ == -1)
return sequence_number;
uint16_t cropped_last = static_cast<uint16_t>(last_seq_);
int64_t delta = sequence_number - cropped_last;
if (IsNewerSequenceNumber(sequence_number, cropped_last)) {
if (delta < 0)
delta += (1 << 16); // Wrap forwards.
} else if (delta > 0 && (last_seq_ + delta - (1 << 16)) >= 0) {
// If sequence_number is older but delta is positive, this is a backwards
// wrap-around. However, don't wrap backwards past 0 (unwrapped).
delta -= (1 << 16);
}
return last_seq_ + delta;
}
// Only update the internal state to the specified last (unwrapped) sequence.
void UpdateLast(int64_t last_sequence) { last_seq_ = last_sequence; }
// Unwrap the sequence number and update the internal state.
int64_t Unwrap(uint16_t sequence_number) {
int64_t unwrapped = UnwrapWithoutUpdate(sequence_number);
UpdateLast(unwrapped);
return unwrapped;
}
private:
int64_t last_seq_;
};
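// A minimal usage sketch: crossing the 16-bit wrap keeps the unwrapped
// sequence monotonically increasing.
//
//   SequenceNumberUnwrapper unwrapper;
//   int64_t a = unwrapper.Unwrap(0xFFFF);  // 65535
//   int64_t b = unwrapper.Unwrap(0x0000);  // 65536 (wrapped forward)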
} // namespace webrtc
#endif // MODULE_COMMON_TYPES_H

View File

@ -92,6 +92,8 @@
'source/rtp_format_h264.h',
'source/rtp_format_vp8.cc',
'source/rtp_format_vp8.h',
'source/rtp_format_vp9.cc',
'source/rtp_format_vp9.h',
'source/rtp_format_video_generic.cc',
'source/rtp_format_video_generic.h',
'source/vp8_partition_aggregator.cc',

View File

@ -13,6 +13,7 @@
#include "webrtc/modules/rtp_rtcp/source/rtp_format_h264.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_vp8.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h"
namespace webrtc {
RtpPacketizer* RtpPacketizer::Create(RtpVideoCodecTypes type,
@ -26,6 +27,8 @@ RtpPacketizer* RtpPacketizer::Create(RtpVideoCodecTypes type,
assert(rtp_type_header != NULL);
return new RtpPacketizerVp8(rtp_type_header->VP8, max_payload_len);
case kRtpVideoVp9:
assert(rtp_type_header != NULL);
return new RtpPacketizerVp9(rtp_type_header->VP9, max_payload_len);
case kRtpVideoGeneric:
return new RtpPacketizerGeneric(frame_type, max_payload_len);
case kRtpVideoNone:
@ -40,7 +43,8 @@ RtpDepacketizer* RtpDepacketizer::Create(RtpVideoCodecTypes type) {
return new RtpDepacketizerH264();
case kRtpVideoVp8:
return new RtpDepacketizerVp8();
case kRtpVideoVp9: // XXX fix vp9 packetization (bug 1138629)
case kRtpVideoVp9:
return new RtpDepacketizerVp9();
case kRtpVideoGeneric:
return new RtpDepacketizerGeneric();
case kRtpVideoNone:

View File

@ -0,0 +1,743 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h"
#include <assert.h>
#include <string.h>
#include <cmath>
#include "webrtc/base/bitbuffer.h"
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#define RETURN_FALSE_ON_ERROR(x) \
if (!(x)) { \
return false; \
}
namespace webrtc {
namespace {
// Length of VP9 payload descriptors' fixed part.
const size_t kFixedPayloadDescriptorBytes = 1;
// Packet fragmentation mode. If true, packets are split into (almost) equal
// sizes. Otherwise, as many bytes as possible are fit into one packet.
const bool kBalancedMode = true;
const uint32_t kReservedBitValue0 = 0;
uint8_t TemporalIdxField(const RTPVideoHeaderVP9& hdr, uint8_t def) {
return (hdr.temporal_idx == kNoTemporalIdx) ? def : hdr.temporal_idx;
}
uint8_t SpatialIdxField(const RTPVideoHeaderVP9& hdr, uint8_t def) {
return (hdr.spatial_idx == kNoSpatialIdx) ? def : hdr.spatial_idx;
}
int16_t Tl0PicIdxField(const RTPVideoHeaderVP9& hdr, uint8_t def) {
return (hdr.tl0_pic_idx == kNoTl0PicIdx) ? def : hdr.tl0_pic_idx;
}
// Picture ID:
//
// +-+-+-+-+-+-+-+-+
// I: |M| PICTURE ID | M:0 => picture id is 7 bits.
// +-+-+-+-+-+-+-+-+ M:1 => picture id is 15 bits.
// M: | EXTENDED PID |
// +-+-+-+-+-+-+-+-+
//
size_t PictureIdLength(const RTPVideoHeaderVP9& hdr) {
if (hdr.picture_id == kNoPictureId)
return 0;
return (hdr.max_picture_id == kMaxOneBytePictureId) ? 1 : 2;
}
bool PictureIdPresent(const RTPVideoHeaderVP9& hdr) {
return PictureIdLength(hdr) > 0;
}
// Layer indices:
//
// Flexible mode (F=1): Non-flexible mode (F=0):
//
// +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
// L: | T |U| S |D| | T |U| S |D|
// +-+-+-+-+-+-+-+-+ +-+-+-+-+-+-+-+-+
// | TL0PICIDX |
// +-+-+-+-+-+-+-+-+
//
size_t LayerInfoLength(const RTPVideoHeaderVP9& hdr) {
if (hdr.temporal_idx == kNoTemporalIdx &&
hdr.spatial_idx == kNoSpatialIdx) {
return 0;
}
return hdr.flexible_mode ? 1 : 2;
}
bool LayerInfoPresent(const RTPVideoHeaderVP9& hdr) {
return LayerInfoLength(hdr) > 0;
}
// Reference indices:
//
// +-+-+-+-+-+-+-+-+ P=1,F=1: At least one reference index
// P,F: | P_DIFF |N| up to 3 times has to be specified.
// +-+-+-+-+-+-+-+-+ N=1: An additional P_DIFF follows
// current P_DIFF.
//
size_t RefIndicesLength(const RTPVideoHeaderVP9& hdr) {
if (!hdr.inter_pic_predicted || !hdr.flexible_mode)
return 0;
RTC_DCHECK_GT(hdr.num_ref_pics, 0U);
RTC_DCHECK_LE(hdr.num_ref_pics, kMaxVp9RefPics);
return hdr.num_ref_pics;
}
// Scalability structure (SS).
//
// +-+-+-+-+-+-+-+-+
// V: | N_S |Y|G|-|-|-|
// +-+-+-+-+-+-+-+-+ -|
// Y: | WIDTH | (OPTIONAL) .
// + + .
// | | (OPTIONAL) .
// +-+-+-+-+-+-+-+-+ . N_S + 1 times
// | HEIGHT | (OPTIONAL) .
// + + .
// | | (OPTIONAL) .
// +-+-+-+-+-+-+-+-+ -|
// G: | N_G | (OPTIONAL)
// +-+-+-+-+-+-+-+-+ -|
// N_G: | T |U| R |-|-| (OPTIONAL) .
// +-+-+-+-+-+-+-+-+ -| . N_G times
// | P_DIFF | (OPTIONAL) . R times .
// +-+-+-+-+-+-+-+-+ -| -|
//
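// For example, two spatial layers with resolutions present and a 4-frame GOF
// with one reference picture per frame give:
// 1 (V) + 2 * 4 (width/height) + 1 (N_G) + 4 (T/U/R) + 4 (P_DIFF) = 18 bytes.
//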
size_t SsDataLength(const RTPVideoHeaderVP9& hdr) {
if (!hdr.ss_data_available)
return 0;
RTC_DCHECK_GT(hdr.num_spatial_layers, 0U);
RTC_DCHECK_LE(hdr.num_spatial_layers, kMaxVp9NumberOfSpatialLayers);
RTC_DCHECK_LE(hdr.gof.num_frames_in_gof, kMaxVp9FramesInGof);
size_t length = 1; // V
if (hdr.spatial_layer_resolution_present) {
length += 4 * hdr.num_spatial_layers; // Y
}
if (hdr.gof.num_frames_in_gof > 0) {
++length; // G
}
// N_G
length += hdr.gof.num_frames_in_gof; // T, U, R
for (size_t i = 0; i < hdr.gof.num_frames_in_gof; ++i) {
RTC_DCHECK_LE(hdr.gof.num_ref_pics[i], kMaxVp9RefPics);
length += hdr.gof.num_ref_pics[i]; // R times
}
return length;
}
size_t PayloadDescriptorLengthMinusSsData(const RTPVideoHeaderVP9& hdr) {
return kFixedPayloadDescriptorBytes + PictureIdLength(hdr) +
LayerInfoLength(hdr) + RefIndicesLength(hdr);
}
size_t PayloadDescriptorLength(const RTPVideoHeaderVP9& hdr) {
return PayloadDescriptorLengthMinusSsData(hdr) + SsDataLength(hdr);
}
void QueuePacket(size_t start_pos,
size_t size,
bool layer_begin,
bool layer_end,
RtpPacketizerVp9::PacketInfoQueue* packets) {
RtpPacketizerVp9::PacketInfo packet_info;
packet_info.payload_start_pos = start_pos;
packet_info.size = size;
packet_info.layer_begin = layer_begin;
packet_info.layer_end = layer_end;
packets->push(packet_info);
}
// Picture ID:
//
// +-+-+-+-+-+-+-+-+
// I: |M| PICTURE ID | M:0 => picture id is 7 bits.
// +-+-+-+-+-+-+-+-+ M:1 => picture id is 15 bits.
// M: | EXTENDED PID |
// +-+-+-+-+-+-+-+-+
//
bool WritePictureId(const RTPVideoHeaderVP9& vp9,
rtc::BitBufferWriter* writer) {
bool m_bit = (PictureIdLength(vp9) == 2);
RETURN_FALSE_ON_ERROR(writer->WriteBits(m_bit ? 1 : 0, 1));
RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.picture_id, m_bit ? 15 : 7));
return true;
}
// Layer indices:
//
// Flexible mode (F=1):
//
// +-+-+-+-+-+-+-+-+
// L: | T |U| S |D|
// +-+-+-+-+-+-+-+-+
//
bool WriteLayerInfoCommon(const RTPVideoHeaderVP9& vp9,
rtc::BitBufferWriter* writer) {
RETURN_FALSE_ON_ERROR(writer->WriteBits(TemporalIdxField(vp9, 0), 3));
RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.temporal_up_switch ? 1 : 0, 1));
RETURN_FALSE_ON_ERROR(writer->WriteBits(SpatialIdxField(vp9, 0), 3));
RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.inter_layer_predicted ? 1 : 0, 1));
return true;
}
// Non-flexible mode (F=0):
//
// +-+-+-+-+-+-+-+-+
// L: | T |U| S |D|
// +-+-+-+-+-+-+-+-+
// | TL0PICIDX |
// +-+-+-+-+-+-+-+-+
//
bool WriteLayerInfoNonFlexibleMode(const RTPVideoHeaderVP9& vp9,
rtc::BitBufferWriter* writer) {
RETURN_FALSE_ON_ERROR(writer->WriteUInt8(Tl0PicIdxField(vp9, 0)));
return true;
}
bool WriteLayerInfo(const RTPVideoHeaderVP9& vp9,
rtc::BitBufferWriter* writer) {
if (!WriteLayerInfoCommon(vp9, writer))
return false;
if (vp9.flexible_mode)
return true;
return WriteLayerInfoNonFlexibleMode(vp9, writer);
}
// Reference indices:
//
// +-+-+-+-+-+-+-+-+ P=1,F=1: At least one reference index
// P,F: | P_DIFF |N| up to 3 times has to be specified.
// +-+-+-+-+-+-+-+-+ N=1: An additional P_DIFF follows
// current P_DIFF.
//
bool WriteRefIndices(const RTPVideoHeaderVP9& vp9,
rtc::BitBufferWriter* writer) {
if (!PictureIdPresent(vp9) ||
vp9.num_ref_pics == 0 || vp9.num_ref_pics > kMaxVp9RefPics) {
return false;
}
for (uint8_t i = 0; i < vp9.num_ref_pics; ++i) {
bool n_bit = !(i == vp9.num_ref_pics - 1);
RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.pid_diff[i], 7));
RETURN_FALSE_ON_ERROR(writer->WriteBits(n_bit ? 1 : 0, 1));
}
return true;
}
// Scalability structure (SS).
//
// +-+-+-+-+-+-+-+-+
// V: | N_S |Y|G|-|-|-|
// +-+-+-+-+-+-+-+-+ -|
// Y: | WIDTH | (OPTIONAL) .
// + + .
// | | (OPTIONAL) .
// +-+-+-+-+-+-+-+-+ . N_S + 1 times
// | HEIGHT | (OPTIONAL) .
// + + .
// | | (OPTIONAL) .
// +-+-+-+-+-+-+-+-+ -|
// G: | N_G | (OPTIONAL)
// +-+-+-+-+-+-+-+-+ -|
// N_G: | T |U| R |-|-| (OPTIONAL) .
// +-+-+-+-+-+-+-+-+ -| . N_G times
// | P_DIFF | (OPTIONAL) . R times .
// +-+-+-+-+-+-+-+-+ -| -|
//
bool WriteSsData(const RTPVideoHeaderVP9& vp9, rtc::BitBufferWriter* writer) {
RTC_DCHECK_GT(vp9.num_spatial_layers, 0U);
RTC_DCHECK_LE(vp9.num_spatial_layers, kMaxVp9NumberOfSpatialLayers);
RTC_DCHECK_LE(vp9.gof.num_frames_in_gof, kMaxVp9FramesInGof);
bool g_bit = vp9.gof.num_frames_in_gof > 0;
RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.num_spatial_layers - 1, 3));
RETURN_FALSE_ON_ERROR(
writer->WriteBits(vp9.spatial_layer_resolution_present ? 1 : 0, 1));
RETURN_FALSE_ON_ERROR(writer->WriteBits(g_bit ? 1 : 0, 1)); // G
RETURN_FALSE_ON_ERROR(writer->WriteBits(kReservedBitValue0, 3));
if (vp9.spatial_layer_resolution_present) {
for (size_t i = 0; i < vp9.num_spatial_layers; ++i) {
RETURN_FALSE_ON_ERROR(writer->WriteUInt16(vp9.width[i]));
RETURN_FALSE_ON_ERROR(writer->WriteUInt16(vp9.height[i]));
}
}
if (g_bit) {
RETURN_FALSE_ON_ERROR(writer->WriteUInt8(vp9.gof.num_frames_in_gof));
}
for (size_t i = 0; i < vp9.gof.num_frames_in_gof; ++i) {
RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.gof.temporal_idx[i], 3));
RETURN_FALSE_ON_ERROR(
writer->WriteBits(vp9.gof.temporal_up_switch[i] ? 1 : 0, 1));
RETURN_FALSE_ON_ERROR(writer->WriteBits(vp9.gof.num_ref_pics[i], 2));
RETURN_FALSE_ON_ERROR(writer->WriteBits(kReservedBitValue0, 2));
for (uint8_t r = 0; r < vp9.gof.num_ref_pics[i]; ++r) {
RETURN_FALSE_ON_ERROR(writer->WriteUInt8(vp9.gof.pid_diff[i][r]));
}
}
return true;
}
// Picture ID:
//
// +-+-+-+-+-+-+-+-+
// I: |M| PICTURE ID | M:0 => picture id is 7 bits.
// +-+-+-+-+-+-+-+-+ M:1 => picture id is 15 bits.
// M: | EXTENDED PID |
// +-+-+-+-+-+-+-+-+
//
bool ParsePictureId(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) {
uint32_t picture_id;
uint32_t m_bit;
RETURN_FALSE_ON_ERROR(parser->ReadBits(&m_bit, 1));
if (m_bit) {
RETURN_FALSE_ON_ERROR(parser->ReadBits(&picture_id, 15));
vp9->max_picture_id = kMaxTwoBytePictureId;
} else {
RETURN_FALSE_ON_ERROR(parser->ReadBits(&picture_id, 7));
vp9->max_picture_id = kMaxOneBytePictureId;
}
vp9->picture_id = picture_id;
return true;
}
// Layer indices (flexible mode):
//
// +-+-+-+-+-+-+-+-+
// L: | T |U| S |D|
// +-+-+-+-+-+-+-+-+
//
bool ParseLayerInfoCommon(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) {
uint32_t t, u_bit, s, d_bit;
RETURN_FALSE_ON_ERROR(parser->ReadBits(&t, 3));
RETURN_FALSE_ON_ERROR(parser->ReadBits(&u_bit, 1));
RETURN_FALSE_ON_ERROR(parser->ReadBits(&s, 3));
RETURN_FALSE_ON_ERROR(parser->ReadBits(&d_bit, 1));
vp9->temporal_idx = t;
vp9->temporal_up_switch = u_bit ? true : false;
vp9->spatial_idx = s;
vp9->inter_layer_predicted = d_bit ? true : false;
return true;
}
// Layer indices (non-flexible mode):
//
// +-+-+-+-+-+-+-+-+
// L: | T |U| S |D|
// +-+-+-+-+-+-+-+-+
// | TL0PICIDX |
// +-+-+-+-+-+-+-+-+
//
bool ParseLayerInfoNonFlexibleMode(rtc::BitBuffer* parser,
RTPVideoHeaderVP9* vp9) {
uint8_t tl0picidx;
RETURN_FALSE_ON_ERROR(parser->ReadUInt8(&tl0picidx));
vp9->tl0_pic_idx = tl0picidx;
return true;
}
bool ParseLayerInfo(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) {
if (!ParseLayerInfoCommon(parser, vp9))
return false;
if (vp9->flexible_mode)
return true;
return ParseLayerInfoNonFlexibleMode(parser, vp9);
}
// Reference indices:
//
// +-+-+-+-+-+-+-+-+ P=1,F=1: At least one reference index
// P,F: | P_DIFF |N| up to 3 times has to be specified.
// +-+-+-+-+-+-+-+-+ N=1: An additional P_DIFF follows
// current P_DIFF.
//
bool ParseRefIndices(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) {
if (vp9->picture_id == kNoPictureId)
return false;
vp9->num_ref_pics = 0;
uint32_t n_bit;
do {
if (vp9->num_ref_pics == kMaxVp9RefPics)
return false;
uint32_t p_diff;
RETURN_FALSE_ON_ERROR(parser->ReadBits(&p_diff, 7));
RETURN_FALSE_ON_ERROR(parser->ReadBits(&n_bit, 1));
vp9->pid_diff[vp9->num_ref_pics] = p_diff;
uint32_t scaled_pid = vp9->picture_id;
if (p_diff > scaled_pid) {
// TODO(asapersson): Max should correspond to the picture id of last wrap.
scaled_pid += vp9->max_picture_id + 1;
}
vp9->ref_picture_id[vp9->num_ref_pics++] = scaled_pid - p_diff;
} while (n_bit);
return true;
}
// Scalability structure (SS).
//
// +-+-+-+-+-+-+-+-+
// V: | N_S |Y|G|-|-|-|
// +-+-+-+-+-+-+-+-+ -|
// Y: | WIDTH | (OPTIONAL) .
// + + .
// | | (OPTIONAL) .
// +-+-+-+-+-+-+-+-+ . N_S + 1 times
// | HEIGHT | (OPTIONAL) .
// + + .
// | | (OPTIONAL) .
// +-+-+-+-+-+-+-+-+ -|
// G: | N_G | (OPTIONAL)
// +-+-+-+-+-+-+-+-+ -|
// N_G: | T |U| R |-|-| (OPTIONAL) .
// +-+-+-+-+-+-+-+-+ -| . N_G times
// | P_DIFF | (OPTIONAL) . R times .
// +-+-+-+-+-+-+-+-+ -| -|
//
bool ParseSsData(rtc::BitBuffer* parser, RTPVideoHeaderVP9* vp9) {
uint32_t n_s, y_bit, g_bit;
RETURN_FALSE_ON_ERROR(parser->ReadBits(&n_s, 3));
RETURN_FALSE_ON_ERROR(parser->ReadBits(&y_bit, 1));
RETURN_FALSE_ON_ERROR(parser->ReadBits(&g_bit, 1));
RETURN_FALSE_ON_ERROR(parser->ConsumeBits(3));
vp9->num_spatial_layers = n_s + 1;
vp9->spatial_layer_resolution_present = y_bit ? true : false;
vp9->gof.num_frames_in_gof = 0;
if (y_bit) {
for (size_t i = 0; i < vp9->num_spatial_layers; ++i) {
RETURN_FALSE_ON_ERROR(parser->ReadUInt16(&vp9->width[i]));
RETURN_FALSE_ON_ERROR(parser->ReadUInt16(&vp9->height[i]));
}
}
if (g_bit) {
uint8_t n_g;
RETURN_FALSE_ON_ERROR(parser->ReadUInt8(&n_g));
vp9->gof.num_frames_in_gof = n_g;
}
for (size_t i = 0; i < vp9->gof.num_frames_in_gof; ++i) {
uint32_t t, u_bit, r;
RETURN_FALSE_ON_ERROR(parser->ReadBits(&t, 3));
RETURN_FALSE_ON_ERROR(parser->ReadBits(&u_bit, 1));
RETURN_FALSE_ON_ERROR(parser->ReadBits(&r, 2));
RETURN_FALSE_ON_ERROR(parser->ConsumeBits(2));
vp9->gof.temporal_idx[i] = t;
vp9->gof.temporal_up_switch[i] = u_bit ? true : false;
vp9->gof.num_ref_pics[i] = r;
for (uint8_t p = 0; p < vp9->gof.num_ref_pics[i]; ++p) {
uint8_t p_diff;
RETURN_FALSE_ON_ERROR(parser->ReadUInt8(&p_diff));
vp9->gof.pid_diff[i][p] = p_diff;
}
}
return true;
}
// Gets the size of the next payload chunk to send. Returns 0 on error.
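// For example, in balanced mode, rem_bytes = 27 with max_length = 26 is split
// into ceil(27 / 26) = 2 fragments of 14 and 13 bytes rather than 26 + 1.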
size_t CalcNextSize(size_t max_length, size_t rem_bytes) {
if (max_length == 0 || rem_bytes == 0) {
return 0;
}
if (kBalancedMode) {
size_t num_frags = std::ceil(static_cast<double>(rem_bytes) / max_length);
return static_cast<size_t>(
static_cast<double>(rem_bytes) / num_frags + 0.5);
}
return max_length >= rem_bytes ? rem_bytes : max_length;
}
} // namespace
RtpPacketizerVp9::RtpPacketizerVp9(const RTPVideoHeaderVP9& hdr,
size_t max_payload_length)
: hdr_(hdr),
max_payload_length_(max_payload_length),
payload_(nullptr),
payload_size_(0) {
}
RtpPacketizerVp9::~RtpPacketizerVp9() {
}
ProtectionType RtpPacketizerVp9::GetProtectionType() {
bool protect =
hdr_.temporal_idx == 0 || hdr_.temporal_idx == kNoTemporalIdx;
return protect ? kProtectedPacket : kUnprotectedPacket;
}
StorageType RtpPacketizerVp9::GetStorageType(uint32_t retransmission_settings) {
StorageType storage = kAllowRetransmission;
if (hdr_.temporal_idx == 0 &&
!(retransmission_settings & kRetransmitBaseLayer)) {
storage = kDontRetransmit;
} else if (hdr_.temporal_idx != kNoTemporalIdx && hdr_.temporal_idx > 0 &&
!(retransmission_settings & kRetransmitHigherLayers)) {
storage = kDontRetransmit;
}
return storage;
}
std::string RtpPacketizerVp9::ToString() {
return "RtpPacketizerVp9";
}
void RtpPacketizerVp9::SetPayloadData(
const uint8_t* payload,
size_t payload_size,
const RTPFragmentationHeader* fragmentation) {
payload_ = payload;
payload_size_ = payload_size;
GeneratePackets();
}
void RtpPacketizerVp9::GeneratePackets() {
if (max_payload_length_ < PayloadDescriptorLength(hdr_) + 1) {
LOG(LS_ERROR) << "Payload header and one payload byte won't fit.";
return;
}
size_t bytes_processed = 0;
while (bytes_processed < payload_size_) {
size_t rem_bytes = payload_size_ - bytes_processed;
size_t rem_payload_len = max_payload_length_ -
(bytes_processed ? PayloadDescriptorLengthMinusSsData(hdr_)
: PayloadDescriptorLength(hdr_));
size_t packet_bytes = CalcNextSize(rem_payload_len, rem_bytes);
if (packet_bytes == 0) {
LOG(LS_ERROR) << "Failed to generate VP9 packets.";
while (!packets_.empty())
packets_.pop();
return;
}
QueuePacket(bytes_processed, packet_bytes, bytes_processed == 0,
rem_bytes == packet_bytes, &packets_);
bytes_processed += packet_bytes;
}
assert(bytes_processed == payload_size_);
}
bool RtpPacketizerVp9::NextPacket(uint8_t* buffer,
size_t* bytes_to_send,
bool* last_packet) {
if (packets_.empty()) {
return false;
}
PacketInfo packet_info = packets_.front();
packets_.pop();
if (!WriteHeaderAndPayload(packet_info, buffer, bytes_to_send)) {
return false;
}
*last_packet =
packets_.empty() && (hdr_.spatial_idx == kNoSpatialIdx ||
hdr_.spatial_idx == hdr_.num_spatial_layers - 1);
return true;
}
// VP9 format:
//
// Payload descriptor for F = 1 (flexible mode)
// 0 1 2 3 4 5 6 7
// +-+-+-+-+-+-+-+-+
// |I|P|L|F|B|E|V|-| (REQUIRED)
// +-+-+-+-+-+-+-+-+
// I: |M| PICTURE ID | (RECOMMENDED)
// +-+-+-+-+-+-+-+-+
// M: | EXTENDED PID | (RECOMMENDED)
// +-+-+-+-+-+-+-+-+
// L: | T |U| S |D| (CONDITIONALLY RECOMMENDED)
// +-+-+-+-+-+-+-+-+ -|
// P,F: | P_DIFF |N| (CONDITIONALLY RECOMMENDED) . up to 3 times
// +-+-+-+-+-+-+-+-+ -|
// V: | SS |
// | .. |
// +-+-+-+-+-+-+-+-+
//
// Payload descriptor for F = 0 (non-flexible mode)
// 0 1 2 3 4 5 6 7
// +-+-+-+-+-+-+-+-+
// |I|P|L|F|B|E|V|-| (REQUIRED)
// +-+-+-+-+-+-+-+-+
// I: |M| PICTURE ID | (RECOMMENDED)
// +-+-+-+-+-+-+-+-+
// M: | EXTENDED PID | (RECOMMENDED)
// +-+-+-+-+-+-+-+-+
// L: | T |U| S |D| (CONDITIONALLY RECOMMENDED)
// +-+-+-+-+-+-+-+-+
// | TL0PICIDX | (CONDITIONALLY REQUIRED)
// +-+-+-+-+-+-+-+-+
// V: | SS |
// | .. |
// +-+-+-+-+-+-+-+-+
bool RtpPacketizerVp9::WriteHeaderAndPayload(const PacketInfo& packet_info,
uint8_t* buffer,
size_t* bytes_to_send) const {
size_t header_length;
if (!WriteHeader(packet_info, buffer, &header_length))
return false;
// Copy payload data.
memcpy(&buffer[header_length],
&payload_[packet_info.payload_start_pos], packet_info.size);
*bytes_to_send = header_length + packet_info.size;
return true;
}
bool RtpPacketizerVp9::WriteHeader(const PacketInfo& packet_info,
uint8_t* buffer,
size_t* header_length) const {
// Required payload descriptor byte.
bool i_bit = PictureIdPresent(hdr_);
bool p_bit = hdr_.inter_pic_predicted;
bool l_bit = LayerInfoPresent(hdr_);
bool f_bit = hdr_.flexible_mode;
bool b_bit = packet_info.layer_begin;
bool e_bit = packet_info.layer_end;
bool v_bit = hdr_.ss_data_available && b_bit;
rtc::BitBufferWriter writer(buffer, max_payload_length_);
RETURN_FALSE_ON_ERROR(writer.WriteBits(i_bit ? 1 : 0, 1));
RETURN_FALSE_ON_ERROR(writer.WriteBits(p_bit ? 1 : 0, 1));
RETURN_FALSE_ON_ERROR(writer.WriteBits(l_bit ? 1 : 0, 1));
RETURN_FALSE_ON_ERROR(writer.WriteBits(f_bit ? 1 : 0, 1));
RETURN_FALSE_ON_ERROR(writer.WriteBits(b_bit ? 1 : 0, 1));
RETURN_FALSE_ON_ERROR(writer.WriteBits(e_bit ? 1 : 0, 1));
RETURN_FALSE_ON_ERROR(writer.WriteBits(v_bit ? 1 : 0, 1));
RETURN_FALSE_ON_ERROR(writer.WriteBits(kReservedBitValue0, 1));
// Add fields that are present.
if (i_bit && !WritePictureId(hdr_, &writer)) {
LOG(LS_ERROR) << "Failed writing VP9 picture id.";
return false;
}
if (l_bit && !WriteLayerInfo(hdr_, &writer)) {
LOG(LS_ERROR) << "Failed writing VP9 layer info.";
return false;
}
if (p_bit && f_bit && !WriteRefIndices(hdr_, &writer)) {
LOG(LS_ERROR) << "Failed writing VP9 ref indices.";
return false;
}
if (v_bit && !WriteSsData(hdr_, &writer)) {
LOG(LS_ERROR) << "Failed writing VP9 SS data.";
return false;
}
size_t offset_bytes = 0;
size_t offset_bits = 0;
writer.GetCurrentOffset(&offset_bytes, &offset_bits);
assert(offset_bits == 0);
*header_length = offset_bytes;
return true;
}
bool RtpDepacketizerVp9::Parse(ParsedPayload* parsed_payload,
const uint8_t* payload,
size_t payload_length) {
assert(parsed_payload != nullptr);
if (payload_length == 0) {
LOG(LS_ERROR) << "Payload length is zero.";
return false;
}
// Parse mandatory first byte of payload descriptor.
rtc::BitBuffer parser(payload, payload_length);
uint32_t i_bit, p_bit, l_bit, f_bit, b_bit, e_bit, v_bit;
RETURN_FALSE_ON_ERROR(parser.ReadBits(&i_bit, 1));
RETURN_FALSE_ON_ERROR(parser.ReadBits(&p_bit, 1));
RETURN_FALSE_ON_ERROR(parser.ReadBits(&l_bit, 1));
RETURN_FALSE_ON_ERROR(parser.ReadBits(&f_bit, 1));
RETURN_FALSE_ON_ERROR(parser.ReadBits(&b_bit, 1));
RETURN_FALSE_ON_ERROR(parser.ReadBits(&e_bit, 1));
RETURN_FALSE_ON_ERROR(parser.ReadBits(&v_bit, 1));
RETURN_FALSE_ON_ERROR(parser.ConsumeBits(1));
// Parsed payload.
parsed_payload->type.Video.width = 0;
parsed_payload->type.Video.height = 0;
parsed_payload->type.Video.simulcastIdx = 0;
parsed_payload->type.Video.codec = kRtpVideoVp9;
parsed_payload->frame_type = p_bit ? kVideoFrameDelta : kVideoFrameKey;
RTPVideoHeaderVP9* vp9 = &parsed_payload->type.Video.codecHeader.VP9;
vp9->InitRTPVideoHeaderVP9();
vp9->inter_pic_predicted = p_bit ? true : false;
vp9->flexible_mode = f_bit ? true : false;
vp9->beginning_of_frame = b_bit ? true : false;
vp9->end_of_frame = e_bit ? true : false;
vp9->ss_data_available = v_bit ? true : false;
vp9->spatial_idx = 0;
// Parse fields that are present.
if (i_bit && !ParsePictureId(&parser, vp9)) {
LOG(LS_ERROR) << "Failed parsing VP9 picture id.";
return false;
}
if (l_bit && !ParseLayerInfo(&parser, vp9)) {
LOG(LS_ERROR) << "Failed parsing VP9 layer info.";
return false;
}
if (p_bit && f_bit && !ParseRefIndices(&parser, vp9)) {
LOG(LS_ERROR) << "Failed parsing VP9 ref indices.";
return false;
}
if (v_bit) {
if (!ParseSsData(&parser, vp9)) {
LOG(LS_ERROR) << "Failed parsing VP9 SS data.";
return false;
}
if (vp9->spatial_layer_resolution_present) {
// TODO(asapersson): Add support for spatial layers.
parsed_payload->type.Video.width = vp9->width[0];
parsed_payload->type.Video.height = vp9->height[0];
}
}
parsed_payload->type.Video.isFirstPacket =
b_bit && (!l_bit || !vp9->inter_layer_predicted);
uint64_t rem_bits = parser.RemainingBitCount();
assert(rem_bits % 8 == 0);
parsed_payload->payload_length = rem_bits / 8;
if (parsed_payload->payload_length == 0) {
LOG(LS_ERROR) << "Failed parsing VP9 payload data.";
return false;
}
parsed_payload->payload =
payload + payload_length - parsed_payload->payload_length;
return true;
}
} // namespace webrtc

View File

@ -0,0 +1,108 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
//
// This file contains the declaration of the VP9 packetizer class.
// A packetizer object is created for each encoded video frame. The payload
// data and size are provided through SetPayloadData.
//
// After creating the packetizer, the method NextPacket is called
// repeatedly to get all packets for the frame. The method returns true as
// long as a packet was fetched; once every packet of the frame has been
// returned, it returns false.
//
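// A minimal usage sketch (buffer size and variable names are illustrative):
//
//   RtpPacketizerVp9 packetizer(vp9_header, kMaxPayloadLength);
//   packetizer.SetPayloadData(payload, payload_size, nullptr);
//   uint8_t buffer[kPacketBufferSize];
//   size_t bytes_to_send = 0;
//   bool last_packet = false;
//   while (packetizer.NextPacket(buffer, &bytes_to_send, &last_packet)) {
//     // Send |bytes_to_send| bytes from |buffer|.
//   }
//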
#ifndef WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VP9_H_
#define WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VP9_H_
#include <queue>
#include <string>
#include "webrtc/base/constructormagic.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format.h"
#include "webrtc/typedefs.h"
namespace webrtc {
class RtpPacketizerVp9 : public RtpPacketizer {
public:
RtpPacketizerVp9(const RTPVideoHeaderVP9& hdr, size_t max_payload_length);
virtual ~RtpPacketizerVp9();
ProtectionType GetProtectionType() override;
StorageType GetStorageType(uint32_t retransmission_settings) override;
std::string ToString() override;
// The payload data must be one encoded VP9 frame.
void SetPayloadData(const uint8_t* payload,
size_t payload_size,
const RTPFragmentationHeader* fragmentation) override;
// Gets the next payload with VP9 payload header.
// |buffer| is a pointer to where the output will be written.
// |bytes_to_send| is an output variable that will contain the number of
// bytes written to |buffer|.
// |last_packet| is true for the last packet of the frame, false otherwise
// (i.e. call the function again to get the next packet).
// Returns true on success, false otherwise.
bool NextPacket(uint8_t* buffer,
size_t* bytes_to_send,
bool* last_packet) override;
typedef struct {
size_t payload_start_pos;
size_t size;
bool layer_begin;
bool layer_end;
} PacketInfo;
typedef std::queue<PacketInfo> PacketInfoQueue;
private:
// Calculates all packet sizes and loads info to packet queue.
void GeneratePackets();
// Writes the payload descriptor header and copies payload to the |buffer|.
// |packet_info| determines which part of the payload to write.
// |bytes_to_send| is set to the number of bytes written to the buffer.
// Returns true on success, false otherwise.
bool WriteHeaderAndPayload(const PacketInfo& packet_info,
uint8_t* buffer,
size_t* bytes_to_send) const;
// Writes payload descriptor header to |buffer|.
// Returns true on success, false otherwise.
bool WriteHeader(const PacketInfo& packet_info,
uint8_t* buffer,
size_t* header_length) const;
const RTPVideoHeaderVP9 hdr_;
const size_t max_payload_length_; // The max length in bytes of one packet.
const uint8_t* payload_; // The payload data to be packetized.
size_t payload_size_; // The size in bytes of the payload data.
PacketInfoQueue packets_;
DISALLOW_COPY_AND_ASSIGN(RtpPacketizerVp9);
};
class RtpDepacketizerVp9 : public RtpDepacketizer {
public:
virtual ~RtpDepacketizerVp9() {}
bool Parse(ParsedPayload* parsed_payload,
const uint8_t* payload,
size_t payload_length) override;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_RTP_RTCP_SOURCE_RTP_FORMAT_VP9_H_

View File

@ -0,0 +1,690 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <vector>
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h"
#include "webrtc/typedefs.h"
namespace webrtc {
namespace {
void VerifyHeader(const RTPVideoHeaderVP9& expected,
const RTPVideoHeaderVP9& actual) {
EXPECT_EQ(expected.inter_layer_predicted, actual.inter_layer_predicted);
EXPECT_EQ(expected.inter_pic_predicted, actual.inter_pic_predicted);
EXPECT_EQ(expected.flexible_mode, actual.flexible_mode);
EXPECT_EQ(expected.beginning_of_frame, actual.beginning_of_frame);
EXPECT_EQ(expected.end_of_frame, actual.end_of_frame);
EXPECT_EQ(expected.ss_data_available, actual.ss_data_available);
EXPECT_EQ(expected.picture_id, actual.picture_id);
EXPECT_EQ(expected.max_picture_id, actual.max_picture_id);
EXPECT_EQ(expected.temporal_idx, actual.temporal_idx);
EXPECT_EQ(expected.spatial_idx == kNoSpatialIdx ? 0 : expected.spatial_idx,
actual.spatial_idx);
EXPECT_EQ(expected.gof_idx, actual.gof_idx);
EXPECT_EQ(expected.tl0_pic_idx, actual.tl0_pic_idx);
EXPECT_EQ(expected.temporal_up_switch, actual.temporal_up_switch);
EXPECT_EQ(expected.num_ref_pics, actual.num_ref_pics);
for (uint8_t i = 0; i < expected.num_ref_pics; ++i) {
EXPECT_EQ(expected.pid_diff[i], actual.pid_diff[i]);
EXPECT_EQ(expected.ref_picture_id[i], actual.ref_picture_id[i]);
}
if (expected.ss_data_available) {
EXPECT_EQ(expected.spatial_layer_resolution_present,
actual.spatial_layer_resolution_present);
EXPECT_EQ(expected.num_spatial_layers, actual.num_spatial_layers);
if (expected.spatial_layer_resolution_present) {
for (size_t i = 0; i < expected.num_spatial_layers; i++) {
EXPECT_EQ(expected.width[i], actual.width[i]);
EXPECT_EQ(expected.height[i], actual.height[i]);
}
}
EXPECT_EQ(expected.gof.num_frames_in_gof, actual.gof.num_frames_in_gof);
for (size_t i = 0; i < expected.gof.num_frames_in_gof; i++) {
EXPECT_EQ(expected.gof.temporal_up_switch[i],
actual.gof.temporal_up_switch[i]);
EXPECT_EQ(expected.gof.temporal_idx[i], actual.gof.temporal_idx[i]);
EXPECT_EQ(expected.gof.num_ref_pics[i], actual.gof.num_ref_pics[i]);
for (uint8_t j = 0; j < expected.gof.num_ref_pics[i]; j++) {
EXPECT_EQ(expected.gof.pid_diff[i][j], actual.gof.pid_diff[i][j]);
}
}
}
}
void VerifyPayload(const RtpDepacketizer::ParsedPayload& parsed,
const uint8_t* payload,
size_t payload_length) {
EXPECT_EQ(payload, parsed.payload);
EXPECT_EQ(payload_length, parsed.payload_length);
EXPECT_THAT(std::vector<uint8_t>(parsed.payload,
parsed.payload + parsed.payload_length),
::testing::ElementsAreArray(payload, payload_length));
}
void ParseAndCheckPacket(const uint8_t* packet,
const RTPVideoHeaderVP9& expected,
size_t expected_hdr_length,
size_t expected_length) {
rtc::scoped_ptr<RtpDepacketizer> depacketizer(new RtpDepacketizerVp9());
RtpDepacketizer::ParsedPayload parsed;
ASSERT_TRUE(depacketizer->Parse(&parsed, packet, expected_length));
EXPECT_EQ(kRtpVideoVp9, parsed.type.Video.codec);
VerifyHeader(expected, parsed.type.Video.codecHeader.VP9);
const size_t kExpectedPayloadLength = expected_length - expected_hdr_length;
VerifyPayload(parsed, packet + expected_hdr_length, kExpectedPayloadLength);
}
} // namespace
// Payload descriptor for flexible mode
// 0 1 2 3 4 5 6 7
// +-+-+-+-+-+-+-+-+
// |I|P|L|F|B|E|V|-| (REQUIRED)
// +-+-+-+-+-+-+-+-+
// I: |M| PICTURE ID | (RECOMMENDED)
// +-+-+-+-+-+-+-+-+
// M: | EXTENDED PID | (RECOMMENDED)
// +-+-+-+-+-+-+-+-+
// L: | T |U| S |D| (CONDITIONALLY RECOMMENDED)
// +-+-+-+-+-+-+-+-+ -|
// P,F: | P_DIFF |N| (CONDITIONALLY RECOMMENDED) . up to 3 times
// +-+-+-+-+-+-+-+-+ -|
// V: | SS |
// | .. |
// +-+-+-+-+-+-+-+-+
//
// Payload descriptor for non-flexible mode
// 0 1 2 3 4 5 6 7
// +-+-+-+-+-+-+-+-+
// |I|P|L|F|B|E|V|-| (REQUIRED)
// +-+-+-+-+-+-+-+-+
// I: |M| PICTURE ID | (RECOMMENDED)
// +-+-+-+-+-+-+-+-+
// M: | EXTENDED PID | (RECOMMENDED)
// +-+-+-+-+-+-+-+-+
// L: | T |U| S |D| (CONDITIONALLY RECOMMENDED)
// +-+-+-+-+-+-+-+-+
// | TL0PICIDX | (CONDITIONALLY REQUIRED)
// +-+-+-+-+-+-+-+-+
// V: | SS |
// | .. |
// +-+-+-+-+-+-+-+-+
class RtpPacketizerVp9Test : public ::testing::Test {
protected:
RtpPacketizerVp9Test() {}
virtual void SetUp() {
expected_.InitRTPVideoHeaderVP9();
}
rtc::scoped_ptr<uint8_t[]> packet_;
rtc::scoped_ptr<uint8_t[]> payload_;
size_t payload_size_;
size_t payload_pos_;
RTPVideoHeaderVP9 expected_;
rtc::scoped_ptr<RtpPacketizerVp9> packetizer_;
void Init(size_t payload_size, size_t packet_size) {
payload_.reset(new uint8_t[payload_size]);
memset(payload_.get(), 7, payload_size);
payload_size_ = payload_size;
payload_pos_ = 0;
packetizer_.reset(new RtpPacketizerVp9(expected_, packet_size));
packetizer_->SetPayloadData(payload_.get(), payload_size_, NULL);
const int kMaxPayloadDescriptorLength = 100;
packet_.reset(new uint8_t[payload_size_ + kMaxPayloadDescriptorLength]);
}
void CheckPayload(const uint8_t* packet,
size_t start_pos,
size_t end_pos,
bool last) {
for (size_t i = start_pos; i < end_pos; ++i) {
EXPECT_EQ(packet[i], payload_[payload_pos_++]);
}
EXPECT_EQ(last, payload_pos_ == payload_size_);
}
void CreateParseAndCheckPackets(const size_t* expected_hdr_sizes,
const size_t* expected_sizes,
size_t expected_num_packets) {
ASSERT_TRUE(packetizer_.get() != NULL);
size_t length = 0;
bool last = false;
if (expected_num_packets == 0) {
EXPECT_FALSE(packetizer_->NextPacket(packet_.get(), &length, &last));
return;
}
for (size_t i = 0; i < expected_num_packets; ++i) {
EXPECT_TRUE(packetizer_->NextPacket(packet_.get(), &length, &last));
EXPECT_EQ(expected_sizes[i], length);
RTPVideoHeaderVP9 hdr = expected_;
hdr.beginning_of_frame = (i == 0);
hdr.end_of_frame = last;
ParseAndCheckPacket(packet_.get(), hdr, expected_hdr_sizes[i], length);
CheckPayload(packet_.get(), expected_hdr_sizes[i], length, last);
}
EXPECT_TRUE(last);
}
};
TEST_F(RtpPacketizerVp9Test, TestEqualSizedMode_OnePacket) {
const size_t kFrameSize = 25;
const size_t kPacketSize = 26;
Init(kFrameSize, kPacketSize);
// One packet:
// I:0, P:0, L:0, F:0, B:1, E:1, V:0 (1hdr + 25 payload)
const size_t kExpectedHdrSizes[] = {1};
const size_t kExpectedSizes[] = {26};
const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes);
CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum);
}
TEST_F(RtpPacketizerVp9Test, TestEqualSizedMode_TwoPackets) {
const size_t kFrameSize = 27;
const size_t kPacketSize = 27;
Init(kFrameSize, kPacketSize);
// Two packets:
// I:0, P:0, L:0, F:0, B:1, E:0, V:0 (1hdr + 14 payload)
// I:0, P:0, L:0, F:0, B:0, E:1, V:0 (1hdr + 13 payload)
const size_t kExpectedHdrSizes[] = {1, 1};
const size_t kExpectedSizes[] = {15, 14};
const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes);
CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum);
}
TEST_F(RtpPacketizerVp9Test, TestTooShortBufferToFitPayload) {
const size_t kFrameSize = 1;
const size_t kPacketSize = 1;
Init(kFrameSize, kPacketSize); // 1hdr + 1 payload
const size_t kExpectedNum = 0;
CreateParseAndCheckPackets(NULL, NULL, kExpectedNum);
}
TEST_F(RtpPacketizerVp9Test, TestOneBytePictureId) {
const size_t kFrameSize = 30;
const size_t kPacketSize = 12;
expected_.picture_id = kMaxOneBytePictureId; // 2 byte payload descriptor
expected_.max_picture_id = kMaxOneBytePictureId;
Init(kFrameSize, kPacketSize);
// Three packets:
// I:1, P:0, L:0, F:0, B:1, E:0, V:0 (2hdr + 10 payload)
// I:1, P:0, L:0, F:0, B:0, E:0, V:0 (2hdr + 10 payload)
// I:1, P:0, L:0, F:0, B:0, E:1, V:0 (2hdr + 10 payload)
const size_t kExpectedHdrSizes[] = {2, 2, 2};
const size_t kExpectedSizes[] = {12, 12, 12};
const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes);
CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum);
}
TEST_F(RtpPacketizerVp9Test, TestTwoBytePictureId) {
const size_t kFrameSize = 31;
const size_t kPacketSize = 13;
expected_.picture_id = kMaxTwoBytePictureId; // 3 byte payload descriptor
Init(kFrameSize, kPacketSize);
// Four packets:
// I:1, P:0, L:0, F:0, B:1, E:0, V:0 (3hdr + 8 payload)
// I:1, P:0, L:0, F:0, B:0, E:0, V:0 (3hdr + 8 payload)
// I:1, P:0, L:0, F:0, B:0, E:0, V:0 (3hdr + 8 payload)
// I:1, P:0, L:0, F:0, B:0, E:1, V:0 (3hdr + 7 payload)
const size_t kExpectedHdrSizes[] = {3, 3, 3, 3};
const size_t kExpectedSizes[] = {11, 11, 11, 10};
const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes);
CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum);
}
TEST_F(RtpPacketizerVp9Test, TestLayerInfoWithNonFlexibleMode) {
const size_t kFrameSize = 30;
const size_t kPacketSize = 25;
expected_.temporal_idx = 3;
expected_.temporal_up_switch = true; // U
expected_.num_spatial_layers = 3;
expected_.spatial_idx = 2;
expected_.inter_layer_predicted = true; // D
expected_.tl0_pic_idx = 117;
Init(kFrameSize, kPacketSize);
// Two packets:
// | I:0, P:0, L:1, F:0, B:1, E:0, V:0 | (3hdr + 15 payload)
// L: | T:3, U:1, S:2, D:1 | TL0PICIDX:117 |
// | I:0, P:0, L:1, F:0, B:0, E:1, V:0 | (3hdr + 15 payload)
// L: | T:3, U:1, S:2, D:1 | TL0PICIDX:117 |
const size_t kExpectedHdrSizes[] = {3, 3};
const size_t kExpectedSizes[] = {18, 18};
const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes);
CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum);
}
TEST_F(RtpPacketizerVp9Test, TestLayerInfoWithFlexibleMode) {
const size_t kFrameSize = 21;
const size_t kPacketSize = 23;
expected_.flexible_mode = true;
expected_.temporal_idx = 3;
expected_.temporal_up_switch = true; // U
expected_.num_spatial_layers = 3;
expected_.spatial_idx = 2;
expected_.inter_layer_predicted = false; // D
Init(kFrameSize, kPacketSize);
// One packet:
// I:0, P:0, L:1, F:1, B:1, E:1, V:0 (2hdr + 21 payload)
// L: T:3, U:1, S:2, D:0
const size_t kExpectedHdrSizes[] = {2};
const size_t kExpectedSizes[] = {23};
const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes);
CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum);
}
TEST_F(RtpPacketizerVp9Test, TestRefIdx) {
const size_t kFrameSize = 16;
const size_t kPacketSize = 21;
expected_.inter_pic_predicted = true; // P
expected_.flexible_mode = true; // F
expected_.picture_id = 2;
expected_.max_picture_id = kMaxOneBytePictureId;
expected_.num_ref_pics = 3;
expected_.pid_diff[0] = 1;
expected_.pid_diff[1] = 3;
expected_.pid_diff[2] = 127;
expected_.ref_picture_id[0] = 1; // 2 - 1 = 1
expected_.ref_picture_id[1] = 127; // (kMaxPictureId + 1) + 2 - 3 = 127
expected_.ref_picture_id[2] = 3; // (kMaxPictureId + 1) + 2 - 127 = 3
Init(kFrameSize, kPacketSize);
// One packet:
// I:1, P:1, L:0, F:1, B:1, E:1, V:0 (5hdr + 16 payload)
// I: 2
// P,F: P_DIFF:1, N:1
// P_DIFF:3, N:1
// P_DIFF:127, N:0
const size_t kExpectedHdrSizes[] = {5};
const size_t kExpectedSizes[] = {21};
const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes);
CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum);
}
TEST_F(RtpPacketizerVp9Test, TestRefIdxFailsWithoutPictureId) {
const size_t kFrameSize = 16;
const size_t kPacketSize = 21;
expected_.inter_pic_predicted = true;
expected_.flexible_mode = true;
expected_.num_ref_pics = 1;
expected_.pid_diff[0] = 3;
Init(kFrameSize, kPacketSize);
const size_t kExpectedNum = 0;
CreateParseAndCheckPackets(NULL, NULL, kExpectedNum);
}
TEST_F(RtpPacketizerVp9Test, TestSsDataWithoutSpatialResolutionPresent) {
const size_t kFrameSize = 21;
const size_t kPacketSize = 26;
expected_.ss_data_available = true;
expected_.num_spatial_layers = 1;
expected_.spatial_layer_resolution_present = false;
expected_.gof.num_frames_in_gof = 1;
expected_.gof.temporal_idx[0] = 0;
expected_.gof.temporal_up_switch[0] = true;
expected_.gof.num_ref_pics[0] = 1;
expected_.gof.pid_diff[0][0] = 4;
Init(kFrameSize, kPacketSize);
// One packet:
// I:0, P:0, L:0, F:0, B:1, E:1, V:1 (5hdr + 21 payload)
// N_S:0, Y:0, G:1
// N_G:1
// T:0, U:1, R:1 | P_DIFF[0][0]:4
const size_t kExpectedHdrSizes[] = {5};
const size_t kExpectedSizes[] = {26};
const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes);
CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum);
}
TEST_F(RtpPacketizerVp9Test, TestSsDataWithoutGbitPresent) {
const size_t kFrameSize = 21;
const size_t kPacketSize = 23;
expected_.ss_data_available = true;
expected_.num_spatial_layers = 1;
expected_.spatial_layer_resolution_present = false;
expected_.gof.num_frames_in_gof = 0;
Init(kFrameSize, kPacketSize);
// One packet:
// I:0, P:0, L:0, F:0, B:1, E:1, V:1 (2hdr + 21 payload)
// N_S:0, Y:0, G:0
const size_t kExpectedHdrSizes[] = {2};
const size_t kExpectedSizes[] = {23};
const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes);
CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum);
}
TEST_F(RtpPacketizerVp9Test, TestSsData) {
const size_t kFrameSize = 21;
const size_t kPacketSize = 40;
expected_.ss_data_available = true;
expected_.num_spatial_layers = 2;
expected_.spatial_layer_resolution_present = true;
expected_.width[0] = 640;
expected_.width[1] = 1280;
expected_.height[0] = 360;
expected_.height[1] = 720;
expected_.gof.num_frames_in_gof = 3;
expected_.gof.temporal_idx[0] = 0;
expected_.gof.temporal_idx[1] = 1;
expected_.gof.temporal_idx[2] = 2;
expected_.gof.temporal_up_switch[0] = true;
expected_.gof.temporal_up_switch[1] = true;
expected_.gof.temporal_up_switch[2] = false;
expected_.gof.num_ref_pics[0] = 0;
expected_.gof.num_ref_pics[1] = 3;
expected_.gof.num_ref_pics[2] = 2;
expected_.gof.pid_diff[1][0] = 5;
expected_.gof.pid_diff[1][1] = 6;
expected_.gof.pid_diff[1][2] = 7;
expected_.gof.pid_diff[2][0] = 8;
expected_.gof.pid_diff[2][1] = 9;
Init(kFrameSize, kPacketSize);
// One packet:
// I:0, P:0, L:0, F:0, B:1, E:1, V:1 (19hdr + 21 payload)
// N_S:1, Y:1, G:1
// WIDTH:640 // 2 bytes
// HEIGHT:360 // 2 bytes
// WIDTH:1280 // 2 bytes
// HEIGHT:720 // 2 bytes
// N_G:3
// T:0, U:1, R:0
// T:1, U:1, R:3 | P_DIFF[1][0]:5 | P_DIFF[1][1]:6 | P_DIFF[1][2]:7
// T:2, U:0, R:2 | P_DIFF[2][0]:8 | P_DIFF[2][1]:9
const size_t kExpectedHdrSizes[] = {19};
const size_t kExpectedSizes[] = {40};
const size_t kExpectedNum = GTEST_ARRAY_SIZE_(kExpectedSizes);
CreateParseAndCheckPackets(kExpectedHdrSizes, kExpectedSizes, kExpectedNum);
}
TEST_F(RtpPacketizerVp9Test, TestBaseLayerProtectionAndStorageType) {
const size_t kFrameSize = 10;
const size_t kPacketSize = 12;
// I:0, P:0, L:1, F:1, B:1, E:1, V:0 (2hdr + 10 payload)
// L: T:0, U:0, S:0, D:0
expected_.flexible_mode = true;
expected_.temporal_idx = 0;
Init(kFrameSize, kPacketSize);
EXPECT_EQ(kProtectedPacket, packetizer_->GetProtectionType());
EXPECT_EQ(kAllowRetransmission,
packetizer_->GetStorageType(kRetransmitBaseLayer));
EXPECT_EQ(kDontRetransmit, packetizer_->GetStorageType(kRetransmitOff));
}
TEST_F(RtpPacketizerVp9Test, TestHigherLayerProtectionAndStorageType) {
const size_t kFrameSize = 10;
const size_t kPacketSize = 12;
// I:0, P:0, L:1, F:1, B:1, E:1, V:0 (2hdr + 10 payload)
// L: T:1, U:0, S:0, D:0
expected_.flexible_mode = true;
expected_.temporal_idx = 1;
Init(kFrameSize, kPacketSize);
EXPECT_EQ(kUnprotectedPacket, packetizer_->GetProtectionType());
EXPECT_EQ(kDontRetransmit, packetizer_->GetStorageType(kRetransmitBaseLayer));
EXPECT_EQ(kAllowRetransmission,
packetizer_->GetStorageType(kRetransmitHigherLayers));
}
class RtpDepacketizerVp9Test : public ::testing::Test {
protected:
RtpDepacketizerVp9Test()
: depacketizer_(new RtpDepacketizerVp9()) {}
virtual void SetUp() {
expected_.InitRTPVideoHeaderVP9();
}
RTPVideoHeaderVP9 expected_;
rtc::scoped_ptr<RtpDepacketizer> depacketizer_;
};
TEST_F(RtpDepacketizerVp9Test, ParseBasicHeader) {
const uint8_t kHeaderLength = 1;
uint8_t packet[4] = {0};
packet[0] = 0x0C; // I:0 P:0 L:0 F:0 B:1 E:1 V:0 R:0
expected_.beginning_of_frame = true;
expected_.end_of_frame = true;
ParseAndCheckPacket(packet, expected_, kHeaderLength, sizeof(packet));
}
TEST_F(RtpDepacketizerVp9Test, ParseOneBytePictureId) {
const uint8_t kHeaderLength = 2;
uint8_t packet[10] = {0};
packet[0] = 0x80; // I:1 P:0 L:0 F:0 B:0 E:0 V:0 R:0
packet[1] = kMaxOneBytePictureId;
expected_.picture_id = kMaxOneBytePictureId;
expected_.max_picture_id = kMaxOneBytePictureId;
ParseAndCheckPacket(packet, expected_, kHeaderLength, sizeof(packet));
}
TEST_F(RtpDepacketizerVp9Test, ParseTwoBytePictureId) {
const uint8_t kHeaderLength = 3;
uint8_t packet[10] = {0};
packet[0] = 0x80; // I:1 P:0 L:0 F:0 B:0 E:0 V:0 R:0
packet[1] = 0x80 | ((kMaxTwoBytePictureId >> 8) & 0x7F);
packet[2] = kMaxTwoBytePictureId & 0xFF;
expected_.picture_id = kMaxTwoBytePictureId;
expected_.max_picture_id = kMaxTwoBytePictureId;
ParseAndCheckPacket(packet, expected_, kHeaderLength, sizeof(packet));
}
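Editorial aside, not part of the patch: the two tests above exercise the M bit of the PICTURE ID field. A hedged sketch of the corresponding writer (the helper name is hypothetical):
// Sketch only: write the PICTURE ID field. With M set, the ID is 15 bits
// across two bytes; with M cleared, it is 7 bits in one byte.
inline size_t WritePictureId(uint16_t picture_id, bool two_bytes,
                             uint8_t* buf) {
  if (two_bytes) {
    buf[0] = 0x80 | ((picture_id >> 8) & 0x7F);  // M:1 | high 7 bits.
    buf[1] = picture_id & 0xFF;                  // Low 8 bits.
    return 2;
  }
  buf[0] = picture_id & 0x7F;                    // M:0 | 7-bit ID.
  return 1;
}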
TEST_F(RtpDepacketizerVp9Test, ParseLayerInfoWithNonFlexibleMode) {
const uint8_t kHeaderLength = 3;
const uint8_t kTemporalIdx = 2;
const uint8_t kUbit = 1;
const uint8_t kSpatialIdx = 1;
const uint8_t kDbit = 1;
const uint8_t kTl0PicIdx = 17;
uint8_t packet[13] = {0};
packet[0] = 0x20; // I:0 P:0 L:1 F:0 B:0 E:0 V:0 R:0
packet[1] = (kTemporalIdx << 5) | (kUbit << 4) | (kSpatialIdx << 1) | kDbit;
packet[2] = kTl0PicIdx;
// T:2 U:1 S:1 D:1
// TL0PICIDX:17
expected_.temporal_idx = kTemporalIdx;
expected_.temporal_up_switch = kUbit ? true : false;
expected_.spatial_idx = kSpatialIdx;
expected_.inter_layer_predicted = kDbit ? true : false;
expected_.tl0_pic_idx = kTl0PicIdx;
ParseAndCheckPacket(packet, expected_, kHeaderLength, sizeof(packet));
}
TEST_F(RtpDepacketizerVp9Test, ParseLayerInfoWithFlexibleMode) {
const uint8_t kHeaderLength = 2;
const uint8_t kTemporalIdx = 2;
const uint8_t kUbit = 1;
const uint8_t kSpatialIdx = 0;
const uint8_t kDbit = 0;
uint8_t packet[13] = {0};
packet[0] = 0x38; // I:0 P:0 L:1 F:1 B:1 E:0 V:0 R:0
packet[1] = (kTemporalIdx << 5) | (kUbit << 4) | (kSpatialIdx << 1) | kDbit;
// I:0 P:0 L:1 F:1 B:1 E:0 V:0
// L: T:2 U:1 S:0 D:0
expected_.beginning_of_frame = true;
expected_.flexible_mode = true;
expected_.temporal_idx = kTemporalIdx;
expected_.temporal_up_switch = kUbit ? true : false;
expected_.spatial_idx = kSpatialIdx;
expected_.inter_layer_predicted = kDbit ? true : false;
ParseAndCheckPacket(packet, expected_, kHeaderLength, sizeof(packet));
}
TEST_F(RtpDepacketizerVp9Test, ParseRefIdx) {
const uint8_t kHeaderLength = 6;
const int16_t kPictureId = 17;
const uint8_t kPdiff1 = 17;
const uint8_t kPdiff2 = 18;
const uint8_t kPdiff3 = 127;
uint8_t packet[13] = {0};
packet[0] = 0xD8; // I:1 P:1 L:0 F:1 B:1 E:0 V:0 R:0
packet[1] = 0x80 | ((kPictureId >> 8) & 0x7F); // Two byte pictureID.
packet[2] = kPictureId;
packet[3] = (kPdiff1 << 1) | 1; // P_DIFF N:1
packet[4] = (kPdiff2 << 1) | 1; // P_DIFF N:1
packet[5] = (kPdiff3 << 1) | 0; // P_DIFF N:0
// I:1 P:1 L:0 F:1 B:1 E:0 V:0
// I: PICTURE ID:17
// I:
// P,F: P_DIFF:17 N:1 => refPicId = 17 - 17 = 0
// P,F: P_DIFF:18 N:1 => refPicId = (kMaxPictureId + 1) + 17 - 18 = 0x7FFF
// P,F: P_DIFF:127 N:0 => refPicId = (kMaxPictureId + 1) + 17 - 127 = 32658
expected_.beginning_of_frame = true;
expected_.inter_pic_predicted = true;
expected_.flexible_mode = true;
expected_.picture_id = kPictureId;
expected_.num_ref_pics = 3;
expected_.pid_diff[0] = kPdiff1;
expected_.pid_diff[1] = kPdiff2;
expected_.pid_diff[2] = kPdiff3;
expected_.ref_picture_id[0] = 0;
expected_.ref_picture_id[1] = 0x7FFF;
expected_.ref_picture_id[2] = 32658;
ParseAndCheckPacket(packet, expected_, kHeaderLength, sizeof(packet));
}
TEST_F(RtpDepacketizerVp9Test, ParseRefIdxFailsWithNoPictureId) {
const uint8_t kPdiff = 3;
uint8_t packet[13] = {0};
packet[0] = 0x58; // I:0 P:1 L:0 F:1 B:1 E:0 V:0 R:0
packet[1] = (kPdiff << 1); // P,F: P_DIFF:3 N:0
RtpDepacketizer::ParsedPayload parsed;
EXPECT_FALSE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
}
TEST_F(RtpDepacketizerVp9Test, ParseRefIdxFailsWithTooManyRefPics) {
const uint8_t kPdiff = 3;
uint8_t packet[13] = {0};
packet[0] = 0xD8; // I:1 P:1 L:0 F:1 B:1 E:0 V:0 R:0
packet[1] = kMaxOneBytePictureId; // I: PICTURE ID:127
packet[2] = (kPdiff << 1) | 1; // P,F: P_DIFF:3 N:1
packet[3] = (kPdiff << 1) | 1; // P,F: P_DIFF:3 N:1
packet[4] = (kPdiff << 1) | 1; // P,F: P_DIFF:3 N:1
packet[5] = (kPdiff << 1) | 0; // P,F: P_DIFF:3 N:0
RtpDepacketizer::ParsedPayload parsed;
EXPECT_FALSE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
}
TEST_F(RtpDepacketizerVp9Test, ParseSsData) {
const uint8_t kHeaderLength = 6;
const uint8_t kYbit = 0;
const size_t kNs = 2;
const size_t kNg = 2;
uint8_t packet[23] = {0};
packet[0] = 0x0A; // I:0 P:0 L:0 F:0 B:1 E:0 V:1 R:0
packet[1] = ((kNs - 1) << 5) | (kYbit << 4) | (1 << 3); // N_S Y G:1 -
packet[2] = kNg; // N_G
packet[3] = (0 << 5) | (1 << 4) | (0 << 2) | 0; // T:0 U:1 R:0 -
packet[4] = (2 << 5) | (0 << 4) | (1 << 2) | 0; // T:2 U:0 R:1 -
packet[5] = 33;
expected_.beginning_of_frame = true;
expected_.ss_data_available = true;
expected_.num_spatial_layers = kNs;
expected_.spatial_layer_resolution_present = kYbit ? true : false;
expected_.gof.num_frames_in_gof = kNg;
expected_.gof.temporal_idx[0] = 0;
expected_.gof.temporal_idx[1] = 2;
expected_.gof.temporal_up_switch[0] = true;
expected_.gof.temporal_up_switch[1] = false;
expected_.gof.num_ref_pics[0] = 0;
expected_.gof.num_ref_pics[1] = 1;
expected_.gof.pid_diff[1][0] = 33;
ParseAndCheckPacket(packet, expected_, kHeaderLength, sizeof(packet));
}
TEST_F(RtpDepacketizerVp9Test, ParseFirstPacketInKeyFrame) {
uint8_t packet[2] = {0};
packet[0] = 0x08; // I:0 P:0 L:0 F:0 B:1 E:0 V:0 R:0
RtpDepacketizer::ParsedPayload parsed;
ASSERT_TRUE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
EXPECT_EQ(kVideoFrameKey, parsed.frame_type);
EXPECT_TRUE(parsed.type.Video.isFirstPacket);
}
TEST_F(RtpDepacketizerVp9Test, ParseLastPacketInDeltaFrame) {
uint8_t packet[2] = {0};
packet[0] = 0x44; // I:0 P:1 L:0 F:0 B:0 E:1 V:0 R:0
RtpDepacketizer::ParsedPayload parsed;
ASSERT_TRUE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
EXPECT_EQ(kVideoFrameDelta, parsed.frame_type);
EXPECT_FALSE(parsed.type.Video.isFirstPacket);
}
TEST_F(RtpDepacketizerVp9Test, ParseResolution) {
const uint16_t kWidth[2] = {640, 1280};
const uint16_t kHeight[2] = {360, 720};
uint8_t packet[20] = {0};
packet[0] = 0x0A; // I:0 P:0 L:0 F:0 B:1 E:0 V:1 R:0
packet[1] = (1 << 5) | (1 << 4) | 0; // N_S:1 Y:1 G:0
packet[2] = kWidth[0] >> 8;
packet[3] = kWidth[0] & 0xFF;
packet[4] = kHeight[0] >> 8;
packet[5] = kHeight[0] & 0xFF;
packet[6] = kWidth[1] >> 8;
packet[7] = kWidth[1] & 0xFF;
packet[8] = kHeight[1] >> 8;
packet[9] = kHeight[1] & 0xFF;
RtpDepacketizer::ParsedPayload parsed;
ASSERT_TRUE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
EXPECT_EQ(kWidth[0], parsed.type.Video.width);
EXPECT_EQ(kHeight[0], parsed.type.Video.height);
}
TEST_F(RtpDepacketizerVp9Test, ParseFailsForNoPayloadLength) {
uint8_t packet[1] = {0};
RtpDepacketizer::ParsedPayload parsed;
EXPECT_FALSE(depacketizer_->Parse(&parsed, packet, 0));
}
TEST_F(RtpDepacketizerVp9Test, ParseFailsForTooShortBufferToFitPayload) {
const uint8_t kHeaderLength = 1;
uint8_t packet[kHeaderLength] = {0};
RtpDepacketizer::ParsedPayload parsed;
EXPECT_FALSE(depacketizer_->Parse(&parsed, packet, sizeof(packet)));
}
} // namespace webrtc

View File

@@ -14,11 +14,15 @@
#include <stdlib.h>
#include <string.h>
#include "webrtc/base/checks.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/trace_event.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp_defines.h"
#include "webrtc/modules/rtp_rtcp/source/byte_io.h"
#include "webrtc/modules/rtp_rtcp/source/producer_fec.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_video_generic.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_vp8.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_vp9.h"
#include "webrtc/modules/rtp_rtcp/source/rtp_format_h264.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/logging.h"
@@ -323,7 +327,7 @@ bool RTPSenderVideo::Send(const RtpVideoCodecTypes videoType,
// output multiple partitions for VP8. Should remove below check after the
// issue is fixed.
const RTPFragmentationHeader* frag =
(videoType == kRtpVideoVp8 || videoType == kRtpVideoVp9) ? NULL : fragmentation;
(videoType == kRtpVideoVp8) ? NULL : fragmentation;
packetizer->SetPayloadData(data, payload_bytes_to_send, frag);
@@ -360,7 +364,7 @@ bool RTPSenderVideo::Send(const RtpVideoCodecTypes videoType,
// a lock. It'll be a no-op if it's not registered.
// TODO(guoweis): For now, all packets sent will carry the CVO such that
// the RTP header length is consistent, although the receiver side will
// only exam the packets with market bit set.
// only exam the packets with marker bit set.
size_t packetSize = payloadSize + rtp_header_length;
RtpUtility::RtpHeaderParser rtp_parser(dataBuffer, packetSize);
RTPHeader rtp_header;

View File

@@ -164,7 +164,7 @@ int32_t DeviceInfoLinux::GetDeviceName(
} else {
// if there's no bus info to use for uniqueId, invent one - and it has to be repeatable
if (snprintf(deviceUniqueIdUTF8, deviceUniqueIdUTF8Length, "fake_%u", device_index) >=
deviceUniqueIdUTF8Length)
(int) deviceUniqueIdUTF8Length)
{
WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCapture, _id,
"buffer passed is too small");

View File

@@ -43,16 +43,35 @@ struct CodecSpecificInfoVP8 {
};
struct CodecSpecificInfoVP9 {
bool hasReceivedSLI;
uint8_t pictureIdSLI;
bool hasReceivedRPSI;
uint64_t pictureIdRPSI;
int16_t pictureId; // Negative value to skip pictureId.
bool nonReference;
uint8_t temporalIdx;
bool layerSync;
int tl0PicIdx; // Negative value to skip tl0PicIdx.
int8_t keyIdx; // Negative value to skip keyIdx.
bool has_received_sli;
uint8_t picture_id_sli;
bool has_received_rpsi;
uint64_t picture_id_rpsi;
int16_t picture_id; // Negative value to skip pictureId.
bool inter_pic_predicted; // This layer frame is dependent on previously
// coded frame(s).
bool flexible_mode;
bool ss_data_available;
int tl0_pic_idx; // Negative value to skip tl0PicIdx.
uint8_t temporal_idx;
uint8_t spatial_idx;
bool temporal_up_switch;
bool inter_layer_predicted; // Frame is dependent on directly lower spatial
// layer frame.
uint8_t gof_idx;
// SS data.
size_t num_spatial_layers; // Always populated.
bool spatial_layer_resolution_present;
uint16_t width[kMaxVp9NumberOfSpatialLayers];
uint16_t height[kMaxVp9NumberOfSpatialLayers];
GofInfoVP9 gof;
// Frame reference data.
uint8_t num_ref_pics;
uint8_t p_diff[kMaxVp9RefPics];
};
struct CodecSpecificInfoGeneric {
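Editorial aside, not part of the patch: a hedged sketch of how an encoder might populate the reworked VP9 struct above for a single-layer, flexible-mode delta frame; the field names come from the definition above, the values are illustrative only.
// Sketch only: illustrative values for a flexible-mode delta frame
// with one reference picture.
CodecSpecificInfoVP9 vp9;
vp9.has_received_sli = false;
vp9.has_received_rpsi = false;
vp9.picture_id = 5;              // Negative would mean "skip pictureId".
vp9.inter_pic_predicted = true;  // Depends on a previously coded frame.
vp9.flexible_mode = true;
vp9.ss_data_available = false;
vp9.tl0_pic_idx = -1;            // Skipped; unused in flexible mode.
vp9.temporal_idx = 0;
vp9.spatial_idx = 0;
vp9.temporal_up_switch = false;
vp9.inter_layer_predicted = false;
vp9.num_spatial_layers = 1;      // Always populated.
vp9.spatial_layer_resolution_present = false;
vp9.num_ref_pics = 1;
vp9.p_diff[0] = 1;               // References the previous picture.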

View File

@@ -0,0 +1,93 @@
/* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <algorithm>
#include "webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h"
#include "webrtc/base/checks.h"
namespace webrtc {
ScreenshareLayersVP9::ScreenshareLayersVP9(uint8_t num_layers)
: num_layers_(num_layers),
start_layer_(0),
last_timestamp_(0),
timestamp_initialized_(false) {
DCHECK_GT(num_layers, 0);
DCHECK_LE(num_layers, kMaxVp9NumberOfSpatialLayers);
memset(bits_used_, 0, sizeof(bits_used_));
memset(threshold_kbps_, 0, sizeof(threshold_kbps_));
}
uint8_t ScreenshareLayersVP9::GetStartLayer() const {
return start_layer_;
}
void ScreenshareLayersVP9::ConfigureBitrate(int threshold_kbps,
uint8_t layer_id) {
// The upper layer is always the layer we spill frames
// to when the bitrate becomes too high, therefore setting
// a max limit is not allowed. The top layer bitrate is
// never used either, so configuring it makes no difference.
DCHECK_LT(layer_id, num_layers_ - 1);
threshold_kbps_[layer_id] = threshold_kbps;
}
void ScreenshareLayersVP9::LayerFrameEncoded(unsigned int size_bytes,
uint8_t layer_id) {
DCHECK_LT(layer_id, num_layers_);
bits_used_[layer_id] += size_bytes * 8;
}
VP9EncoderImpl::SuperFrameRefSettings
ScreenshareLayersVP9::GetSuperFrameSettings(uint32_t timestamp,
bool is_keyframe) {
VP9EncoderImpl::SuperFrameRefSettings settings;
if (!timestamp_initialized_) {
last_timestamp_ = timestamp;
timestamp_initialized_ = true;
}
float time_diff = (timestamp - last_timestamp_) / 90.f;
float total_bits_used = 0;
float total_threshold_kbps = 0;
start_layer_ = 0;
// Up to (num_layers - 1) because we only have
// (num_layers - 1) thresholds to check.
for (int layer_id = 0; layer_id < num_layers_ - 1; ++layer_id) {
bits_used_[layer_id] = std::max(
0.f, bits_used_[layer_id] - time_diff * threshold_kbps_[layer_id]);
total_bits_used += bits_used_[layer_id];
total_threshold_kbps += threshold_kbps_[layer_id];
// If this is a keyframe then there should be no
// references to any previous frames.
if (!is_keyframe) {
settings.layer[layer_id].ref_buf1 = layer_id;
if (total_bits_used > total_threshold_kbps * 1000)
start_layer_ = layer_id + 1;
}
settings.layer[layer_id].upd_buf = layer_id;
}
// Since the above loop does not iterate over the last layer
// the reference of the last layer has to be set after the loop,
// and if this is a keyframe there should be no references to
// any previous frames.
if (!is_keyframe)
settings.layer[num_layers_ - 1].ref_buf1 = num_layers_ - 1;
settings.layer[num_layers_ - 1].upd_buf = num_layers_ - 1;
settings.is_keyframe = is_keyframe;
settings.start_layer = start_layer_;
settings.stop_layer = num_layers_ - 1;
last_timestamp_ = timestamp;
return settings;
}
} // namespace webrtc
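Editorial aside, not part of the patch: a hedged sketch of how an encode loop might drive the class above (the loop and the encoded size are hypothetical; timestamps are on the 90 kHz RTP clock):
// Sketch only: query the layer settings per frame, encode, then report
// the encoded size so the per-layer bit budgets stay current.
ScreenshareLayersVP9 layers(2);
layers.ConfigureBitrate(150, 0);  // Spill to layer 1 above 150 kbps.
uint32_t timestamp = 0;
for (int frame = 0; frame < 300; ++frame, timestamp += 3000) {  // 30 fps.
  bool is_keyframe = (frame == 0);
  VP9EncoderImpl::SuperFrameRefSettings settings =
      layers.GetSuperFrameSettings(timestamp, is_keyframe);
  // ... encode layers settings.start_layer .. settings.stop_layer ...
  size_t encoded_bytes = 4000;  // Made-up encoder output size.
  layers.LayerFrameEncoded(encoded_bytes, layers.GetStartLayer());
}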

View File

@@ -0,0 +1,66 @@
/* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_SCREENSHARE_LAYERS_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_SCREENSHARE_LAYERS_H_
#include "webrtc/modules/video_coding/codecs/vp9/vp9_impl.h"
namespace webrtc {
class ScreenshareLayersVP9 {
public:
explicit ScreenshareLayersVP9(uint8_t num_layers);
// Sets the bitrate threshold (in kbps) for the layer with id |layer_id|.
void ConfigureBitrate(int threshold_kbps, uint8_t layer_id);
// Returns the current start layer.
uint8_t GetStartLayer() const;
// Updates the bit budget of layer |layer_id| with the size of the
// encoded layer frame.
void LayerFrameEncoded(unsigned int size_bytes, uint8_t layer_id);
// Get the layer settings for the next superframe.
//
// In short, each time GetSuperFrameSettings() is called, the
// bitrate of every layer is calculated. If the cumulative
// bitrate up to and including a layer exceeds the configured
// cumulative threshold for that layer (set via ConfigureBitrate),
// the resulting encoding settings for the superframe will only
// encode the layers above it (see the worked example after this
// header).
VP9EncoderImpl::SuperFrameRefSettings GetSuperFrameSettings(
uint32_t timestamp,
bool is_keyframe);
private:
// The number of layers in use.
uint8_t num_layers_;
// The index of the first layer to encode.
uint8_t start_layer_;
// Cumulative target kbps for the different layers.
float threshold_kbps_[kMaxVp9NumberOfSpatialLayers - 1];
// How many bits have been used for a certain layer. Increased in
// LayerFrameEncoded() by the size of the encoded frame and decreased in
// GetSuperFrameSettings() depending on the time between frames.
float bits_used_[kMaxVp9NumberOfSpatialLayers];
// Timestamp of last frame.
uint32_t last_timestamp_;
// If the last_timestamp_ has been set.
bool timestamp_initialized_;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_SCREENSHARE_LAYERS_H_
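Editorial worked example of the rule above, under the leaky-bucket behavior implemented in screenshare_layers.cc: with two layers and ConfigureBitrate(100, 0), layer 0 is charged for every frame it encodes and its budget drains at 100 kbps, while the spill check compares the accumulated bits against one second's worth of the threshold (100 kb). If frames arrive every 200 ms at 25 kb each, the budget drains 20 kb and gains 25 kb per frame, so the surplus grows by 5 kb per frame and crosses 100 kb after roughly 20 frames; from then on GetSuperFrameSettings() returns start_layer = 1 until the drain brings the surplus back under the threshold.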

View File

@@ -0,0 +1,323 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <limits>
#include "testing/gtest/include/gtest/gtest.h"
#include "vpx/vp8cx.h"
#include "webrtc/base/logging.h"
#include "webrtc/modules/video_coding/codecs/vp9/screenshare_layers.h"
#include "webrtc/modules/video_coding/codecs/vp9/vp9_impl.h"
#include "webrtc/system_wrappers/include/clock.h"
namespace webrtc {
typedef VP9EncoderImpl::SuperFrameRefSettings Settings;
const uint32_t kTickFrequency = 90000;
class ScreenshareLayerTestVP9 : public ::testing::Test {
protected:
ScreenshareLayerTestVP9() : clock_(0) {}
virtual ~ScreenshareLayerTestVP9() {}
void InitScreenshareLayers(int layers) {
layers_.reset(new ScreenshareLayersVP9(layers));
}
void ConfigureBitrateForLayer(int kbps, uint8_t layer_id) {
layers_->ConfigureBitrate(kbps, layer_id);
}
void AdvanceTime(int64_t milliseconds) {
clock_.AdvanceTimeMilliseconds(milliseconds);
}
void AddKilobitsToLayer(int kilobits, uint8_t layer_id) {
layers_->LayerFrameEncoded(kilobits * 1000 / 8, layer_id);
}
void EqualRefsForLayer(const Settings& actual, uint8_t layer_id) {
EXPECT_EQ(expected_.layer[layer_id].upd_buf,
actual.layer[layer_id].upd_buf);
EXPECT_EQ(expected_.layer[layer_id].ref_buf1,
actual.layer[layer_id].ref_buf1);
EXPECT_EQ(expected_.layer[layer_id].ref_buf2,
actual.layer[layer_id].ref_buf2);
EXPECT_EQ(expected_.layer[layer_id].ref_buf3,
actual.layer[layer_id].ref_buf3);
}
void EqualRefs(const Settings& actual) {
for (unsigned int layer_id = 0; layer_id < kMaxVp9NumberOfSpatialLayers;
++layer_id) {
EqualRefsForLayer(actual, layer_id);
}
}
void EqualStartStopKeyframe(const Settings& actual) {
EXPECT_EQ(expected_.start_layer, actual.start_layer);
EXPECT_EQ(expected_.stop_layer, actual.stop_layer);
EXPECT_EQ(expected_.is_keyframe, actual.is_keyframe);
}
// Check that the settings returned by GetSuperFrameSettings() are
// equal to the expected_ settings.
void EqualToExpected() {
uint32_t frame_timestamp_ =
clock_.TimeInMilliseconds() * (kTickFrequency / 1000);
Settings actual =
layers_->GetSuperFrameSettings(frame_timestamp_, expected_.is_keyframe);
EqualRefs(actual);
EqualStartStopKeyframe(actual);
}
Settings expected_;
SimulatedClock clock_;
rtc::scoped_ptr<ScreenshareLayersVP9> layers_;
};
TEST_F(ScreenshareLayerTestVP9, NoRefsOnKeyFrame) {
const int kNumLayers = kMaxVp9NumberOfSpatialLayers;
InitScreenshareLayers(kNumLayers);
expected_.start_layer = 0;
expected_.stop_layer = kNumLayers - 1;
for (int l = 0; l < kNumLayers; ++l) {
expected_.layer[l].upd_buf = l;
}
expected_.is_keyframe = true;
EqualToExpected();
for (int l = 0; l < kNumLayers; ++l) {
expected_.layer[l].ref_buf1 = l;
}
expected_.is_keyframe = false;
EqualToExpected();
}
// Verify that it is not possible to send at a high bitrate (over the
// threshold) after a longer period at a low bitrate.
TEST_F(ScreenshareLayerTestVP9, DontAccumelateAvailableBitsOverTime) {
InitScreenshareLayers(2);
ConfigureBitrateForLayer(100, 0);
expected_.layer[0].upd_buf = 0;
expected_.layer[0].ref_buf1 = 0;
expected_.layer[1].upd_buf = 1;
expected_.layer[1].ref_buf1 = 1;
expected_.start_layer = 0;
expected_.stop_layer = 1;
// Send 10 frames at a low bitrate (50 kbps)
for (int i = 0; i < 10; ++i) {
AdvanceTime(200);
EqualToExpected();
AddKilobitsToLayer(10, 0);
}
AdvanceTime(200);
EqualToExpected();
AddKilobitsToLayer(301, 0);
// Send 10 frames at a high bitrate (200 kbps)
expected_.start_layer = 1;
for (int i = 0; i < 10; ++i) {
AdvanceTime(200);
EqualToExpected();
AddKilobitsToLayer(40, 1);
}
}
// Test that used bits are accumulated over layers, as they should be.
TEST_F(ScreenshareLayerTestVP9, AccumelateUsedBitsOverLayers) {
const int kNumLayers = kMaxVp9NumberOfSpatialLayers;
InitScreenshareLayers(kNumLayers);
for (int l = 0; l < kNumLayers - 1; ++l)
ConfigureBitrateForLayer(100, l);
for (int l = 0; l < kNumLayers; ++l) {
expected_.layer[l].upd_buf = l;
expected_.layer[l].ref_buf1 = l;
}
expected_.start_layer = 0;
expected_.stop_layer = kNumLayers - 1;
EqualToExpected();
for (int layer = 0; layer < kNumLayers - 1; ++layer) {
expected_.start_layer = layer;
EqualToExpected();
AddKilobitsToLayer(101, layer);
}
}
// General testing of the bitrate controller.
TEST_F(ScreenshareLayerTestVP9, 2LayerBitrate) {
InitScreenshareLayers(2);
ConfigureBitrateForLayer(100, 0);
expected_.layer[0].upd_buf = 0;
expected_.layer[1].upd_buf = 1;
expected_.layer[0].ref_buf1 = -1;
expected_.layer[1].ref_buf1 = -1;
expected_.start_layer = 0;
expected_.stop_layer = 1;
expected_.is_keyframe = true;
EqualToExpected();
AddKilobitsToLayer(100, 0);
expected_.layer[0].ref_buf1 = 0;
expected_.layer[1].ref_buf1 = 1;
expected_.is_keyframe = false;
AdvanceTime(199);
EqualToExpected();
AddKilobitsToLayer(100, 0);
expected_.start_layer = 1;
for (int frame = 0; frame < 3; ++frame) {
AdvanceTime(200);
EqualToExpected();
AddKilobitsToLayer(100, 1);
}
// Just before enough bits become available for L0 @0.999 seconds.
AdvanceTime(199);
EqualToExpected();
AddKilobitsToLayer(100, 1);
// Just after enough bits become available for L0 @1.0001 seconds.
expected_.start_layer = 0;
AdvanceTime(2);
EqualToExpected();
AddKilobitsToLayer(100, 0);
// Keyframes always encode all layers, even if it is over budget.
expected_.layer[0].ref_buf1 = -1;
expected_.layer[1].ref_buf1 = -1;
expected_.is_keyframe = true;
AdvanceTime(499);
EqualToExpected();
expected_.layer[0].ref_buf1 = 0;
expected_.layer[1].ref_buf1 = 1;
expected_.start_layer = 1;
expected_.is_keyframe = false;
EqualToExpected();
AddKilobitsToLayer(100, 0);
// 400 kb in L0 --> @3 second mark to fall below the threshold.
// Just before, @2.999 seconds.
expected_.is_keyframe = false;
AdvanceTime(1499);
EqualToExpected();
AddKilobitsToLayer(100, 1);
// just after @3.001 seconds.
expected_.start_layer = 0;
AdvanceTime(2);
EqualToExpected();
AddKilobitsToLayer(100, 0);
}
// General testing of the bitrate controller.
TEST_F(ScreenshareLayerTestVP9, 3LayerBitrate) {
InitScreenshareLayers(3);
ConfigureBitrateForLayer(100, 0);
ConfigureBitrateForLayer(100, 1);
for (int l = 0; l < 3; ++l) {
expected_.layer[l].upd_buf = l;
expected_.layer[l].ref_buf1 = l;
}
expected_.start_layer = 0;
expected_.stop_layer = 2;
EqualToExpected();
AddKilobitsToLayer(105, 0);
AddKilobitsToLayer(30, 1);
AdvanceTime(199);
EqualToExpected();
AddKilobitsToLayer(105, 0);
AddKilobitsToLayer(30, 1);
expected_.start_layer = 1;
AdvanceTime(200);
EqualToExpected();
AddKilobitsToLayer(130, 1);
expected_.start_layer = 2;
AdvanceTime(200);
EqualToExpected();
// 400 kb in L1 --> @1.0 second mark to fall below threshold.
// 210 kb in L0 --> @1.1 second mark to fall below threshold.
// Just before L1 @0.999 seconds.
AdvanceTime(399);
EqualToExpected();
// Just after L1 @1.001 seconds.
expected_.start_layer = 1;
AdvanceTime(2);
EqualToExpected();
// Just before L0 @1.099 seconds.
AdvanceTime(99);
EqualToExpected();
// Just after L0 @1.101 seconds.
expected_.start_layer = 0;
AdvanceTime(2);
EqualToExpected();
// @1.1 seconds
AdvanceTime(99);
EqualToExpected();
AddKilobitsToLayer(200, 1);
expected_.is_keyframe = true;
for (int l = 0; l < 3; ++l)
expected_.layer[l].ref_buf1 = -1;
AdvanceTime(200);
EqualToExpected();
expected_.is_keyframe = false;
expected_.start_layer = 2;
for (int l = 0; l < 3; ++l)
expected_.layer[l].ref_buf1 = l;
AdvanceTime(200);
EqualToExpected();
}
// Test that the bitrate calculations are
// correct when the timestamp wraps.
TEST_F(ScreenshareLayerTestVP9, TimestampWrap) {
InitScreenshareLayers(2);
ConfigureBitrateForLayer(100, 0);
expected_.layer[0].upd_buf = 0;
expected_.layer[0].ref_buf1 = 0;
expected_.layer[1].upd_buf = 1;
expected_.layer[1].ref_buf1 = 1;
expected_.start_layer = 0;
expected_.stop_layer = 1;
// Advance time to just before the timestamp wraps.
AdvanceTime(std::numeric_limits<uint32_t>::max() / (kTickFrequency / 1000));
EqualToExpected();
AddKilobitsToLayer(200, 0);
// Wrap
expected_.start_layer = 1;
AdvanceTime(1);
EqualToExpected();
}
} // namespace webrtc

View File

@@ -22,12 +22,20 @@
'conditions': [
['build_libvpx==1', {
'dependencies': [
'<(libvpx_dir)/libvpx.gyp:libvpx',
'<(libvpx_dir)/libvpx.gyp:libvpx_new',
],
}],
}, {
'include_dirs': [
'../../../../../../../libvpx',
],
}],
['build_vp9==1', {
'sources': [
'include/vp9.h',
'screenshare_layers.cc',
'screenshare_layers.h',
'vp9_frame_buffer_pool.cc',
'vp9_frame_buffer_pool.h',
'vp9_impl.cc',
'vp9_impl.h',
],

View File

@@ -0,0 +1,136 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*
*/
#include "webrtc/modules/video_coding/codecs/vp9/vp9_frame_buffer_pool.h"
#include "vpx/vpx_codec.h"
#include "vpx/vpx_decoder.h"
#include "vpx/vpx_frame_buffer.h"
#include "webrtc/base/checks.h"
#include "webrtc/system_wrappers/interface/logging.h"
namespace webrtc {
uint8_t* Vp9FrameBufferPool::Vp9FrameBuffer::GetData() {
return (uint8_t*)(data_.data());
}
size_t Vp9FrameBufferPool::Vp9FrameBuffer::GetDataSize() const {
return data_.size();
}
void Vp9FrameBufferPool::Vp9FrameBuffer::SetSize(size_t size) {
data_.SetSize(size);
}
bool Vp9FrameBufferPool::InitializeVpxUsePool(
vpx_codec_ctx* vpx_codec_context) {
DCHECK(vpx_codec_context);
// Tell libvpx to use this pool.
if (vpx_codec_set_frame_buffer_functions(
// In which context to use these callback functions.
vpx_codec_context,
// Called by libvpx when it needs another frame buffer.
&Vp9FrameBufferPool::VpxGetFrameBuffer,
// Called by libvpx when it no longer uses a frame buffer.
&Vp9FrameBufferPool::VpxReleaseFrameBuffer,
// |this| will be passed as |user_priv| to VpxGetFrameBuffer.
this)) {
// Failed to configure libvpx to use Vp9FrameBufferPool.
return false;
}
return true;
}
rtc::scoped_refptr<Vp9FrameBufferPool::Vp9FrameBuffer>
Vp9FrameBufferPool::GetFrameBuffer(size_t min_size) {
DCHECK_GT(min_size, 0u);
rtc::scoped_refptr<Vp9FrameBuffer> available_buffer = nullptr;
{
rtc::CritScope cs(&buffers_lock_);
// Do we have a buffer we can recycle?
for (const auto& buffer : allocated_buffers_) {
if (buffer->HasOneRef()) {
available_buffer = buffer;
break;
}
}
// Otherwise create one.
if (available_buffer == nullptr) {
available_buffer = new rtc::RefCountedObject<Vp9FrameBuffer>();
allocated_buffers_.push_back(available_buffer);
if (allocated_buffers_.size() > max_num_buffers_) {
LOG(LS_WARNING)
<< allocated_buffers_.size() << " Vp9FrameBuffers have been "
<< "allocated by a Vp9FrameBufferPool (exceeding what is "
<< "considered reasonable, " << max_num_buffers_ << ").";
RTC_NOTREACHED();
}
}
}
available_buffer->SetSize(min_size);
return available_buffer;
}
int Vp9FrameBufferPool::GetNumBuffersInUse() const {
int num_buffers_in_use = 0;
rtc::CritScope cs(&buffers_lock_);
for (const auto& buffer : allocated_buffers_) {
if (!buffer->HasOneRef())
++num_buffers_in_use;
}
return num_buffers_in_use;
}
void Vp9FrameBufferPool::ClearPool() {
rtc::CritScope cs(&buffers_lock_);
allocated_buffers_.clear();
}
// static
int32_t Vp9FrameBufferPool::VpxGetFrameBuffer(void* user_priv,
size_t min_size,
vpx_codec_frame_buffer* fb) {
DCHECK(user_priv);
DCHECK(fb);
Vp9FrameBufferPool* pool = static_cast<Vp9FrameBufferPool*>(user_priv);
rtc::scoped_refptr<Vp9FrameBuffer> buffer = pool->GetFrameBuffer(min_size);
fb->data = buffer->GetData();
fb->size = buffer->GetDataSize();
// Store Vp9FrameBuffer* in |priv| for use in VpxReleaseFrameBuffer.
// This also makes vpx_codec_get_frame return images with their |fb_priv| set
// to |buffer| which is important for external reference counting.
// Release from refptr so that the buffer's |ref_count_| remains 1 when
// |buffer| goes out of scope.
fb->priv = static_cast<void*>(buffer.release());
return 0;
}
// static
int32_t Vp9FrameBufferPool::VpxReleaseFrameBuffer(void* user_priv,
vpx_codec_frame_buffer* fb) {
DCHECK(user_priv);
DCHECK(fb);
Vp9FrameBuffer* buffer = static_cast<Vp9FrameBuffer*>(fb->priv);
if (buffer != nullptr) {
buffer->Release();
// If libvpx fails to decode and decoding is retried (and fails again),
// libvpx can try to release the same buffer multiple times. Setting
// |priv| to null protects against calling Release() multiple times.
fb->priv = nullptr;
}
return 0;
}
} // namespace webrtc

View File

@@ -0,0 +1,117 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*
*/
#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_FRAME_BUFFER_POOL_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_FRAME_BUFFER_POOL_H_
#include <vector>
#include "webrtc/base/basictypes.h"
#include "webrtc/base/buffer.h"
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/refcount.h"
#include "webrtc/base/scoped_ref_ptr.h"
struct vpx_codec_ctx;
struct vpx_codec_frame_buffer;
namespace webrtc {
// This memory pool is used to serve buffers to libvpx for decoding purposes in
// VP9, which is set up in InitializeVpxUsePool. After the initialization, any
// time libvpx wants to decode a frame it will use buffers provided and released
// through VpxGetFrameBuffer and VpxReleaseFrameBuffer.
// The benefit of owning the pool that libvpx relies on for decoding is that the
// decoded frames returned by libvpx (from vpx_codec_get_frame) use parts of our
// buffers for the decoded image data. By retaining ownership of this buffer
// using scoped_refptr, the image buffer can be reused by VideoFrames and no
// frame copy has to occur during decoding and frame delivery.
//
// Pseudo example usage case:
// Vp9FrameBufferPool pool;
// pool.InitializeVpxUsePool(decoder_ctx);
// ...
//
// // During decoding, libvpx will get and release buffers from the pool.
// vpx_codec_decode(decoder_ctx, ...);
//
// vpx_image_t* img = vpx_codec_get_frame(decoder_ctx, &iter);
// // Important to use scoped_refptr to protect it against being recycled by
// // the pool.
// scoped_refptr<Vp9FrameBuffer> img_buffer = (Vp9FrameBuffer*)img->fb_priv;
// ...
//
// // Destroying the codec will make libvpx release any buffers it was using.
// vpx_codec_destroy(decoder_ctx);
class Vp9FrameBufferPool {
public:
class Vp9FrameBuffer : public rtc::RefCountInterface {
public:
uint8_t* GetData();
size_t GetDataSize() const;
void SetSize(size_t size);
virtual bool HasOneRef() const = 0;
private:
// Data as an easily resizable buffer.
rtc::Buffer data_;
};
// Configures libvpx to, in the specified context, use this memory pool for
// buffers used to decompress frames. This is only supported for VP9.
bool InitializeVpxUsePool(vpx_codec_ctx* vpx_codec_context);
// Gets a frame buffer of at least |min_size|, recycling an available one or
// creating a new one. When no longer referenced from the outside the buffer
// becomes recyclable.
rtc::scoped_refptr<Vp9FrameBuffer> GetFrameBuffer(size_t min_size);
// Gets the number of buffers currently in use (not ready to be recycled).
int GetNumBuffersInUse() const;
// Releases allocated buffers, deleting available buffers. Buffers in use are
// not deleted until they are no longer referenced.
void ClearPool();
// InitializeVpxUsePool configures libvpx to call this function when it needs
// a new frame buffer. Parameters:
// |user_priv| Private data passed to libvpx, InitializeVpxUsePool sets it up
// to be a pointer to the pool.
// |min_size| Minimum size needed by libvpx (to decompress a frame).
// |fb| Pointer to the libvpx frame buffer object, this is updated to
// use the pool's buffer.
// Returns 0 on success. Returns < 0 on failure.
static int32_t VpxGetFrameBuffer(void* user_priv,
size_t min_size,
vpx_codec_frame_buffer* fb);
// InitializeVpxUsePool configures libvpx to call this function when it has
// finished using one of the pool's frame buffers. Parameters:
// |user_priv| Private data passed to libvpx, InitializeVpxUsePool sets it up
// to be a pointer to the pool.
// |fb| Pointer to the libvpx frame buffer object, its |priv| will be
// a pointer to one of the pool's Vp9FrameBuffer.
static int32_t VpxReleaseFrameBuffer(void* user_priv,
vpx_codec_frame_buffer* fb);
private:
// Protects |allocated_buffers_|.
mutable rtc::CriticalSection buffers_lock_;
// All buffers, in use or ready to be recycled.
std::vector<rtc::scoped_refptr<Vp9FrameBuffer>> allocated_buffers_
GUARDED_BY(buffers_lock_);
// If more buffers than this are allocated we print warnings, and crash if
// in debug mode.
static const size_t max_num_buffers_ = 10;
};
} // namespace webrtc
#endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP9_FRAME_BUFFER_POOL_H_
