Merge inbound to m-c. a=merge

Ryan VanderMeulen 2015-08-17 09:06:59 -04:00
commit 387dd3a494
148 changed files with 2672 additions and 1440 deletions

View File

@ -317,7 +317,7 @@
<key id="printKb" key="&printCmd.commandkey;" command="cmd_print" modifiers="accel"/>
<key id="key_close" key="&closeCmd.key;" command="cmd_close" modifiers="accel"/>
<key id="key_closeWindow" key="&closeCmd.key;" command="cmd_closeWindow" modifiers="accel,shift"/>
<key id="key_toggleMute" key="&toggleMuteCmd.key;" command="cmd_toggleMute" modifiers="alt,shift"/>
<key id="key_toggleMute" key="&toggleMuteCmd.key;" command="cmd_toggleMute" modifiers="control"/>
<key id="key_undo"
key="&undoCmd.key;"
modifiers="accel"/>

View File

@ -44,14 +44,18 @@ function* test_tooltip(icon, expectedTooltip) {
leave_icon(icon);
}
function* test_mute_tab(tab, icon, expectMuted) {
let mutedPromise = BrowserTestUtils.waitForEvent(tab, "TabAttrModified", false, (event) => {
function get_wait_for_mute_promise(tab, expectMuted) {
return BrowserTestUtils.waitForEvent(tab, "TabAttrModified", false, event => {
if (event.detail.changed.indexOf("muted") >= 0) {
is(tab.hasAttribute("muted"), expectMuted, "The tab should " + (expectMuted ? "" : "not ") + "be muted");
return true;
}
return false;
});
}
function* test_mute_tab(tab, icon, expectMuted) {
let mutedPromise = get_wait_for_mute_promise(tab, expectMuted);
let activeTab = gBrowser.selectedTab;
@ -260,6 +264,66 @@ function* test_cross_process_load() {
}, test_on_browser);
}
function* test_mute_keybinding() {
function* test_muting_using_keyboard(tab) {
let mutedPromise = get_wait_for_mute_promise(tab, true);
EventUtils.synthesizeKey("m", {ctrlKey: true});
yield mutedPromise;
mutedPromise = get_wait_for_mute_promise(tab, false);
EventUtils.synthesizeKey("m", {ctrlKey: true});
yield mutedPromise;
}
function* test_on_browser(browser) {
let tab = gBrowser.getTabForBrowser(browser);
// Make sure it's possible to mute before the tab is playing.
yield test_muting_using_keyboard(tab);
// Start playback.
yield ContentTask.spawn(browser, {}, function* () {
let audio = content.document.querySelector("audio");
audio.play();
});
// Wait for playback to start.
yield wait_for_tab_playing_event(tab, true);
// Make sure it's possible to mute after the tab is playing.
yield test_muting_using_keyboard(tab);
// Pause playback.
yield ContentTask.spawn(browser, {}, function* () {
let audio = content.document.querySelector("audio");
audio.pause();
});
// Make sure things work if the tab is pinned.
gBrowser.pinTab(tab);
// Make sure it's possible to mute before the tab is playing.
yield test_muting_using_keyboard(tab);
// Start playback.
yield ContentTask.spawn(browser, {}, function* () {
let audio = content.document.querySelector("audio");
audio.play();
});
// Wait for playback to start.
yield wait_for_tab_playing_event(tab, true);
// Make sure it's possible to mute after the tab is playing.
yield test_muting_using_keyboard(tab);
gBrowser.unpinTab(tab);
}
yield BrowserTestUtils.withNewTab({
gBrowser,
url: PAGE
}, test_on_browser);
}
function* test_on_browser(browser) {
let tab = gBrowser.getTabForBrowser(browser);
@ -304,3 +368,5 @@ add_task(function* test_page() {
add_task(test_click_on_pinned_tab_after_mute);
add_task(test_cross_process_load);
add_task(test_mute_keybinding);

View File

@ -103,6 +103,11 @@ private:
virtual void run(const MatchFinder::MatchResult &Result);
};
class NoAutoTypeChecker : public MatchFinder::MatchCallback {
public:
virtual void run(const MatchFinder::MatchResult &Result);
};
ScopeChecker scopeChecker;
ArithmeticArgChecker arithmeticArgChecker;
TrivialCtorDtorChecker trivialCtorDtorChecker;
@ -114,6 +119,7 @@ private:
NeedsNoVTableTypeChecker needsNoVTableTypeChecker;
NonMemMovableChecker nonMemMovableChecker;
ExplicitImplicitChecker explicitImplicitChecker;
NoAutoTypeChecker noAutoTypeChecker;
MatchFinder astMatcher;
};
@ -251,6 +257,15 @@ public:
}
void dumpAnnotationReason(DiagnosticsEngine &Diag, QualType T, SourceLocation Loc);
void reportErrorIfPresent(DiagnosticsEngine &Diag, QualType T, SourceLocation Loc,
unsigned ErrorID, unsigned NoteID) {
if (hasEffectiveAnnotation(T)) {
Diag.Report(Loc, ErrorID) << T;
Diag.Report(Loc, NoteID);
dumpAnnotationReason(Diag, T, Loc);
}
}
private:
bool hasLiteralAnnotation(QualType T) const;
AnnotationReason directAnnotationReason(QualType T);
@ -777,6 +792,15 @@ AST_MATCHER(CXXRecordDecl, isConcreteClass) {
return !Node.isAbstract();
}
AST_MATCHER(QualType, autoNonAutoableType) {
if (const AutoType *T = Node->getContainedAutoType()) {
if (const CXXRecordDecl *Rec = T->getAsCXXRecordDecl()) {
return MozChecker::hasCustomAnnotation(Rec, "moz_non_autoable");
}
}
return false;
}
}
}
@ -1023,6 +1047,9 @@ DiagnosticsMatcher::DiagnosticsMatcher() {
ofClass(allOf(isConcreteClass(), decl().bind("class"))),
unless(isMarkedImplicit())).bind("ctor"),
&explicitImplicitChecker);
astMatcher.addMatcher(varDecl(hasType(autoNonAutoableType())
).bind("node"), &noAutoTypeChecker);
}
// These enum variants determine whether an allocation has occurred in the code.
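For context, the variants the switch statement below dispatches on look roughly like this (a sketch reconstructed from the case labels; the real enum is defined elsewhere in this file):

enum AllocationVariety {
  AV_None,
  AV_Global,     // global or static storage duration
  AV_Automatic,  // stack (local) variable
  AV_Temporary,  // temporary object
  AV_Heap,       // heap allocation via operator new
};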
@ -1100,60 +1127,24 @@ void DiagnosticsMatcher::ScopeChecker::run(
return;
case AV_Global:
if (StackClass.hasEffectiveAnnotation(T)) {
Diag.Report(Loc, StackID) << T;
Diag.Report(Loc, GlobalNoteID);
StackClass.dumpAnnotationReason(Diag, T, Loc);
}
if (HeapClass.hasEffectiveAnnotation(T)) {
Diag.Report(Loc, HeapID) << T;
Diag.Report(Loc, GlobalNoteID);
HeapClass.dumpAnnotationReason(Diag, T, Loc);
}
StackClass.reportErrorIfPresent(Diag, T, Loc, StackID, GlobalNoteID);
HeapClass.reportErrorIfPresent(Diag, T, Loc, HeapID, GlobalNoteID);
break;
case AV_Automatic:
if (GlobalClass.hasEffectiveAnnotation(T)) {
Diag.Report(Loc, GlobalID) << T;
Diag.Report(Loc, StackNoteID);
GlobalClass.dumpAnnotationReason(Diag, T, Loc);
}
if (HeapClass.hasEffectiveAnnotation(T)) {
Diag.Report(Loc, HeapID) << T;
Diag.Report(Loc, StackNoteID);
HeapClass.dumpAnnotationReason(Diag, T, Loc);
}
GlobalClass.reportErrorIfPresent(Diag, T, Loc, GlobalID, StackNoteID);
HeapClass.reportErrorIfPresent(Diag, T, Loc, HeapID, StackNoteID);
break;
case AV_Temporary:
if (GlobalClass.hasEffectiveAnnotation(T)) {
Diag.Report(Loc, GlobalID) << T;
Diag.Report(Loc, TemporaryNoteID);
GlobalClass.dumpAnnotationReason(Diag, T, Loc);
}
if (HeapClass.hasEffectiveAnnotation(T)) {
Diag.Report(Loc, HeapID) << T;
Diag.Report(Loc, TemporaryNoteID);
HeapClass.dumpAnnotationReason(Diag, T, Loc);
}
GlobalClass.reportErrorIfPresent(Diag, T, Loc, GlobalID, TemporaryNoteID);
HeapClass.reportErrorIfPresent(Diag, T, Loc, HeapID, TemporaryNoteID);
break;
case AV_Heap:
if (GlobalClass.hasEffectiveAnnotation(T)) {
Diag.Report(Loc, GlobalID) << T;
Diag.Report(Loc, HeapNoteID);
GlobalClass.dumpAnnotationReason(Diag, T, Loc);
}
if (StackClass.hasEffectiveAnnotation(T)) {
Diag.Report(Loc, StackID) << T;
Diag.Report(Loc, HeapNoteID);
StackClass.dumpAnnotationReason(Diag, T, Loc);
}
if (NonHeapClass.hasEffectiveAnnotation(T)) {
Diag.Report(Loc, NonHeapID) << T;
Diag.Report(Loc, HeapNoteID);
NonHeapClass.dumpAnnotationReason(Diag, T, Loc);
}
GlobalClass.reportErrorIfPresent(Diag, T, Loc, GlobalID, HeapNoteID);
StackClass.reportErrorIfPresent(Diag, T, Loc, StackID, HeapNoteID);
NonHeapClass.reportErrorIfPresent(Diag, T, Loc, NonHeapID, HeapNoteID);
break;
}
}
@ -1375,6 +1366,20 @@ void DiagnosticsMatcher::ExplicitImplicitChecker::run(
Diag.Report(Ctor->getLocation(), NoteID);
}
void DiagnosticsMatcher::NoAutoTypeChecker::run(
const MatchFinder::MatchResult &Result) {
DiagnosticsEngine &Diag = Result.Context->getDiagnostics();
unsigned ErrorID = Diag.getDiagnosticIDs()->getCustomDiagID(
DiagnosticIDs::Error, "Cannot use auto to declare a variable of type %0");
unsigned NoteID = Diag.getDiagnosticIDs()->getCustomDiagID(
DiagnosticIDs::Note, "Please write out this type explicitly");
const VarDecl *D = Result.Nodes.getNodeAs<VarDecl>("node");
Diag.Report(D->getLocation(), ErrorID) << D->getType();
Diag.Report(D->getLocation(), NoteID);
}
class MozCheckAction : public PluginASTAction {
public:
ASTConsumerPtr CreateASTConsumer(CompilerInstance &CI, StringRef fileName) override {

View File

@ -0,0 +1,41 @@
#define MOZ_NON_AUTOABLE __attribute__((annotate("moz_non_autoable")))
template<class T>
struct MOZ_NON_AUTOABLE ExplicitTypeTemplate {};
struct MOZ_NON_AUTOABLE ExplicitType {};
struct NonExplicitType {};
void f() {
{
ExplicitType a;
auto b = a; // expected-error {{Cannot use auto to declare a variable of type 'ExplicitType'}} expected-note {{Please write out this type explicitly}}
auto &br = a; // expected-error {{Cannot use auto to declare a variable of type 'ExplicitType &'}} expected-note {{Please write out this type explicitly}}
const auto &brc = a; // expected-error {{Cannot use auto to declare a variable of type 'const ExplicitType &'}} expected-note {{Please write out this type explicitly}}
auto *bp = &a; // expected-error {{Cannot use auto to declare a variable of type 'ExplicitType *'}} expected-note {{Please write out this type explicitly}}
const auto *bpc = &a; // expected-error {{Cannot use auto to declare a variable of type 'const ExplicitType *'}} expected-note {{Please write out this type explicitly}}
}
{
ExplicitTypeTemplate<int> a;
auto b = a; // expected-error {{Cannot use auto to declare a variable of type 'ExplicitTypeTemplate<int>'}} expected-note {{Please write out this type explicitly}}
auto &br = a; // expected-error {{Cannot use auto to declare a variable of type 'ExplicitTypeTemplate<int> &'}} expected-note {{Please write out this type explicitly}}
const auto &brc = a; // expected-error {{Cannot use auto to declare a variable of type 'const ExplicitTypeTemplate<int> &'}} expected-note {{Please write out this type explicitly}}
auto *bp = &a; // expected-error {{Cannot use auto to declare a variable of type 'ExplicitTypeTemplate<int> *'}} expected-note {{Please write out this type explicitly}}
const auto *bpc = &a; // expected-error {{Cannot use auto to declare a variable of type 'const ExplicitTypeTemplate<int> *'}} expected-note {{Please write out this type explicitly}}
}
{
NonExplicitType c;
auto d = c;
auto &dr = c;
const auto &drc = c;
auto *dp = &c;
const auto *dpc = &c;
}
}
ExplicitType A;
auto B = A; // expected-error {{Cannot use auto to declare a variable of type 'ExplicitType'}} expected-note {{Please write out this type explicitly}}
NonExplicitType C;
auto D = C;

View File

@ -19,6 +19,7 @@ SOURCES += [
'TestNeedsNoVTableType.cpp',
'TestNoAddRefReleaseOnReturn.cpp',
'TestNoArithmeticExprInArgument.cpp',
'TestNoAutoType.cpp',
'TestNoDuplicateRefCntMember.cpp',
'TestNonHeapClass.cpp',
'TestNonMemMovable.cpp',

View File

@ -191,6 +191,8 @@ def UploadFiles(user, host, path, files, verbose=False, port=None, ssh_key=None,
print "Running post-upload command: " + post_upload_command
file_list = '"' + '" "'.join(remote_files) + '"'
output = DoSSHCommand('%s "%s" %s' % (post_upload_command, path, file_list), user, host, port=port, ssh_key=ssh_key)
# We print since mozharness may parse URLs from the output stream.
print output
if properties_file:
with open(properties_file, 'w') as outfile:
properties = GetUrlProperties(output, package)

View File

@ -51,6 +51,9 @@ Animation::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
void
Animation::SetEffect(KeyframeEffectReadOnly* aEffect)
{
// FIXME: We should perform an early return if aEffect == mEffect, but
// the current nsAnimationManager::CheckAnimationRule relies on this
// method updating timing even in that case.
if (mEffect) {
mEffect->SetParentTime(Nullable<TimeDuration>());
}
@ -58,7 +61,8 @@ Animation::SetEffect(KeyframeEffectReadOnly* aEffect)
if (mEffect) {
mEffect->SetParentTime(GetCurrentTime());
}
UpdateRelevance();
UpdateTiming(SeekFlag::NoSeek, SyncNotifyFlag::Async);
}
void

View File

@ -2261,7 +2261,7 @@ nsDOMWindowUtils::GetLayerManagerRemote(bool* retval)
}
NS_IMETHODIMP
nsDOMWindowUtils::GetSupportsHardwareH264Decoding(bool* retval)
nsDOMWindowUtils::GetSupportsHardwareH264Decoding(nsAString& aRetval)
{
MOZ_RELEASE_ASSERT(nsContentUtils::IsCallerChrome());
@ -2274,9 +2274,15 @@ nsDOMWindowUtils::GetSupportsHardwareH264Decoding(bool* retval)
if (!mgr)
return NS_ERROR_FAILURE;
*retval = MP4Decoder::IsVideoAccelerated(mgr->GetCompositorBackendType());
nsCString failureReason;
if (MP4Decoder::IsVideoAccelerated(mgr->GetCompositorBackendType(), failureReason)) {
aRetval.AssignLiteral("Yes");
} else {
aRetval.AssignLiteral("No; ");
AppendUTF8toUTF16(failureReason, aRetval);
}
#else
*retval = false;
aRetval.AssignLiteral("No; Compiled without MP4 support.");
#endif
return NS_OK;
}

View File

@ -83,12 +83,12 @@ nsDataDocumentContentPolicy::ShouldLoad(uint32_t aContentType,
// OR
// - URI loadable by subsumers, e.g. blob URIs
// Any URI that doesn't meet these requirements will be rejected below.
if (!HasFlags(aContentLocation,
nsIProtocolHandler::URI_IS_LOCAL_RESOURCE) ||
(!HasFlags(aContentLocation,
nsIProtocolHandler::URI_INHERITS_SECURITY_CONTEXT) &&
!HasFlags(aContentLocation,
nsIProtocolHandler::URI_LOADABLE_BY_SUBSUMERS))) {
if (!(HasFlags(aContentLocation,
nsIProtocolHandler::URI_IS_LOCAL_RESOURCE) &&
(HasFlags(aContentLocation,
nsIProtocolHandler::URI_INHERITS_SECURITY_CONTEXT) ||
HasFlags(aContentLocation,
nsIProtocolHandler::URI_LOADABLE_BY_SUBSUMERS)))) {
*aDecision = nsIContentPolicy::REJECT_TYPE;
// Report error, if we can.
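In shorthand, the corrected predicate reads as follows (a sketch with illustrative local names, not code from this patch): a data document may only load URIs that are local resources and that either inherit the security context or are loadable by subsumers.

// Illustrative restatement of the corrected condition above.
bool allowed = isLocalResource &&
               (inheritsSecurityContext || loadableBySubsumers);
if (!allowed) {
  *aDecision = nsIContentPolicy::REJECT_TYPE;
}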

View File

@ -2871,6 +2871,10 @@ public:
if (mElement) {
nsRefPtr<HTMLMediaElement> deathGrip = mElement;
mElement->PlaybackEnded();
// Update NextFrameStatus() to move to NEXT_FRAME_UNAVAILABLE and the
// ready state to HAVE_CURRENT_DATA.
mElement = nullptr;
NotifyWatchers();
}
}

View File

@ -49,7 +49,7 @@ interface nsIJSRAIIHelper;
interface nsIContentPermissionRequest;
interface nsIObserver;
[scriptable, uuid(6064615a-a782-4d08-86db-26ef3851208a)]
[scriptable, uuid(47fa312b-2ad1-4b80-8a0a-c9822e2d1ec9)]
interface nsIDOMWindowUtils : nsISupports {
/**
@ -1342,7 +1342,7 @@ interface nsIDOMWindowUtils : nsISupports {
* test video, does not mean that all h264 video decoding will be done
* in hardware.
*/
readonly attribute boolean supportsHardwareH264Decoding;
readonly attribute AString supportsHardwareH264Decoding;
/**
* Record (and return) frame-intervals for frames which were presented

View File

@ -612,7 +612,6 @@ ContentChild::Init(MessageLoop* aIOLoop,
IPC::Channel* aChannel)
{
#ifdef MOZ_WIDGET_GTK
// sigh
gtk_init(nullptr, nullptr);
#endif

View File

@ -5196,6 +5196,9 @@ ContentParent::RecvBeginDriverCrashGuard(const uint32_t& aGuardType, bool* aOutC
case gfx::CrashGuardType::D3D9Video:
guard = MakeUnique<gfx::D3D9VideoCrashGuard>(this);
break;
case gfx::CrashGuardType::GLContext:
guard = MakeUnique<gfx::GLContextCrashGuard>(this);
break;
default:
MOZ_ASSERT_UNREACHABLE("unknown crash guard type");
return false;

View File

@ -4,13 +4,13 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "DecodedStream.h"
#include "MediaStreamGraph.h"
#include "AudioSegment.h"
#include "VideoSegment.h"
#include "MediaQueue.h"
#include "DecodedStream.h"
#include "MediaData.h"
#include "MediaQueue.h"
#include "MediaStreamGraph.h"
#include "SharedBuffer.h"
#include "VideoSegment.h"
#include "VideoUtils.h"
namespace mozilla {
@ -18,11 +18,15 @@ namespace mozilla {
class DecodedStreamGraphListener : public MediaStreamListener {
typedef MediaStreamListener::MediaStreamGraphEvent MediaStreamGraphEvent;
public:
explicit DecodedStreamGraphListener(MediaStream* aStream)
DecodedStreamGraphListener(MediaStream* aStream,
MozPromiseHolder<GenericPromise>&& aPromise)
: mMutex("DecodedStreamGraphListener::mMutex")
, mStream(aStream)
, mLastOutputTime(aStream->StreamTimeToMicroseconds(aStream->GetCurrentTime()))
, mStreamFinishedOnMainThread(false) {}
, mStreamFinishedOnMainThread(false)
{
mFinishPromise = Move(aPromise);
}
void NotifyOutput(MediaStreamGraph* aGraph, GraphTime aCurrentTime) override
{
@ -43,6 +47,7 @@ public:
void DoNotifyFinished()
{
mFinishPromise.ResolveIfExists(true, __func__);
MutexAutoLock lock(mMutex);
mStreamFinishedOnMainThread = true;
}
@ -56,6 +61,7 @@ public:
void Forget()
{
MOZ_ASSERT(NS_IsMainThread());
mFinishPromise.ResolveIfExists(true, __func__);
MutexAutoLock lock(mMutex);
mStream = nullptr;
}
@ -72,6 +78,8 @@ private:
nsRefPtr<MediaStream> mStream;
int64_t mLastOutputTime; // microseconds
bool mStreamFinishedOnMainThread;
// Main thread only.
MozPromiseHolder<GenericPromise> mFinishPromise;
};
static void
@ -131,6 +139,8 @@ public:
// True if we need to send a compensation video frame to keep the
// StreamTime advancing.
bool mEOSVideoCompensation;
// This promise will be resolved when the SourceMediaStream is finished.
nsRefPtr<GenericPromise> mFinishPromise;
};
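The hand-off here follows the usual MozPromiseHolder pattern: the consumer obtains a promise via Ensure(), and the producer settles it at most once. A minimal sketch (illustrative, not code from this patch):

MozPromiseHolder<GenericPromise> holder;
// Consumer side: obtain the promise that will settle later.
nsRefPtr<GenericPromise> promise = holder.Ensure(__func__);
// Producer side (here: the graph listener), at most once.
holder.ResolveIfExists(true, __func__);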
DecodedStreamData::DecodedStreamData(SourceMediaStream* aStream, bool aPlaying)
@ -145,7 +155,10 @@ DecodedStreamData::DecodedStreamData(SourceMediaStream* aStream, bool aPlaying)
, mPlaying(aPlaying)
, mEOSVideoCompensation(false)
{
mListener = new DecodedStreamGraphListener(mStream);
MozPromiseHolder<GenericPromise> promise;
mFinishPromise = promise.Ensure(__func__);
// DecodedStreamGraphListener will resolve this promise.
mListener = new DecodedStreamGraphListener(mStream, Move(promise));
mStream->AddListener(mListener);
// Block the stream if we are not playing.
@ -244,14 +257,19 @@ DecodedStream::~DecodedStream()
{
}
void
nsRefPtr<GenericPromise>
DecodedStream::StartPlayback(int64_t aStartTime, const MediaInfo& aInfo)
{
ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
if (mStartTime.isNothing()) {
mStartTime.emplace(aStartTime);
mInfo = aInfo;
}
MOZ_ASSERT(mStartTime.isNothing(), "playback already started.");
mStartTime.emplace(aStartTime);
mInfo = aInfo;
// TODO: Unfortunately, the current call flow of MDSM guarantees mData is
// non-null when StartPlayback() is called, which imposes an obscure
// dependency on MDSM.
// We will align the life cycle of mData with {Start,Stop}Playback so that
// DecodedStream doesn't need to make assumptions about mData's life cycle.
return mData->mFinishPromise;
}
void DecodedStream::StopPlayback()
@ -680,12 +698,17 @@ DecodedStream::AdvanceTracks()
}
}
bool
void
DecodedStream::SendData()
{
ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
MOZ_ASSERT(mStartTime.isSome(), "Must be called after StartPlayback()");
// Nothing to do when the stream is finished.
if (mData->mHaveSentFinish) {
return;
}
InitTracks();
SendAudio(mVolume, mSameOrigin);
SendVideo(mSameOrigin);
@ -698,8 +721,6 @@ DecodedStream::SendData()
mData->mHaveSentFinish = true;
mData->mStream->Finish();
}
return finished;
}
int64_t

View File

@ -12,6 +12,7 @@
#include "mozilla/CheckedInt.h"
#include "mozilla/Maybe.h"
#include "mozilla/MozPromise.h"
#include "mozilla/nsRefPtr.h"
#include "mozilla/ReentrantMonitor.h"
#include "mozilla/UniquePtr.h"
@ -53,7 +54,11 @@ public:
// Mimic MDSM::StartAudioThread.
// Must be called before any calls to SendData().
void StartPlayback(int64_t aStartTime, const MediaInfo& aInfo);
//
// Return a promise which will be resolved when the stream is finished
// or rejected if an error occurs.
nsRefPtr<GenericPromise> StartPlayback(int64_t aStartTime,
const MediaInfo& aInfo);
// Mimic MDSM::StopAudioThread.
void StopPlayback();
@ -71,8 +76,7 @@ public:
bool IsFinished() const;
bool HasConsumers() const;
// Return true if stream is finished.
bool SendData();
void SendData();
protected:
virtual ~DecodedStream();

View File

@ -45,7 +45,6 @@ struct AutoProfilerUnregisterThread
GraphDriver::GraphDriver(MediaStreamGraphImpl* aGraphImpl)
: mIterationStart(0),
mIterationEnd(0),
mStateComputedTime(0),
mGraphImpl(aGraphImpl),
mWaitState(WAITSTATE_RUNNING),
mCurrentTimeStamp(TimeStamp::Now()),
@ -55,8 +54,7 @@ GraphDriver::GraphDriver(MediaStreamGraphImpl* aGraphImpl)
void GraphDriver::SetGraphTime(GraphDriver* aPreviousDriver,
GraphTime aLastSwitchNextIterationStart,
GraphTime aLastSwitchNextIterationEnd,
GraphTime aLastSwitchStateComputedTime)
GraphTime aLastSwitchNextIterationEnd)
{
// We set mIterationEnd here, because the first thing a driver does when it
// runs an iteration is to update graph times, so we are in fact setting
@ -64,7 +62,6 @@ void GraphDriver::SetGraphTime(GraphDriver* aPreviousDriver,
// iteration.
mIterationStart = aLastSwitchNextIterationStart;
mIterationEnd = aLastSwitchNextIterationEnd;
mStateComputedTime = aLastSwitchStateComputedTime;
STREAM_LOG(LogLevel::Debug, ("Setting previous driver: %p (%s)", aPreviousDriver, aPreviousDriver->AsAudioCallbackDriver() ? "AudioCallbackDriver" : "SystemClockDriver"));
MOZ_ASSERT(!mPreviousDriver);
@ -99,17 +96,10 @@ void GraphDriver::EnsureImmediateWakeUpLocked()
mGraphImpl->GetMonitor().Notify();
}
void GraphDriver::UpdateStateComputedTime(GraphTime aStateComputedTime)
GraphTime
GraphDriver::StateComputedTime() const
{
MOZ_ASSERT(aStateComputedTime >= mIterationEnd);
// The next state computed time can be the same as the previous one here: it
// means the driver would have been blocking indefinitely, but the graph has
// been woken up right after having gone to sleep.
if (aStateComputedTime < mStateComputedTime) {
printf("State time can't go backward %ld < %ld.\n", static_cast<long>(aStateComputedTime), static_cast<long>(mStateComputedTime));
}
mStateComputedTime = aStateComputedTime;
return mGraphImpl->mStateComputedTime;
}
void GraphDriver::EnsureNextIteration()
@ -238,8 +228,7 @@ ThreadedDriver::Revive()
// loop again.
MonitorAutoLock mon(mGraphImpl->GetMonitor());
if (mNextDriver) {
mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd,
mStateComputedTime);
mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd);
mGraphImpl->SetCurrentDriver(mNextDriver);
mNextDriver->Start();
} else {
@ -280,9 +269,10 @@ ThreadedDriver::RunThread()
mIterationStart = IterationEnd();
mIterationEnd += GetIntervalForIteration();
if (mStateComputedTime < mIterationEnd) {
GraphTime stateComputedTime = StateComputedTime();
if (stateComputedTime < mIterationEnd) {
STREAM_LOG(LogLevel::Warning, ("Media graph global underrun detected"));
mIterationEnd = mStateComputedTime;
mIterationEnd = stateComputedTime;
}
if (mIterationStart >= mIterationEnd) {
@ -295,22 +285,27 @@ ThreadedDriver::RunThread()
GraphTime nextStateComputedTime =
mGraphImpl->RoundUpToNextAudioBlock(
mIterationEnd + mGraphImpl->MillisecondsToMediaTime(AUDIO_TARGET_MS));
if (nextStateComputedTime < stateComputedTime) {
// A previous driver may have been processing further ahead of
// iterationEnd.
STREAM_LOG(LogLevel::Warning,
("Prevent state from going backwards. interval[%ld; %ld] state[%ld; %ld]",
(long)mIterationStart, (long)mIterationEnd,
(long)stateComputedTime, (long)nextStateComputedTime));
nextStateComputedTime = stateComputedTime;
}
STREAM_LOG(LogLevel::Debug,
("interval[%ld; %ld] state[%ld; %ld]",
(long)mIterationStart, (long)mIterationEnd,
(long)mStateComputedTime, (long)nextStateComputedTime));
(long)stateComputedTime, (long)nextStateComputedTime));
mGraphImpl->mFlushSourcesNow = mGraphImpl->mFlushSourcesOnNextIteration;
mGraphImpl->mFlushSourcesOnNextIteration = false;
stillProcessing = mGraphImpl->OneIteration(mIterationStart,
mIterationEnd,
StateComputedTime(),
nextStateComputedTime);
stillProcessing = mGraphImpl->OneIteration(nextStateComputedTime);
if (mNextDriver && stillProcessing) {
STREAM_LOG(LogLevel::Debug, ("Switching to AudioCallbackDriver"));
mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd,
mStateComputedTime);
mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd);
mGraphImpl->SetCurrentDriver(mNextDriver);
mNextDriver->Start();
return;
@ -327,7 +322,7 @@ SystemClockDriver::GetIntervalForIteration()
mCurrentTimeStamp = now;
MOZ_LOG(gMediaStreamGraphLog, LogLevel::Verbose,
("Updating current time to %f (real %f, mStateComputedTime %f)",
("Updating current time to %f (real %f, StateComputedTime() %f)",
mGraphImpl->MediaTimeToSeconds(IterationEnd() + interval),
(now - mInitialTimeStamp).ToSeconds(),
mGraphImpl->MediaTimeToSeconds(StateComputedTime())));
@ -573,8 +568,7 @@ AudioCallbackDriver::Init()
NS_WARNING("Could not create a cubeb stream for MediaStreamGraph, falling back to a SystemClockDriver");
// Fall back to a driver using a normal thread.
mNextDriver = new SystemClockDriver(GraphImpl());
mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd,
mStateComputedTime);
mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd);
mGraphImpl->SetCurrentDriver(mNextDriver);
DebugOnly<bool> found = mGraphImpl->RemoveMixerCallback(this);
NS_WARN_IF_FALSE(!found, "Mixer callback not added when switching?");
@ -667,8 +661,7 @@ AudioCallbackDriver::Revive()
// If we were switching, switch now. Otherwise, start the audio thread again.
MonitorAutoLock mon(mGraphImpl->GetMonitor());
if (mNextDriver) {
mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd,
mStateComputedTime);
mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd);
mGraphImpl->SetCurrentDriver(mNextDriver);
mNextDriver->Start();
} else {
@ -786,7 +779,8 @@ AudioCallbackDriver::DataCallback(AudioDataValue* aBuffer, long aFrames)
AutoInCallback aic(this);
#endif
if (mStateComputedTime == 0) {
GraphTime stateComputedTime = StateComputedTime();
if (stateComputedTime == 0) {
MonitorAutoLock mon(mGraphImpl->GetMonitor());
// Because this function is called during cubeb_stream_init (to prefill the
// audio buffers), it can be that we don't have a message here (because this
@ -822,15 +816,15 @@ AudioCallbackDriver::DataCallback(AudioDataValue* aBuffer, long aFrames)
// compute the iteration start and end from there, trying to keep the amount
// of buffering in the graph constant.
GraphTime nextStateComputedTime =
mGraphImpl->RoundUpToNextAudioBlock(mStateComputedTime + mBuffer.Available());
mGraphImpl->RoundUpToNextAudioBlock(stateComputedTime + mBuffer.Available());
mIterationStart = mIterationEnd;
// inGraph is the number of audio frames between the state time and
// the current time, i.e. the maximum theoretical length of the interval we
// could use as [mIterationStart; mIterationEnd].
GraphTime inGraph = mStateComputedTime - mIterationStart;
GraphTime inGraph = stateComputedTime - mIterationStart;
// We want the interval [mIterationStart; mIterationEnd] to be before the
// interval [mStateComputedTime; nextStateComputedTime]. We also want
// interval [stateComputedTime; nextStateComputedTime]. We also want
// the distance between these intervals to be roughly equivalent each time, to
// ensure there is no clock drift between current time and state time. Since
// we can't act on the state time because we have to fill the audio buffer, we
@ -839,21 +833,18 @@ AudioCallbackDriver::DataCallback(AudioDataValue* aBuffer, long aFrames)
STREAM_LOG(LogLevel::Debug, ("interval[%ld; %ld] state[%ld; %ld] (frames: %ld) (durationMS: %u) (duration ticks: %ld)\n",
(long)mIterationStart, (long)mIterationEnd,
(long)mStateComputedTime, (long)nextStateComputedTime,
(long)stateComputedTime, (long)nextStateComputedTime,
(long)aFrames, (uint32_t)durationMS,
(long)(nextStateComputedTime - mStateComputedTime)));
(long)(nextStateComputedTime - stateComputedTime)));
mCurrentTimeStamp = TimeStamp::Now();
if (mStateComputedTime < mIterationEnd) {
if (stateComputedTime < mIterationEnd) {
STREAM_LOG(LogLevel::Warning, ("Media graph global underrun detected"));
mIterationEnd = mStateComputedTime;
mIterationEnd = stateComputedTime;
}
stillProcessing = mGraphImpl->OneIteration(mIterationStart,
mIterationEnd,
mStateComputedTime,
nextStateComputedTime);
stillProcessing = mGraphImpl->OneIteration(nextStateComputedTime);
} else {
NS_WARNING("DataCallback buffer filled entirely from scratch buffer, skipping iteration.");
stillProcessing = true;
@ -871,8 +862,7 @@ AudioCallbackDriver::DataCallback(AudioDataValue* aBuffer, long aFrames)
}
}
STREAM_LOG(LogLevel::Debug, ("Switching to system driver."));
mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd,
mStateComputedTime);
mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd);
mGraphImpl->SetCurrentDriver(mNextDriver);
mNextDriver->Start();
// Returning less than aFrames starts the draining and eventually stops the
@ -979,8 +969,7 @@ AudioCallbackDriver::DeviceChangedCallback() {
mCallbackReceivedWhileSwitching = 0;
mGraphImpl->mFlushSourcesOnNextIteration = true;
mNextDriver = new SystemClockDriver(GraphImpl());
mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd,
mStateComputedTime);
mNextDriver->SetGraphTime(this, mIterationStart, mIterationEnd);
mGraphImpl->SetCurrentDriver(mNextDriver);
mNextDriver->Start();
#endif

View File

@ -127,10 +127,6 @@ public:
return mIterationEnd;
}
GraphTime StateComputedTime() {
return mStateComputedTime;
}
virtual void GetAudioBuffer(float** aBuffer, long& aFrames) {
MOZ_CRASH("This is not an Audio GraphDriver!");
}
@ -155,15 +151,7 @@ public:
*/
void SetGraphTime(GraphDriver* aPreviousDriver,
GraphTime aLastSwitchNextIterationStart,
GraphTime aLastSwitchNextIterationEnd,
GraphTime aLastSwitchStateComputedTime);
/**
* Whenever the graph has computed the time until it has all state
* (mStateComputedTime), it calls this to indicate the new time until which
* we have computed state.
*/
void UpdateStateComputedTime(GraphTime aStateComputedTime);
GraphTime aLastSwitchNextIterationEnd);
/**
* Call this to indicate that another iteration of the control loop is
@ -190,12 +178,12 @@ public:
virtual bool OnThread() = 0;
protected:
GraphTime StateComputedTime() const;
// Time of the start of this graph iteration.
GraphTime mIterationStart;
// Time of the end of this graph iteration.
GraphTime mIterationEnd;
// Time, in the future, for which blocking has been computed.
GraphTime mStateComputedTime;
// The MediaStreamGraphImpl that owns this driver. This has a lifetime longer
// than the driver, and will never be null.
MediaStreamGraphImpl* mGraphImpl;

View File

@ -1984,6 +1984,9 @@ MediaCacheStream::Close()
{
NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
if (!mInitialized)
return;
ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
CloseInternal(mon);
// Queue an Update since we may have created more free space. Don't do

View File

@ -308,7 +308,7 @@ public:
bool aKeyframe,
int64_t aTimecode,
IntSize aDisplay,
int32_t aFrameID);
uint32_t aFrameID);
protected:
~VideoData();

View File

@ -376,7 +376,7 @@ void MediaDecoderStateMachine::SendStreamData()
AssertCurrentThreadInMonitor();
MOZ_ASSERT(!mAudioSink, "Should've been stopped in RunStateMachine()");
bool finished = mDecodedStream->SendData();
mDecodedStream->SendData();
const auto clockTime = GetClock();
while (true) {
@ -392,12 +392,6 @@ void MediaDecoderStateMachine::SendStreamData()
}
break;
}
// To be consistent with AudioSink, |mAudioCompleted| is not set
// until all samples are drained.
if (finished && AudioQueue().GetSize() == 0) {
mAudioCompleted = true;
}
}
bool MediaDecoderStateMachine::HaveEnoughDecodedAudio(int64_t aAmpleAudioUSecs)
@ -1079,12 +1073,7 @@ void MediaDecoderStateMachine::MaybeStartPlayback()
MOZ_ASSERT(IsPlaying());
StartAudioThread();
// Tell DecodedStream to start playback with specified start time and media
// info. This is consistent with how we create AudioSink in StartAudioThread().
if (mAudioCaptured) {
mDecodedStream->StartPlayback(GetMediaTime(), mInfo);
}
StartDecodedStream();
DispatchDecodeTasksIfNeeded();
}
@ -1792,6 +1781,32 @@ MediaDecoderStateMachine::StartAudioThread()
}
}
void
MediaDecoderStateMachine::StopDecodedStream()
{
MOZ_ASSERT(OnTaskQueue());
AssertCurrentThreadInMonitor();
mDecodedStream->StopPlayback();
mDecodedStreamPromise.DisconnectIfExists();
}
void
MediaDecoderStateMachine::StartDecodedStream()
{
MOZ_ASSERT(OnTaskQueue());
AssertCurrentThreadInMonitor();
// Tell DecodedStream to start playback with specified start time and media
// info. This is consistent with how we create AudioSink in StartAudioThread().
if (mAudioCaptured && !mDecodedStreamPromise.Exists()) {
mDecodedStreamPromise.Begin(
mDecodedStream->StartPlayback(GetMediaTime(), mInfo)->Then(
OwnerThread(), __func__, this,
&MediaDecoderStateMachine::OnDecodedStreamFinish,
&MediaDecoderStateMachine::OnDecodedStreamError));
}
}
int64_t MediaDecoderStateMachine::AudioDecodedUsecs()
{
MOZ_ASSERT(OnTaskQueue());
@ -2395,7 +2410,7 @@ nsresult MediaDecoderStateMachine::RunStateMachine()
// Stop the audio sink after the call to AudioEndTime() above; otherwise it will
// return an incorrect value due to a null mAudioSink.
StopAudioThread();
mDecodedStream->StopPlayback();
StopDecodedStream();
}
return NS_OK;
@ -2425,7 +2440,7 @@ MediaDecoderStateMachine::Reset()
// outside of the decoder monitor while we are clearing the queue, which causes
// a crash when there are no samples to be popped.
StopAudioThread();
mDecodedStream->StopPlayback();
StopDecodedStream();
mVideoFrameEndTime = -1;
mDecodedVideoEndTime = -1;
@ -2526,7 +2541,7 @@ void MediaDecoderStateMachine::RenderVideoFrames(int32_t aMaxFrames,
img->mFrameID = frame->mFrameID;
img->mProducerID = mProducerID;
VERBOSE_LOG("playing video frame %lld (id=%d) (queued=%i, state-machine=%i, decoder-queued=%i)",
VERBOSE_LOG("playing video frame %lld (id=%x) (queued=%i, state-machine=%i, decoder-queued=%i)",
frame->mTime, frame->mFrameID,
VideoQueue().GetSize() + mReader->SizeOfVideoQueueInFrames(),
VideoQueue().GetSize(), mReader->SizeOfVideoQueueInFrames());
@ -3076,6 +3091,32 @@ void MediaDecoderStateMachine::OnAudioSinkError()
DecodeError();
}
void
MediaDecoderStateMachine::OnDecodedStreamFinish()
{
MOZ_ASSERT(OnTaskQueue());
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
MOZ_ASSERT(mAudioCaptured, "Audio should be captured.");
mDecodedStreamPromise.Complete();
if (mInfo.HasAudio()) {
mAudioCompleted = true;
}
// Schedule now so that PlaybackEnded is notified as soon as possible.
ScheduleStateMachine();
}
void
MediaDecoderStateMachine::OnDecodedStreamError()
{
MOZ_ASSERT(OnTaskQueue());
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
MOZ_ASSERT(mAudioCaptured, "Audio should be captured.");
mDecodedStreamPromise.Complete();
DecodeError();
}
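Taken together, StartDecodedStream(), StopDecodedStream() and the two callbacks above follow the standard MozPromiseRequestHolder lifecycle. Schematically (mirroring the calls made in those methods, not new code):

mDecodedStreamPromise.Begin(promise->Then(...));  // StartDecodedStream: track the request
mDecodedStreamPromise.Complete();                 // OnDecodedStreamFinish/Error: request settled
mDecodedStreamPromise.DisconnectIfExists();       // StopDecodedStream: cancel if still pending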
uint32_t MediaDecoderStateMachine::GetAmpleVideoFrames() const
{
MOZ_ASSERT(OnTaskQueue());
@ -3099,7 +3140,7 @@ void MediaDecoderStateMachine::DispatchAudioCaptured()
// Start DecodedStream if we are already playing. Otherwise it will be
// handled in MaybeStartPlayback().
if (self->IsPlaying()) {
self->mDecodedStream->StartPlayback(self->GetMediaTime(), self->mInfo);
self->StartDecodedStream();
}
self->ScheduleStateMachine();
}
@ -3115,7 +3156,8 @@ void MediaDecoderStateMachine::DispatchAudioUncaptured()
MOZ_ASSERT(self->OnTaskQueue());
ReentrantMonitorAutoEnter mon(self->mDecoder->GetReentrantMonitor());
if (self->mAudioCaptured) {
// Start again the audio sink
self->StopDecodedStream();
// Start the audio sink again.
self->mAudioCaptured = false;
if (self->IsPlaying()) {
self->StartAudioThread();

View File

@ -518,6 +518,10 @@ protected:
// one lock count. Called on the state machine thread.
void StartAudioThread();
void StopDecodedStream();
void StartDecodedStream();
// Notification method invoked when mPlayState changes.
void PlayStateChanged();
@ -670,6 +674,10 @@ private:
// Rejected by the AudioSink to signal errors.
void OnAudioSinkError();
void OnDecodedStreamFinish();
void OnDecodedStreamError();
// Return true if the video decoder's decode speed cannot keep up with the
// play time.
bool NeedToSkipToNextKeyframe();
@ -1284,6 +1292,7 @@ private:
nsRefPtr<MediaResource> mResource;
MozPromiseRequestHolder<GenericPromise> mAudioSinkPromise;
MozPromiseRequestHolder<GenericPromise> mDecodedStreamPromise;
MediaEventListener mAudioQueueListener;
MediaEventListener mVideoQueueListener;

View File

@ -1038,7 +1038,11 @@ MediaFormatReader::DecodeDemuxedSamples(TrackType aTrack,
if (aTrack == TrackInfo::kVideoTrack) {
aA.mParsed++;
}
decoder.mDecoder->Input(sample);
if (NS_FAILED(decoder.mDecoder->Input(sample))) {
LOG("Unable to pass frame to decoder");
NotifyError(aTrack);
return;
}
decoder.mQueuedSamples.RemoveElementAt(0);
samplesPending = true;
}
@ -1114,8 +1118,9 @@ MediaFormatReader::Update(TrackType aTrack)
if (!decoder.mOutput.IsEmpty()) {
// We have a decoded sample ready to be returned.
if (aTrack == TrackType::kVideoTrack) {
nsCString error;
mVideo.mIsHardwareAccelerated =
mVideo.mDecoder && mVideo.mDecoder->IsHardwareAccelerated();
mVideo.mDecoder && mVideo.mDecoder->IsHardwareAccelerated(error);
}
while (decoder.mOutput.Length()) {
nsRefPtr<MediaData> output = decoder.mOutput[0];

View File

@ -284,6 +284,26 @@ public:
uint32_t mIndex;
};
Chunk* FindChunkContaining(StreamTime aOffset, StreamTime* aStart = nullptr)
{
if (aOffset < 0) {
return nullptr;
}
StreamTime offset = 0;
for (uint32_t i = 0; i < mChunks.Length(); ++i) {
Chunk& c = mChunks[i];
StreamTime nextOffset = offset + c.GetDuration();
if (aOffset < nextOffset) {
if (aStart) {
*aStart = offset;
}
return &c;
}
offset = nextOffset;
}
return nullptr;
}
void RemoveLeading(StreamTime aDuration)
{
RemoveLeading(aDuration, 0);
@ -356,26 +376,6 @@ protected:
return c;
}
Chunk* FindChunkContaining(StreamTime aOffset, StreamTime* aStart = nullptr)
{
if (aOffset < 0) {
return nullptr;
}
StreamTime offset = 0;
for (uint32_t i = 0; i < mChunks.Length(); ++i) {
Chunk& c = mChunks[i];
StreamTime nextOffset = offset + c.GetDuration();
if (aOffset < nextOffset) {
if (aStart) {
*aStart = offset;
}
return &c;
}
offset = nextOffset;
}
return nullptr;
}
Chunk* GetLastChunk()
{
if (mChunks.IsEmpty()) {

View File

@ -79,10 +79,6 @@ MediaStreamGraphImpl::FinishStream(MediaStream* aStream)
STREAM_LOG(LogLevel::Debug, ("MediaStream %p will finish", aStream));
aStream->mFinished = true;
aStream->mBuffer.AdvanceKnownTracksTime(STREAM_TIME_MAX);
// Force at least one more iteration of the control loop, since we rely
// on UpdateCurrentTimeForStreams to notify our listeners once the stream end
// has been reached.
EnsureNextIteration();
SetStreamOrderDirty();
}
@ -108,7 +104,7 @@ MediaStreamGraphImpl::AddStream(MediaStream* aStream)
mSuspendedStreams.AppendElement(aStream);
STREAM_LOG(LogLevel::Debug, ("Adding media stream %p to the graph, in the suspended stream array", aStream));
} else {
aStream->mBufferStartTime = IterationEnd();
aStream->mBufferStartTime = mProcessedTime;
mStreams.AppendElement(aStream);
STREAM_LOG(LogLevel::Debug, ("Adding media stream %p to the graph", aStream));
}
@ -171,8 +167,8 @@ MediaStreamGraphImpl::ExtractPendingInput(SourceMediaStream* aStream,
// the stream at all between mBlockingDecisionsMadeUntilTime and
// aDesiredUpToTime.
StreamTime t =
GraphTimeToStreamTime(aStream, CurrentDriver()->StateComputedTime()) +
(aDesiredUpToTime - CurrentDriver()->StateComputedTime());
GraphTimeToStreamTime(aStream, mStateComputedTime) +
(aDesiredUpToTime - mStateComputedTime);
STREAM_LOG(LogLevel::Verbose, ("Calling NotifyPull aStream=%p t=%f current end=%f", aStream,
MediaTimeToSeconds(t),
MediaTimeToSeconds(aStream->mBuffer.GetEnd())));
@ -254,12 +250,12 @@ StreamTime
MediaStreamGraphImpl::GraphTimeToStreamTime(MediaStream* aStream,
GraphTime aTime)
{
MOZ_ASSERT(aTime <= CurrentDriver()->StateComputedTime(),
MOZ_ASSERT(aTime <= mStateComputedTime,
"Don't ask about times where we haven't made blocking decisions yet");
if (aTime <= IterationEnd()) {
if (aTime <= mProcessedTime) {
return std::max<StreamTime>(0, aTime - aStream->mBufferStartTime);
}
GraphTime t = IterationEnd();
GraphTime t = mProcessedTime;
StreamTime s = t - aStream->mBufferStartTime;
while (t < aTime) {
GraphTime end;
@ -275,7 +271,7 @@ StreamTime
MediaStreamGraphImpl::GraphTimeToStreamTimeOptimistic(MediaStream* aStream,
GraphTime aTime)
{
GraphTime computedUpToTime = std::min(CurrentDriver()->StateComputedTime(), aTime);
GraphTime computedUpToTime = std::min(mStateComputedTime, aTime);
StreamTime s = GraphTimeToStreamTime(aStream, computedUpToTime);
return s + (aTime - computedUpToTime);
}
@ -287,7 +283,8 @@ MediaStreamGraphImpl::StreamTimeToGraphTime(MediaStream* aStream,
if (aTime >= STREAM_TIME_MAX) {
return GRAPH_TIME_MAX;
}
MediaTime bufferElapsedToCurrentTime = IterationEnd() - aStream->mBufferStartTime;
MediaTime bufferElapsedToCurrentTime =
mProcessedTime - aStream->mBufferStartTime;
if (aTime < bufferElapsedToCurrentTime ||
(aTime == bufferElapsedToCurrentTime && !(aFlags & INCLUDE_TRAILING_BLOCKED_INTERVAL))) {
return aTime + aStream->mBufferStartTime;
@ -296,16 +293,16 @@ MediaStreamGraphImpl::StreamTimeToGraphTime(MediaStream* aStream,
MediaTime streamAmount = aTime - bufferElapsedToCurrentTime;
NS_ASSERTION(streamAmount >= 0, "Can't answer queries before current time");
GraphTime t = IterationEnd();
GraphTime t = mProcessedTime;
while (t < GRAPH_TIME_MAX) {
if (!(aFlags & INCLUDE_TRAILING_BLOCKED_INTERVAL) && streamAmount == 0) {
return t;
}
bool blocked;
GraphTime end;
if (t < CurrentDriver()->StateComputedTime()) {
if (t < mStateComputedTime) {
blocked = aStream->mBlocked.GetAt(t, &end);
end = std::min(end, CurrentDriver()->StateComputedTime());
end = std::min(end, mStateComputedTime);
} else {
blocked = false;
end = GRAPH_TIME_MAX;
@ -336,7 +333,7 @@ MediaStreamGraphImpl::StreamNotifyOutput(MediaStream* aStream)
{
for (uint32_t j = 0; j < aStream->mListeners.Length(); ++j) {
MediaStreamListener* l = aStream->mListeners[j];
l->NotifyOutput(this, IterationEnd());
l->NotifyOutput(this, mProcessedTime);
}
}
@ -348,12 +345,9 @@ MediaStreamGraphImpl::StreamReadyToFinish(MediaStream* aStream)
// The stream is fully finished when all of its track data has been played
// out.
if (IterationEnd() >=
if (mProcessedTime >=
aStream->StreamTimeToGraphTime(aStream->GetStreamBuffer().GetAllTracksEnd())) {
NS_WARN_IF_FALSE(aStream->mNotifiedBlocked,
"Should've notified blocked=true for a fully finished stream");
aStream->mNotifiedFinished = true;
aStream->mLastPlayedVideoFrame.SetNull();
SetStreamOrderDirty();
for (uint32_t j = 0; j < aStream->mListeners.Length(); ++j) {
MediaStreamListener* l = aStream->mListeners[j];
@ -406,9 +400,7 @@ MediaStreamGraphImpl::UpdateCurrentTimeForStreams(GraphTime aPrevCurrentTime,
if (runningAndSuspendedPair[array] == &mStreams) {
bool streamHasOutput = blockedTime < aNextCurrentTime - aPrevCurrentTime;
// Make this an assertion when bug 957832 is fixed.
NS_WARN_IF_FALSE(
!streamHasOutput || !stream->mNotifiedFinished,
NS_ASSERTION(!streamHasOutput || !stream->mNotifiedFinished,
"Shouldn't have already notified of finish *and* have output!");
if (streamHasOutput) {
@ -441,13 +433,13 @@ MediaStreamGraphImpl::WillUnderrun(MediaStream* aStream, GraphTime aTime,
StreamTimeToGraphTime(aStream, aStream->GetBufferEnd(),
INCLUDE_TRAILING_BLOCKED_INTERVAL);
#ifdef DEBUG
if (bufferEnd < IterationEnd()) {
if (bufferEnd < mProcessedTime) {
STREAM_LOG(LogLevel::Error, ("MediaStream %p underrun, "
"bufferEnd %f < IterationEnd() %f (%lld < %lld), Streamtime %lld",
aStream, MediaTimeToSeconds(bufferEnd), MediaTimeToSeconds(IterationEnd()),
bufferEnd, IterationEnd(), aStream->GetBufferEnd()));
"bufferEnd %f < mProcessedTime %f (%lld < %lld), Streamtime %lld",
aStream, MediaTimeToSeconds(bufferEnd), MediaTimeToSeconds(mProcessedTime),
bufferEnd, mProcessedTime, aStream->GetBufferEnd()));
aStream->DumpTrackInfo();
NS_ASSERTION(bufferEnd >= IterationEnd(), "Buffer underran");
NS_ASSERTION(bufferEnd >= mProcessedTime, "Buffer underran");
}
#endif
// We should block after bufferEnd.
@ -749,10 +741,8 @@ MediaStreamGraphImpl::UpdateStreamOrder()
void
MediaStreamGraphImpl::RecomputeBlocking(GraphTime aEndBlockingDecisions)
{
bool blockingDecisionsWillChange = false;
STREAM_LOG(LogLevel::Verbose, ("Media graph %p computing blocking for time %f",
this, MediaTimeToSeconds(CurrentDriver()->StateComputedTime())));
this, MediaTimeToSeconds(mStateComputedTime)));
nsTArray<MediaStream*>* runningAndSuspendedPair[2];
runningAndSuspendedPair[0] = &mStreams;
runningAndSuspendedPair[1] = &mSuspendedStreams;
@ -768,33 +758,24 @@ MediaStreamGraphImpl::RecomputeBlocking(GraphTime aEndBlockingDecisions)
AddBlockingRelatedStreamsToSet(&streamSet, stream);
GraphTime end;
for (GraphTime t = CurrentDriver()->StateComputedTime();
for (GraphTime t = mStateComputedTime;
t < aEndBlockingDecisions; t = end) {
end = GRAPH_TIME_MAX;
RecomputeBlockingAt(streamSet, t, aEndBlockingDecisions, &end);
if (end < GRAPH_TIME_MAX) {
blockingDecisionsWillChange = true;
}
}
}
GraphTime end;
stream->mBlocked.GetAt(IterationEnd(), &end);
if (end < GRAPH_TIME_MAX) {
blockingDecisionsWillChange = true;
}
}
}
STREAM_LOG(LogLevel::Verbose, ("Media graph %p computed blocking for interval %f to %f",
this, MediaTimeToSeconds(CurrentDriver()->StateComputedTime()),
this, MediaTimeToSeconds(mStateComputedTime),
MediaTimeToSeconds(aEndBlockingDecisions)));
CurrentDriver()->UpdateStateComputedTime(aEndBlockingDecisions);
if (blockingDecisionsWillChange) {
// Make sure we wake up to notify listeners about these changes.
EnsureNextIteration();
}
MOZ_ASSERT(aEndBlockingDecisions >= mProcessedTime);
// The next state computed time can be the same as the previous: it
// means the driver would have been blocking indefinitely, but the graph has
// been woken up right after having gone to sleep.
MOZ_ASSERT(aEndBlockingDecisions >= mStateComputedTime);
mStateComputedTime = aEndBlockingDecisions;
}
void
@ -1123,69 +1104,140 @@ MediaStreamGraphImpl::PlayVideo(MediaStream* aStream)
if (aStream->mVideoOutputs.IsEmpty())
return;
// Display the next frame a bit early. This is better than letting the current
// frame be displayed for too long. Because depending on the GraphDriver in
// use, we can't really estimate the graph interval duration, we clamp it to
// the current state computed time.
GraphTime framePosition = IterationEnd() + MillisecondsToMediaTime(CurrentDriver()->IterationDuration());
if (framePosition > CurrentDriver()->StateComputedTime()) {
#ifdef DEBUG
if (std::abs(framePosition - CurrentDriver()->StateComputedTime()) >= MillisecondsToMediaTime(5)) {
STREAM_LOG(LogLevel::Debug, ("Graph thread slowdown?"));
}
#endif
framePosition = CurrentDriver()->StateComputedTime();
}
MOZ_ASSERT(framePosition >= aStream->mBufferStartTime, "frame position before buffer?");
StreamTime frameBufferTime = GraphTimeToStreamTime(aStream, framePosition);
TimeStamp currentTimeStamp = CurrentDriver()->GetCurrentTimeStamp();
// Collect any new frames produced in this iteration.
nsAutoTArray<ImageContainer::NonOwningImage,4> newImages;
nsRefPtr<Image> blackImage;
MOZ_ASSERT(mProcessedTime >= aStream->mBufferStartTime, "frame position before buffer?");
StreamTime frameBufferTime = GraphTimeToStreamTime(aStream, mProcessedTime);
StreamTime bufferEndTime = GraphTimeToStreamTime(aStream, mStateComputedTime);
StreamTime start;
const VideoFrame* frame = nullptr;
for (StreamBuffer::TrackIter tracks(aStream->GetStreamBuffer(), MediaSegment::VIDEO);
!tracks.IsEnded(); tracks.Next()) {
VideoSegment* segment = tracks->Get<VideoSegment>();
StreamTime thisStart;
const VideoFrame* thisFrame =
segment->GetFrameAt(frameBufferTime, &thisStart);
if (thisFrame && thisFrame->GetImage()) {
start = thisStart;
frame = thisFrame;
const VideoChunk* chunk;
for ( ;
frameBufferTime < bufferEndTime;
frameBufferTime = start + chunk->GetDuration()) {
// Pick the last track that has a video chunk for the time, and
// schedule its frame.
chunk = nullptr;
for (StreamBuffer::TrackIter tracks(aStream->GetStreamBuffer(),
MediaSegment::VIDEO);
!tracks.IsEnded();
tracks.Next()) {
VideoSegment* segment = tracks->Get<VideoSegment>();
StreamTime thisStart;
const VideoChunk* thisChunk =
segment->FindChunkContaining(frameBufferTime, &thisStart);
if (thisChunk && thisChunk->mFrame.GetImage()) {
start = thisStart;
chunk = thisChunk;
}
}
if (!chunk)
break;
const VideoFrame* frame = &chunk->mFrame;
if (*frame == aStream->mLastPlayedVideoFrame) {
continue;
}
Image* image = frame->GetImage();
STREAM_LOG(LogLevel::Verbose,
("MediaStream %p writing video frame %p (%dx%d)",
aStream, image, frame->GetIntrinsicSize().width,
frame->GetIntrinsicSize().height));
// Schedule this frame after the previous frame finishes, instead of at
// its start time. These times only differ in the case of multiple
// tracks.
GraphTime frameTime =
StreamTimeToGraphTime(aStream, frameBufferTime,
INCLUDE_TRAILING_BLOCKED_INTERVAL);
TimeStamp targetTime = currentTimeStamp +
TimeDuration::FromSeconds(MediaTimeToSeconds(frameTime - IterationEnd()));
if (frame->GetForceBlack()) {
if (!blackImage) {
blackImage = aStream->mVideoOutputs[0]->
GetImageContainer()->CreateImage(ImageFormat::PLANAR_YCBCR);
if (blackImage) {
// Sets the image to a single black pixel, which will be scaled to
// fill the rendered size.
SetImageToBlackPixel(static_cast<PlanarYCbCrImage*>
(blackImage.get()));
}
}
if (blackImage) {
image = blackImage;
}
}
newImages.AppendElement(ImageContainer::NonOwningImage(image, targetTime));
aStream->mLastPlayedVideoFrame = *frame;
}
if (!frame || *frame == aStream->mLastPlayedVideoFrame)
if (!aStream->mLastPlayedVideoFrame.GetImage())
return;
STREAM_LOG(LogLevel::Verbose, ("MediaStream %p writing video frame %p (%dx%d)",
aStream, frame->GetImage(), frame->GetIntrinsicSize().width,
frame->GetIntrinsicSize().height));
GraphTime startTime = StreamTimeToGraphTime(aStream,
start, INCLUDE_TRAILING_BLOCKED_INTERVAL);
TimeStamp targetTime = CurrentDriver()->GetCurrentTimeStamp() +
TimeDuration::FromMilliseconds(double(startTime - IterationEnd()));
nsAutoTArray<ImageContainer::NonOwningImage,4> images;
bool haveMultipleImages = false;
for (uint32_t i = 0; i < aStream->mVideoOutputs.Length(); ++i) {
VideoFrameContainer* output = aStream->mVideoOutputs[i];
if (frame->GetForceBlack()) {
nsRefPtr<Image> image =
output->GetImageContainer()->CreateImage(ImageFormat::PLANAR_YCBCR);
if (image) {
// Sets the image to a single black pixel, which will be scaled to fill
// the rendered size.
SetImageToBlackPixel(static_cast<PlanarYCbCrImage*>(image.get()));
}
output->SetCurrentFrame(frame->GetIntrinsicSize(), image,
targetTime);
} else {
output->SetCurrentFrame(frame->GetIntrinsicSize(), frame->GetImage(),
targetTime);
// Find previous frames that may still be valid.
nsAutoTArray<ImageContainer::OwningImage,4> previousImages;
output->GetImageContainer()->GetCurrentImages(&previousImages);
uint32_t j = previousImages.Length();
if (j) {
// Re-use the most recent frame before currentTimeStamp and any subsequent
// frames, always keeping at least one frame.
do {
--j;
} while (j > 0 && previousImages[j].mTimeStamp > currentTimeStamp);
}
if (previousImages.Length() - j + newImages.Length() > 1) {
haveMultipleImages = true;
}
// Don't update if there are no changes.
if (j == 0 && newImages.IsEmpty())
continue;
for ( ; j < previousImages.Length(); ++j) {
const auto& image = previousImages[j];
// Cope with potential clock skew with AudioCallbackDriver.
if (newImages.Length() && image.mTimeStamp > newImages[0].mTimeStamp) {
STREAM_LOG(LogLevel::Warning,
("Dropping %u video frames due to clock skew",
unsigned(previousImages.Length() - j)));
break;
}
images.AppendElement(ImageContainer::
NonOwningImage(image.mImage,
image.mTimeStamp, image.mFrameID));
}
// Add the frames from this iteration.
for (auto& image : newImages) {
image.mFrameID = output->NewFrameID();
images.AppendElement(image);
}
output->SetCurrentFrames(aStream->mLastPlayedVideoFrame.GetIntrinsicSize(),
images);
nsCOMPtr<nsIRunnable> event =
new VideoFrameContainerInvalidateRunnable(output);
DispatchToMainThreadAfterStreamStateUpdate(event.forget());
images.ClearAndRetainStorage();
}
if (!aStream->mNotifiedFinished) {
aStream->mLastPlayedVideoFrame = *frame;
// If the stream has finished and the timestamps of all frames have expired
// then no more updates are required.
if (aStream->mFinished && !haveMultipleImages) {
aStream->mLastPlayedVideoFrame.SetNull();
}
}
@ -1221,7 +1273,7 @@ MediaStreamGraphImpl::PrepareUpdatesToMainThreadState(bool aFinalUpdate)
StreamUpdate* update = mStreamUpdates.AppendElement();
update->mStream = stream;
update->mNextMainThreadCurrentTime =
GraphTimeToStreamTime(stream, IterationEnd());
GraphTimeToStreamTime(stream, mProcessedTime);
update->mNextMainThreadFinished = stream->mNotifiedFinished;
}
if (!mPendingUpdateRunnables.IsEmpty()) {
@ -1316,13 +1368,13 @@ MediaStreamGraphImpl::UpdateGraph(GraphTime aEndBlockingDecision)
}
// The loop is woken up so soon that IterationEnd() barely advances and we
// end up having aEndBlockingDecision == CurrentDriver()->StateComputedTime().
// end up having aEndBlockingDecision == mStateComputedTime.
// Since stream blocking is computed in the interval of
// [CurrentDriver()->StateComputedTime(), aEndBlockingDecision), it won't be computed at all.
// [mStateComputedTime, aEndBlockingDecision), it won't be computed at all.
// We should ensure a next iteration so that pending blocking changes are
// computed in the next loop.
if (ensureNextIteration ||
aEndBlockingDecision == CurrentDriver()->StateComputedTime()) {
aEndBlockingDecision == mStateComputedTime) {
EnsureNextIteration();
}
@ -1418,8 +1470,7 @@ MediaStreamGraphImpl::Process(GraphTime aFrom, GraphTime aTo)
}
bool
MediaStreamGraphImpl::OneIteration(GraphTime aFrom, GraphTime aTo,
GraphTime aStateFrom, GraphTime aStateEnd)
MediaStreamGraphImpl::OneIteration(GraphTime aStateEnd)
{
{
MonitorAutoLock lock(mMemoryReportMonitor);
@ -1439,12 +1490,14 @@ MediaStreamGraphImpl::OneIteration(GraphTime aFrom, GraphTime aTo,
}
}
UpdateCurrentTimeForStreams(aFrom, aTo);
GraphTime stateFrom = mStateComputedTime;
GraphTime stateEnd = std::min(aStateEnd, mEndTime);
UpdateGraph(stateEnd);
Process(aStateFrom, stateEnd);
Process(stateFrom, stateEnd);
mProcessedTime = stateEnd;
UpdateCurrentTimeForStreams(stateFrom, stateEnd);
// Send updates to the main thread and wait for the next control loop
// iteration.
@ -1589,7 +1642,6 @@ public:
virtual void Run() override
{
mStream->GraphImpl()->AddStream(mStream);
mStream->Init();
}
virtual void RunDuringShutdown() override
{
@ -1898,15 +1950,6 @@ MediaStream::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
}
void
MediaStream::Init()
{
MediaStreamGraphImpl* graph = GraphImpl();
mBlocked.SetAtAndAfter(graph->IterationEnd(), true);
mExplicitBlockerCount.SetAtAndAfter(graph->IterationEnd(), true);
mExplicitBlockerCount.SetAtAndAfter(graph->CurrentDriver()->StateComputedTime(), false);
}
MediaStreamGraphImpl*
MediaStream::GraphImpl()
{
@ -2139,7 +2182,7 @@ MediaStream::ChangeExplicitBlockerCount(int32_t aDelta)
virtual void Run()
{
mStream->ChangeExplicitBlockerCountImpl(
mStream->GraphImpl()->CurrentDriver()->StateComputedTime(), mDelta);
mStream->GraphImpl()->mStateComputedTime, mDelta);
}
int32_t mDelta;
};
@ -2162,7 +2205,7 @@ MediaStream::BlockStreamIfNeeded()
virtual void Run()
{
mStream->BlockStreamIfNeededImpl(
mStream->GraphImpl()->CurrentDriver()->StateComputedTime());
mStream->GraphImpl()->mStateComputedTime);
}
};
@ -2182,7 +2225,7 @@ MediaStream::UnblockStreamIfNeeded()
virtual void Run()
{
mStream->UnblockStreamIfNeededImpl(
mStream->GraphImpl()->CurrentDriver()->StateComputedTime());
mStream->GraphImpl()->mStateComputedTime);
}
};
@ -2635,7 +2678,7 @@ SourceMediaStream::GetBufferedTicks(TrackID aID)
MediaSegment* segment = track->GetSegment();
if (segment) {
return segment->GetDuration() -
GraphTimeToStreamTime(GraphImpl()->CurrentDriver()->StateComputedTime());
GraphTimeToStreamTime(GraphImpl()->mStateComputedTime);
}
}
return 0;
@ -3259,7 +3302,7 @@ MediaStreamGraphImpl::MoveStreams(AudioContextOperation aAudioContextOperation,
// set their buffer start time to the appropriate value now:
if (aAudioContextOperation == AudioContextOperation::Resume &&
stream->mBufferStartTime == START_TIME_DELAYED) {
stream->mBufferStartTime = IterationEnd();
stream->mBufferStartTime = mProcessedTime;
}
stream->remove();
@ -3445,7 +3488,7 @@ MediaStreamGraph::StartNonRealtimeProcessing(uint32_t aTicksToProcess)
return;
graph->mEndTime =
graph->RoundUpToNextAudioBlock(graph->CurrentDriver()->StateComputedTime() +
graph->RoundUpToNextAudioBlock(graph->mStateComputedTime +
aTicksToProcess - 1);
graph->mNonRealtimeProcessing = true;
graph->EnsureRunInStableState();

View File

@ -433,8 +433,6 @@ public:
virtual AudioNodeStream* AsAudioNodeStream() { return nullptr; }
virtual CameraPreviewMediaStream* AsCameraPreviewStream() { return nullptr; }
// media graph thread only
void Init();
// These Impl methods perform the core functionality of the control methods
// above, on the media graph thread.
/**
@ -632,8 +630,8 @@ protected:
};
nsTArray<AudioOutput> mAudioOutputs;
nsTArray<nsRefPtr<VideoFrameContainer> > mVideoOutputs;
// We record the last played video frame to avoid redundant setting
// of the current video frame.
// We record the last played video frame to avoid playing the frame again
// with a different frame id.
VideoFrame mLastPlayedVideoFrame;
// The number of times this stream has been explicitly blocked by the control
// API, minus the number of times it has been explicitly unblocked.

View File

@ -54,8 +54,8 @@ public:
MOZ_COUNT_DTOR(ControlMessage);
}
// Do the action of this message on the MediaStreamGraph thread. Any actions
// affecting graph processing should take effect at mStateComputedTime.
// All stream data for times < mStateComputedTime has already been
// affecting graph processing should take effect at mProcessedTime.
// All stream data for times < mProcessedTime has already been
// computed.
virtual void Run() = 0;
// When we're shutting down the application, most messages are ignored but
@ -173,8 +173,7 @@ public:
*/
void DoIteration();
bool OneIteration(GraphTime aFrom, GraphTime aTo,
GraphTime aStateFrom, GraphTime aStateEnd);
bool OneIteration(GraphTime aStateEnd);
bool Running() const
{
@ -448,7 +447,8 @@ public:
double MediaTimeToSeconds(GraphTime aTime) const
{
NS_ASSERTION(0 <= aTime && aTime <= STREAM_TIME_MAX, "Bad time");
NS_ASSERTION(aTime > -STREAM_TIME_MAX && aTime <= STREAM_TIME_MAX,
"Bad time");
return static_cast<double>(aTime)/GraphRate();
}
@ -561,6 +561,17 @@ public:
* cycles.
*/
uint32_t mFirstCycleBreaker;
/**
* Blocking decisions have been computed up to this time.
* Between each iteration, this is the same as mProcessedTime.
*/
GraphTime mStateComputedTime = 0;
/**
* All stream contents have been computed up to this time.
* The next batch of updates from the main thread will be processed
* at this time. This is behind mStateComputedTime during processing.
*/
GraphTime mProcessedTime = 0;
/**
* Date of the last time we updated the main thread with the graph state.
*/
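To make the new invariant concrete: between iterations the two clocks agree, and during an iteration mStateComputedTime runs ahead of mProcessedTime. A minimal sketch assembled from the OneIteration() hunk above (not the real driver loop; shutdown handling and main-thread updates are elided, and the point where mStateComputedTime advances is assumed to be inside UpdateGraph()):
bool MediaStreamGraphImpl::OneIteration(GraphTime aStateEnd)
{
  GraphTime stateFrom = mStateComputedTime;           // clocks equal here
  GraphTime stateEnd = std::min(aStateEnd, mEndTime);
  UpdateGraph(stateEnd);          // blocking decisions move ahead to stateEnd
  Process(stateFrom, stateEnd);   // stream contents catch up
  mProcessedTime = stateEnd;      // clocks equal again
  UpdateCurrentTimeForStreams(stateFrom, stateEnd);
  return true;
}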

View File

@ -56,6 +56,15 @@ public:
// E.g. if the last painted frame should have been painted at time t,
// but was actually painted at t+n, this returns n in seconds. Threadsafe.
double GetFrameDelay();
// Returns a new frame ID for SetCurrentFrames(). The client must either
// call this on only one thread or provide barriers. Do not use together
// with SetCurrentFrame().
ImageContainer::FrameID NewFrameID()
{
return ++mFrameID;
}
// Call on main thread
enum {
INVALIDATE_DEFAULT,
@ -83,8 +92,8 @@ protected:
// specifies that the Image should be stretched to have the correct aspect
// ratio.
gfxIntSize mIntrinsicSize;
// For SetCurrentFrame callers we maintain our own mFrameID which is auto-
// incremented at every SetCurrentFrame.
// We maintain our own mFrameID which is auto-incremented at every
// SetCurrentFrame() or NewFrameID() call.
ImageContainer::FrameID mFrameID;
// True when the intrinsic size has been changed by SetCurrentFrame() since
// the last call to Invalidate().

View File

@ -117,14 +117,6 @@ public:
StreamTime aDuration,
const IntSize& aIntrinsicSize,
bool aForceBlack = false);
const VideoFrame* GetFrameAt(StreamTime aOffset, StreamTime* aStart = nullptr)
{
VideoChunk* c = FindChunkContaining(aOffset, aStart);
if (!c) {
return nullptr;
}
return &c->mFrame;
}
const VideoFrame* GetLastFrame(StreamTime* aStart = nullptr)
{
VideoChunk* c = GetLastChunk();

View File

@ -283,14 +283,15 @@ CreateTestH264Decoder(layers::LayersBackend aBackend,
}
/* static */ bool
MP4Decoder::IsVideoAccelerated(layers::LayersBackend aBackend)
MP4Decoder::IsVideoAccelerated(layers::LayersBackend aBackend, nsACString& aFailureReason)
{
VideoInfo config;
nsRefPtr<MediaDataDecoder> decoder(CreateTestH264Decoder(aBackend, config));
if (!decoder) {
aFailureReason.AssignLiteral("Failed to create H264 decoder");
return false;
}
bool result = decoder->IsHardwareAccelerated();
bool result = decoder->IsHardwareAccelerated(aFailureReason);
return result;
}

View File

@ -42,7 +42,7 @@ public:
// Returns true if the MP4 backend is preffed on.
static bool IsEnabled();
static bool IsVideoAccelerated(layers::LayersBackend aBackend);
static bool IsVideoAccelerated(layers::LayersBackend aBackend, nsACString& aReason);
static bool CanCreateAACDecoder();
static bool CanCreateH264Decoder();
};

View File

@ -263,7 +263,7 @@ public:
// Called from the state machine task queue or main thread.
// Decoder needs to decide whether or not hardware acceleration is supported
// after creation. It doesn't need to call Init() before calling this function.
virtual bool IsHardwareAccelerated() const { return false; }
virtual bool IsHardwareAccelerated(nsACString& aFailureReason) const { return false; }
// ConfigurationChanged will be called to inform the video or audio decoder
// that the format of the next input sample is about to change.

View File

@ -299,9 +299,9 @@ SharedDecoderProxy::Shutdown()
}
bool
SharedDecoderProxy::IsHardwareAccelerated() const
SharedDecoderProxy::IsHardwareAccelerated(nsACString& aFailureReason) const
{
return mManager->mDecoder->IsHardwareAccelerated();
return mManager->mDecoder->IsHardwareAccelerated(aFailureReason);
}
} // namespace mozilla

View File

@ -79,7 +79,7 @@ public:
virtual nsresult Flush() override;
virtual nsresult Drain() override;
virtual nsresult Shutdown() override;
virtual bool IsHardwareAccelerated() const override;
virtual bool IsHardwareAccelerated(nsACString& aFailureReason) const override;
friend class SharedDecoderManager;

View File

@ -29,7 +29,6 @@ VPXDecoder::VPXDecoder(const VideoInfo& aConfig,
: mImageContainer(aImageContainer)
, mTaskQueue(aTaskQueue)
, mCallback(aCallback)
, mIter(nullptr)
, mInfo(aConfig)
{
MOZ_COUNT_CTOR(VPXDecoder);
@ -74,7 +73,6 @@ nsresult
VPXDecoder::Flush()
{
mTaskQueue->Flush();
mIter = nullptr;
return NS_OK;
}
@ -99,9 +97,10 @@ VPXDecoder::DoDecodeFrame(MediaRawData* aSample)
return -1;
}
vpx_codec_iter_t iter = nullptr;
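// The iterator is now local to each DoDecodeFrame() call, and the loop below
// drains every frame decoded from this packet instead of returning after the
// first one as the old mIter-based code did.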
vpx_image_t *img;
if ((img = vpx_codec_get_frame(&mVPX, &mIter))) {
while ((img = vpx_codec_get_frame(&mVPX, &iter))) {
NS_ASSERTION(img->fmt == VPX_IMG_FMT_I420, "WebM image format not I420");
// Chroma shifts are rounded down as per the decoding examples in the SDK
@ -143,9 +142,7 @@ VPXDecoder::DoDecodeFrame(MediaRawData* aSample)
return -1;
}
mCallback->Output(v);
return 1;
}
mIter = nullptr;
return 0;
}

View File

@ -54,7 +54,6 @@ private:
// VPx decoder state
vpx_codec_ctx_t mVPX;
vpx_codec_iter_t mIter;
const VideoInfo& mInfo;

View File

@ -76,7 +76,7 @@ public:
virtual nsresult Flush() override;
virtual nsresult Drain() override;
virtual nsresult Shutdown() override;
virtual bool IsHardwareAccelerated() const override
virtual bool IsHardwareAccelerated(nsACString& aFailureReason) const override
{
return true;
}

View File

@ -22,7 +22,7 @@ public:
virtual ~AppleVTDecoder();
virtual nsRefPtr<InitPromise> Init() override;
virtual nsresult Input(MediaRawData* aSample) override;
virtual bool IsHardwareAccelerated() const override
virtual bool IsHardwareAccelerated(nsACString& aFailureReason) const override
{
return mIsHardwareAccelerated;
}

View File

@ -11,10 +11,12 @@
#include "gfxWindowsPlatform.h"
#include "D3D9SurfaceImage.h"
#include "mozilla/layers/D3D11ShareHandleImage.h"
#include "mozilla/layers/ImageBridgeChild.h"
#include "mozilla/Preferences.h"
#include "mfapi.h"
#include "MFTDecoder.h"
#include "DriverCrashGuard.h"
#include "nsPrintfCString.h"
const CLSID CLSID_VideoProcessorMFT =
{
@ -40,6 +42,7 @@ namespace mozilla {
using layers::Image;
using layers::ImageContainer;
using layers::D3D9SurfaceImage;
using layers::D3D9RecycleAllocator;
using layers::D3D11ShareHandleImage;
class D3D9DXVA2Manager : public DXVA2Manager
@ -48,7 +51,7 @@ public:
D3D9DXVA2Manager();
virtual ~D3D9DXVA2Manager();
HRESULT Init();
HRESULT Init(nsACString& aFailureReason);
IUnknown* GetDXVADeviceManager() override;
@ -59,13 +62,120 @@ public:
ImageContainer* aContainer,
Image** aOutImage) override;
virtual bool SupportsConfig(IMFMediaType* aType) override;
private:
nsRefPtr<IDirect3D9Ex> mD3D9;
nsRefPtr<IDirect3DDevice9Ex> mDevice;
nsRefPtr<IDirect3DDeviceManager9> mDeviceManager;
RefPtr<D3D9RecycleAllocator> mTextureClientAllocator;
nsRefPtr<IDirectXVideoDecoderService> mDecoderService;
UINT32 mResetToken;
};
void GetDXVA2ExtendedFormatFromMFMediaType(IMFMediaType *pType,
DXVA2_ExtendedFormat *pFormat)
{
// Get the interlace mode.
MFVideoInterlaceMode interlace =
(MFVideoInterlaceMode)MFGetAttributeUINT32(pType, MF_MT_INTERLACE_MODE, MFVideoInterlace_Unknown);
if (interlace == MFVideoInterlace_MixedInterlaceOrProgressive) {
pFormat->SampleFormat = DXVA2_SampleFieldInterleavedEvenFirst;
} else {
pFormat->SampleFormat = (UINT)interlace;
}
pFormat->VideoChromaSubsampling =
MFGetAttributeUINT32(pType, MF_MT_VIDEO_CHROMA_SITING, MFVideoChromaSubsampling_Unknown);
pFormat->NominalRange =
MFGetAttributeUINT32(pType, MF_MT_VIDEO_NOMINAL_RANGE, MFNominalRange_Unknown);
pFormat->VideoTransferMatrix =
MFGetAttributeUINT32(pType, MF_MT_YUV_MATRIX, MFVideoTransferMatrix_Unknown);
pFormat->VideoLighting =
MFGetAttributeUINT32(pType, MF_MT_VIDEO_LIGHTING, MFVideoLighting_Unknown);
pFormat->VideoPrimaries =
MFGetAttributeUINT32(pType, MF_MT_VIDEO_PRIMARIES, MFVideoPrimaries_Unknown);
pFormat->VideoTransferFunction =
MFGetAttributeUINT32(pType, MF_MT_TRANSFER_FUNCTION, MFVideoTransFunc_Unknown);
}
HRESULT ConvertMFTypeToDXVAType(IMFMediaType *pType, DXVA2_VideoDesc *pDesc)
{
ZeroMemory(pDesc, sizeof(*pDesc));
// The D3D format is the first DWORD of the subtype GUID.
GUID subtype = GUID_NULL;
HRESULT hr = pType->GetGUID(MF_MT_SUBTYPE, &subtype);
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
pDesc->Format = (D3DFORMAT)subtype.Data1;
UINT32 width = 0;
UINT32 height = 0;
hr = MFGetAttributeSize(pType, MF_MT_FRAME_SIZE, &width, &height);
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
pDesc->SampleWidth = width;
pDesc->SampleHeight = height;
UINT32 fpsNumerator = 0;
UINT32 fpsDenominator = 0;
hr = MFGetAttributeRatio(pType, MF_MT_FRAME_RATE, &fpsNumerator, &fpsDenominator);
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
pDesc->InputSampleFreq.Numerator = fpsNumerator;
pDesc->InputSampleFreq.Denominator = fpsDenominator;
GetDXVA2ExtendedFormatFromMFMediaType(pType, &pDesc->SampleFormat);
pDesc->OutputFrameFreq = pDesc->InputSampleFreq;
if ((pDesc->SampleFormat.SampleFormat == DXVA2_SampleFieldInterleavedEvenFirst) ||
(pDesc->SampleFormat.SampleFormat == DXVA2_SampleFieldInterleavedOddFirst)) {
pDesc->OutputFrameFreq.Numerator *= 2;
}
return S_OK;
}
static const GUID DXVA2_ModeH264_E = {
0x1b81be68, 0xa0c7, 0x11d3, { 0xb9, 0x84, 0x00, 0xc0, 0x4f, 0x2e, 0x73, 0xc5 }
};
// This tests if a DXVA video decoder can be created for the given media type/resolution.
// It uses the same decoder device (DXVA2_ModeH264_E - DXVA2_ModeH264_VLD_NoFGT) as the H264
// decoder MFT provided by windows (CLSID_CMSH264DecoderMFT) uses, so we can use it to determine
// if the MFT will use software fallback or not.
bool
D3D9DXVA2Manager::SupportsConfig(IMFMediaType* aType)
{
DXVA2_VideoDesc desc;
HRESULT hr = ConvertMFTypeToDXVAType(aType, &desc);
NS_ENSURE_TRUE(SUCCEEDED(hr), false);
UINT configCount;
DXVA2_ConfigPictureDecode* configs = nullptr;
hr = mDecoderService->GetDecoderConfigurations(DXVA2_ModeH264_E, &desc, nullptr, &configCount, &configs);
NS_ENSURE_TRUE(SUCCEEDED(hr), false);
nsRefPtr<IDirect3DSurface9> surface;
hr = mDecoderService->CreateSurface(desc.SampleWidth, desc.SampleHeight, 0, (D3DFORMAT)MAKEFOURCC('N', 'V', '1', '2'),
D3DPOOL_DEFAULT, 0, DXVA2_VideoDecoderRenderTarget,
surface.StartAssignment(), NULL);
if (!SUCCEEDED(hr)) {
CoTaskMemFree(configs);
return false;
}
for (UINT i = 0; i < configCount; i++) {
nsRefPtr<IDirectXVideoDecoder> decoder;
IDirect3DSurface9* surfaces = surface;
hr = mDecoderService->CreateVideoDecoder(DXVA2_ModeH264_E, &desc, &configs[i], &surfaces, 1, decoder.StartAssignment());
if (SUCCEEDED(hr) && decoder) {
CoTaskMemFree(configs);
return true;
}
}
CoTaskMemFree(configs);
return false;
}
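For orientation, this probe is consumed later in the commit by WMFVideoMFTManager::MaybeToggleDXVA() to decide whether the MFT should be handed a D3D device manager at all. A hedged caller-side sketch (variable names illustrative; calls adapted from the hunks below):
RefPtr<IMFMediaType> outputType;
if (SUCCEEDED(decoder->GetOutputMediaType(outputType)) &&
    dxva2Manager->SupportsConfig(outputType)) {
  // The GPU can decode this resolution directly, so DXVA is worth enabling.
  decoder->SendMFTMessage(MFT_MESSAGE_SET_D3D_MANAGER,
                          ULONG_PTR(dxva2Manager->GetDXVADeviceManager()));
}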
D3D9DXVA2Manager::D3D9DXVA2Manager()
: mResetToken(0)
{
@ -87,13 +197,14 @@ D3D9DXVA2Manager::GetDXVADeviceManager()
}
HRESULT
D3D9DXVA2Manager::Init()
D3D9DXVA2Manager::Init(nsACString& aFailureReason)
{
MOZ_ASSERT(NS_IsMainThread());
gfx::D3D9VideoCrashGuard crashGuard;
if (crashGuard.Crashed()) {
NS_WARNING("DXVA2D3D9 crash detected");
aFailureReason.AssignLiteral("DXVA2D3D9 crashes detected in the past");
return E_FAIL;
}
@ -106,6 +217,7 @@ D3D9DXVA2Manager::Init()
HRESULT hr = d3d9Create(D3D_SDK_VERSION, getter_AddRefs(d3d9Ex));
if (!d3d9Ex) {
NS_WARNING("Direct3DCreate9 failed");
aFailureReason.AssignLiteral("Direct3DCreate9 failed");
return E_FAIL;
}
@ -115,7 +227,10 @@ D3D9DXVA2Manager::Init()
D3DDEVTYPE_HAL,
(D3DFORMAT)MAKEFOURCC('N','V','1','2'),
D3DFMT_X8R8G8B8);
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
if (!SUCCEEDED(hr)) {
aFailureReason = nsPrintfCString("CheckDeviceFormatConversion failed with error %X", hr);
return hr;
}
// Create D3D9DeviceEx.
D3DPRESENT_PARAMETERS params = {0};
@ -138,7 +253,10 @@ D3D9DXVA2Manager::Init()
&params,
nullptr,
getter_AddRefs(device));
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
if (!SUCCEEDED(hr)) {
aFailureReason = nsPrintfCString("CreateDeviceEx failed with error %X", hr);
return hr;
}
// Ensure we can create queries to synchronize operations between devices.
// Without this, when we make a copy of the frame in order to share it with
@ -147,7 +265,10 @@ D3D9DXVA2Manager::Init()
nsRefPtr<IDirect3DQuery9> query;
hr = device->CreateQuery(D3DQUERYTYPE_EVENT, getter_AddRefs(query));
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
if (!SUCCEEDED(hr)) {
aFailureReason = nsPrintfCString("CreateQuery failed with error %X", hr);
return hr;
}
// Create and initialize IDirect3DDeviceManager9.
UINT resetToken = 0;
@ -155,15 +276,54 @@ D3D9DXVA2Manager::Init()
hr = wmf::DXVA2CreateDirect3DDeviceManager9(&resetToken,
getter_AddRefs(deviceManager));
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
if (!SUCCEEDED(hr)) {
aFailureReason = nsPrintfCString("DXVA2CreateDirect3DDeviceManager9 failed with error %X", hr);
return hr;
}
hr = deviceManager->ResetDevice(device, resetToken);
if (!SUCCEEDED(hr)) {
aFailureReason = nsPrintfCString("IDirect3DDeviceManager9::ResetDevice failed with error %X", hr);
return hr;
}
HANDLE deviceHandle;
nsRefPtr<IDirectXVideoDecoderService> decoderService;
hr = deviceManager->OpenDeviceHandle(&deviceHandle);
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
hr = deviceManager->GetVideoService(deviceHandle, IID_PPV_ARGS(decoderService.StartAssignment()));
deviceManager->CloseDeviceHandle(deviceHandle);
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
UINT deviceCount;
GUID* decoderDevices = nullptr;
hr = decoderService->GetDecoderDeviceGuids(&deviceCount, &decoderDevices);
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
bool found = false;
for (UINT i = 0; i < deviceCount; i++) {
if (decoderDevices[i] == DXVA2_ModeH264_E) {
found = true;
break;
}
}
CoTaskMemFree(decoderDevices);
if (!found) {
return E_FAIL;
}
mDecoderService = decoderService;
mResetToken = resetToken;
mD3D9 = d3d9Ex;
mDevice = device;
mDeviceManager = deviceManager;
mTextureClientAllocator = new D3D9RecycleAllocator(layers::ImageBridgeChild::GetSingleton(),
mDevice);
mTextureClientAllocator->SetMaxPoolSize(5);
return S_OK;
}
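The same mechanical transformation repeats through the rest of this file: each bare NS_ENSURE_TRUE(SUCCEEDED(hr), hr) on the DXVA setup path becomes a branch that also records a human-readable reason, which ultimately surfaces through the new IsHardwareAccelerated(nsACString& aFailureReason) signature. The shape of the pattern in isolation (DoStep is a hypothetical fallible call, not an API from the tree):
HRESULT FallibleStep(nsACString& aFailureReason)
{
  HRESULT hr = DoStep();
  if (!SUCCEEDED(hr)) {
    // Keep both the HRESULT and a readable explanation of the fallback.
    aFailureReason = nsPrintfCString("DoStep failed with error %X", hr);
    return hr;
  }
  return S_OK;
}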
@ -190,7 +350,7 @@ D3D9DXVA2Manager::CopyToImage(IMFSample* aSample,
"Wrong format?");
D3D9SurfaceImage* videoImage = static_cast<D3D9SurfaceImage*>(image.get());
hr = videoImage->SetData(D3D9SurfaceImage::Data(surface, aRegion));
hr = videoImage->SetData(D3D9SurfaceImage::Data(surface, aRegion, mTextureClientAllocator));
image.forget(aOutImage);
@ -203,7 +363,7 @@ static uint32_t sDXVAVideosCount = 0;
/* static */
DXVA2Manager*
DXVA2Manager::CreateD3D9DXVA()
DXVA2Manager::CreateD3D9DXVA(nsACString& aFailureReason)
{
MOZ_ASSERT(NS_IsMainThread());
HRESULT hr;
@ -213,11 +373,12 @@ DXVA2Manager::CreateD3D9DXVA()
const uint32_t dxvaLimit =
Preferences::GetInt("media.windows-media-foundation.max-dxva-videos", 8);
if (sDXVAVideosCount == dxvaLimit) {
aFailureReason.AssignLiteral("Too many DXVA videos playing");
return nullptr;
}
nsAutoPtr<D3D9DXVA2Manager> d3d9Manager(new D3D9DXVA2Manager());
hr = d3d9Manager->Init();
hr = d3d9Manager->Init(aFailureReason);
if (SUCCEEDED(hr)) {
return d3d9Manager.forget();
}
@ -232,7 +393,7 @@ public:
D3D11DXVA2Manager();
virtual ~D3D11DXVA2Manager();
HRESULT Init();
HRESULT Init(nsACString& aFailureReason);
IUnknown* GetDXVADeviceManager() override;
@ -281,28 +442,46 @@ D3D11DXVA2Manager::GetDXVADeviceManager()
}
HRESULT
D3D11DXVA2Manager::Init()
D3D11DXVA2Manager::Init(nsACString& aFailureReason)
{
HRESULT hr;
mDevice = gfxWindowsPlatform::GetPlatform()->CreateD3D11DecoderDevice();
NS_ENSURE_TRUE(mDevice, E_FAIL);
if (!mDevice) {
aFailureReason.AssignLiteral("Failed to create D3D11 device for decoder");
return E_FAIL;
}
mDevice->GetImmediateContext(byRef(mContext));
NS_ENSURE_TRUE(mContext, E_FAIL);
if (!mContext) {
aFailureReason.AssignLiteral("Failed to get immediate context for d3d11 device");
return E_FAIL;
}
hr = wmf::MFCreateDXGIDeviceManager(&mDeviceManagerToken, byRef(mDXGIDeviceManager));
NS_ENSURE_TRUE(SUCCEEDED(hr),hr);
if (!SUCCEEDED(hr)) {
aFailureReason = nsPrintfCString("MFCreateDXGIDeviceManager failed with code %X", hr);
return hr;
}
hr = mDXGIDeviceManager->ResetDevice(mDevice, mDeviceManagerToken);
NS_ENSURE_TRUE(SUCCEEDED(hr),hr);
if (!SUCCEEDED(hr)) {
aFailureReason = nsPrintfCString("IMFDXGIDeviceManager::ResetDevice failed with code %X", hr);
return hr;
}
mTransform = new MFTDecoder();
hr = mTransform->Create(CLSID_VideoProcessorMFT);
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
if (!SUCCEEDED(hr)) {
aFailureReason = nsPrintfCString("MFTDecoder::Create(CLSID_VideoProcessorMFT) failed with code %X", hr);
return hr;
}
hr = mTransform->SendMFTMessage(MFT_MESSAGE_SET_D3D_MANAGER, ULONG_PTR(mDXGIDeviceManager.get()));
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
if (!SUCCEEDED(hr)) {
aFailureReason = nsPrintfCString("MFTDecoder::SendMFTMessage(MFT_MESSAGE_SET_D3D_MANAGER) failed with code %X", hr);
return hr;
}
return S_OK;
}
@ -458,18 +637,19 @@ D3D11DXVA2Manager::ConfigureForSize(uint32_t aWidth, uint32_t aHeight)
/* static */
DXVA2Manager*
DXVA2Manager::CreateD3D11DXVA()
DXVA2Manager::CreateD3D11DXVA(nsACString& aFailureReason)
{
// DXVA processing takes up a lot of GPU resources, so limit the number of
// videos we use DXVA with at any one time.
const uint32_t dxvaLimit =
Preferences::GetInt("media.windows-media-foundation.max-dxva-videos", 8);
if (sDXVAVideosCount == dxvaLimit) {
aFailureReason.AssignLiteral("Too many DXVA videos playing");
return nullptr;
}
nsAutoPtr<D3D11DXVA2Manager> manager(new D3D11DXVA2Manager());
HRESULT hr = manager->Init();
HRESULT hr = manager->Init(aFailureReason);
NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
return manager.forget();

View File

@ -23,8 +23,8 @@ public:
// Creates and initializes a DXVA2Manager. We can use DXVA2 via either
// D3D9Ex or D3D11.
static DXVA2Manager* CreateD3D9DXVA();
static DXVA2Manager* CreateD3D11DXVA();
static DXVA2Manager* CreateD3D9DXVA(nsACString& aFailureReason);
static DXVA2Manager* CreateD3D11DXVA(nsACString& aFailureReason);
// Returns a pointer to the D3D device manager responsible for managing the
// device we're using for hardware accelerated video decoding. If we're using
@ -44,6 +44,8 @@ public:
virtual ~DXVA2Manager();
virtual bool SupportsConfig(IMFMediaType* aType) { return true; }
protected:
Mutex mLock;
DXVA2Manager();

View File

@ -59,6 +59,11 @@ public:
int64_t aTimestampUsecs);
HRESULT Input(IMFSample* aSample);
HRESULT CreateInputSample(const uint8_t* aData,
uint32_t aDataSize,
int64_t aTimestampUsecs,
RefPtr<IMFSample>* aOutSample);
// Retrieves output from the MFT. Call this once Input() returns
// MF_E_NOTACCEPTING. Some MFTs with hardware acceleration (the H.264
// decoder MFT in particular) can't handle it if clients hold onto
@ -80,14 +85,10 @@ public:
// Sends a message to the MFT.
HRESULT SendMFTMessage(MFT_MESSAGE_TYPE aMsg, ULONG_PTR aData);
private:
HRESULT SetDecoderOutputType(ConfigureOutputCallback aCallback, void* aData);
private:
HRESULT CreateInputSample(const uint8_t* aData,
uint32_t aDataSize,
int64_t aTimestampUsecs,
RefPtr<IMFSample>* aOutSample);
HRESULT CreateOutputSample(RefPtr<IMFSample>* aOutSample);

View File

@ -102,7 +102,7 @@ WMFDecoderModule::CreateVideoDecoder(const VideoInfo& aConfig,
new WMFVideoMFTManager(aConfig,
aLayersBackend,
aImageContainer,
sDXVAEnabled && ShouldUseDXVA(aConfig)));
sDXVAEnabled));
nsRefPtr<MFTDecoder> mft = manager->Init();
@ -133,33 +133,12 @@ WMFDecoderModule::CreateAudioDecoder(const AudioInfo& aConfig,
return decoder.forget();
}
bool
WMFDecoderModule::ShouldUseDXVA(const VideoInfo& aConfig) const
{
static bool isAMD = false;
static bool initialized = false;
if (!initialized) {
nsCOMPtr<nsIGfxInfo> gfxInfo = services::GetGfxInfo();
nsAutoString vendor;
gfxInfo->GetAdapterVendorID(vendor);
isAMD = vendor.Equals(widget::GfxDriverInfo::GetDeviceVendor(widget::VendorAMD), nsCaseInsensitiveStringComparator()) ||
vendor.Equals(widget::GfxDriverInfo::GetDeviceVendor(widget::VendorATI), nsCaseInsensitiveStringComparator());
initialized = true;
}
if (!isAMD) {
return true;
}
// Don't use DXVA for 4k videos or above, since it seems to perform poorly.
return aConfig.mDisplay.width <= 1920 && aConfig.mDisplay.height <= 1200;
}
bool
WMFDecoderModule::SupportsSharedDecoders(const VideoInfo& aConfig) const
{
// If DXVA is enabled, but we're not going to use it for this specific config, then
// we can't use the shared decoder.
return !AgnosticMimeType(aConfig.mMimeType) &&
(!sDXVAEnabled || ShouldUseDXVA(aConfig));
return !AgnosticMimeType(aConfig.mMimeType);
}
bool

View File

@ -52,7 +52,6 @@ public:
// Called from any thread, must call init first
static int GetNumDecoderThreads();
private:
bool ShouldUseDXVA(const VideoInfo& aConfig) const;
bool mWMFInitialized;
};

View File

@ -234,10 +234,10 @@ WMFMediaDataDecoder::Drain()
}
bool
WMFMediaDataDecoder::IsHardwareAccelerated() const {
WMFMediaDataDecoder::IsHardwareAccelerated(nsACString& aFailureReason) const {
MOZ_ASSERT(!mIsShutDown);
return mMFTManager && mMFTManager->IsHardwareAccelerated();
return mMFTManager && mMFTManager->IsHardwareAccelerated(aFailureReason);
}
} // namespace mozilla

View File

@ -43,7 +43,7 @@ public:
// Destroys all resources.
virtual void Shutdown() = 0;
virtual bool IsHardwareAccelerated() const { return false; }
virtual bool IsHardwareAccelerated(nsACString& aFailureReason) const { return false; }
virtual TrackInfo::TrackType GetType() = 0;
@ -72,7 +72,7 @@ public:
virtual nsresult Shutdown() override;
virtual bool IsHardwareAccelerated() const override;
virtual bool IsHardwareAccelerated(nsACString& aFailureReason) const override;
private:

View File

@ -21,6 +21,7 @@
#include "IMFYCbCrImage.h"
#include "mozilla/WindowsVersion.h"
#include "mozilla/Preferences.h"
#include "nsPrintfCString.h"
PRLogModuleInfo* GetDemuxerLog();
#define LOG(...) MOZ_LOG(GetDemuxerLog(), mozilla::LogLevel::Debug, (__VA_ARGS__))
@ -128,8 +129,9 @@ WMFVideoMFTManager::GetMediaSubtypeGUID()
class CreateDXVAManagerEvent : public nsRunnable {
public:
CreateDXVAManagerEvent(LayersBackend aBackend)
CreateDXVAManagerEvent(LayersBackend aBackend, nsCString& aFailureReason)
: mBackend(aBackend)
, mFailureReason(aFailureReason)
{}
NS_IMETHOD Run() {
@ -137,14 +139,15 @@ public:
if (mBackend == LayersBackend::LAYERS_D3D11 &&
Preferences::GetBool("media.windows-media-foundation.allow-d3d11-dxva", false) &&
IsWin8OrLater()) {
mDXVA2Manager = DXVA2Manager::CreateD3D11DXVA();
mDXVA2Manager = DXVA2Manager::CreateD3D11DXVA(mFailureReason);
} else {
mDXVA2Manager = DXVA2Manager::CreateD3D9DXVA();
mDXVA2Manager = DXVA2Manager::CreateD3D9DXVA(mFailureReason);
}
return NS_OK;
}
nsAutoPtr<DXVA2Manager> mDXVA2Manager;
LayersBackend mBackend;
nsACString& mFailureReason;
};
bool
@ -155,15 +158,19 @@ WMFVideoMFTManager::InitializeDXVA(bool aForceD3D9)
// If we use DXVA but aren't running with a D3D layer manager then the
// readback of decoded video frames from GPU to CPU memory grinds painting
// to a halt, and makes playback performance *worse*.
if (!mDXVAEnabled ||
(mLayersBackend != LayersBackend::LAYERS_D3D9 &&
mLayersBackend != LayersBackend::LAYERS_D3D11)) {
if (!mDXVAEnabled) {
mDXVAFailureReason.AssignLiteral("Hardware video decoding disabled or blacklisted");
return false;
}
if (mLayersBackend != LayersBackend::LAYERS_D3D9 &&
mLayersBackend != LayersBackend::LAYERS_D3D11) {
mDXVAFailureReason.AssignLiteral("Unsupported layers backend");
return false;
}
// The DXVA manager must be created on the main thread.
nsRefPtr<CreateDXVAManagerEvent> event =
new CreateDXVAManagerEvent(aForceD3D9 ? LayersBackend::LAYERS_D3D9 : mLayersBackend);
new CreateDXVAManagerEvent(aForceD3D9 ? LayersBackend::LAYERS_D3D9 : mLayersBackend, mDXVAFailureReason);
if (NS_IsMainThread()) {
event->Run();
@ -184,7 +191,10 @@ WMFVideoMFTManager::Init()
// to d3d9.
if (!decoder && mDXVA2Manager && mDXVA2Manager->IsD3D11()) {
mDXVA2Manager = nullptr;
nsCString d3d11Failure = mDXVAFailureReason;
decoder = InitInternal(true);
mDXVAFailureReason.Append(NS_LITERAL_CSTRING("; "));
mDXVAFailureReason.Append(d3d11Failure);
}
return decoder.forget();
@ -204,15 +214,16 @@ WMFVideoMFTManager::InitInternal(bool aForceD3D9)
RefPtr<IMFAttributes> attr(decoder->GetAttributes());
UINT32 aware = 0;
if (attr) {
attr->GetUINT32(MF_SA_D3D_AWARE, &aware);
attr->SetUINT32(CODECAPI_AVDecNumWorkerThreads,
WMFDecoderModule::GetNumDecoderThreads());
hr = attr->SetUINT32(CODECAPI_AVLowLatencyMode, TRUE);
if (SUCCEEDED(hr)) {
LOG("Enabling Low Latency Mode");
} else {
LOG("Couldn't enable Low Latency Mode");
}
attr->GetUINT32(MF_SA_D3D_AWARE, &aware);
attr->SetUINT32(CODECAPI_AVDecNumWorkerThreads,
WMFDecoderModule::GetNumDecoderThreads());
hr = attr->SetUINT32(CODECAPI_AVLowLatencyMode, TRUE);
if (SUCCEEDED(hr)) {
LOG("Enabling Low Latency Mode");
}
else {
LOG("Couldn't enable Low Latency Mode");
}
}
if (useDxva) {
@ -225,39 +236,20 @@ WMFVideoMFTManager::InitInternal(bool aForceD3D9)
hr = decoder->SendMFTMessage(MFT_MESSAGE_SET_D3D_MANAGER, manager);
if (SUCCEEDED(hr)) {
mUseHwAccel = true;
} else {
mDXVA2Manager = nullptr;
mDXVAFailureReason = nsPrintfCString("MFT_MESSAGE_SET_D3D_MANAGER failed with code %X", hr);
}
}
else {
mDXVAFailureReason.AssignLiteral("Decoder returned false for MF_SA_D3D_AWARE");
}
}
// Setup the input/output media types.
RefPtr<IMFMediaType> inputType;
hr = wmf::MFCreateMediaType(byRef(inputType));
NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
hr = inputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
hr = inputType->SetGUID(MF_MT_SUBTYPE, GetMediaSubtypeGUID());
NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
hr = inputType->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_MixedInterlaceOrProgressive);
NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
RefPtr<IMFMediaType> outputType;
hr = wmf::MFCreateMediaType(byRef(outputType));
NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
hr = outputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
GUID outputSubType = mUseHwAccel ? MFVideoFormat_NV12 : MFVideoFormat_YV12;
hr = outputType->SetGUID(MF_MT_SUBTYPE, outputSubType);
NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
hr = decoder->SetMediaTypes(inputType, outputType);
NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
mDecoder = decoder;
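// Media-type negotiation is factored out into SetDecoderMediaTypes() so that
// MaybeToggleDXVA() can re-run it after a mid-stream resolution change.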
hr = SetDecoderMediaTypes();
NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
LOG("Video Decoder initialized, Using DXVA: %s", (mUseHwAccel ? "Yes" : "No"));
// Just in case ConfigureVideoFrameGeometry() does not set these
@ -270,6 +262,37 @@ WMFVideoMFTManager::InitInternal(bool aForceD3D9)
return decoder.forget();
}
HRESULT
WMFVideoMFTManager::SetDecoderMediaTypes()
{
// Setup the input/output media types.
RefPtr<IMFMediaType> inputType;
HRESULT hr = wmf::MFCreateMediaType(byRef(inputType));
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
hr = inputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
hr = inputType->SetGUID(MF_MT_SUBTYPE, GetMediaSubtypeGUID());
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
hr = inputType->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_MixedInterlaceOrProgressive);
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
RefPtr<IMFMediaType> outputType;
hr = wmf::MFCreateMediaType(byRef(outputType));
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
hr = outputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
GUID outputSubType = mUseHwAccel ? MFVideoFormat_NV12 : MFVideoFormat_YV12;
hr = outputType->SetGUID(MF_MT_SUBTYPE, outputSubType);
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
return mDecoder->SetMediaTypes(inputType, outputType);
}
HRESULT
WMFVideoMFTManager::Input(MediaRawData* aSample)
{
@ -277,10 +300,55 @@ WMFVideoMFTManager::Input(MediaRawData* aSample)
// This can happen during shutdown.
return E_FAIL;
}
HRESULT hr = mDecoder->CreateInputSample(aSample->Data(),
uint32_t(aSample->Size()),
aSample->mTime,
&mLastInput);
NS_ENSURE_TRUE(SUCCEEDED(hr) && mLastInput != nullptr, hr);
// Forward sample data to the decoder.
return mDecoder->Input(aSample->Data(),
uint32_t(aSample->Size()),
aSample->mTime);
return mDecoder->Input(mLastInput);
}
// The MFTransform we use for decoding h264 video will silently fall
// back to software decoding (even if we've negotiated DXVA) if the GPU
// doesn't support decoding the given resolution. It will then upload
// the software decoded frames into d3d textures to preserve behaviour.
//
// Unfortunately this seems to cause corruption (see bug 1193547) and is
// slow because the upload is done into a non-shareable texture and requires
// us to copy it.
//
// This code tests if the given resolution can be supported directly on the GPU,
// and makes sure we only ask the MFT for DXVA if it can be supported properly.
bool
WMFVideoMFTManager::MaybeToggleDXVA(IMFMediaType* aType)
{
// SupportsConfig only checks for valid h264 decoders currently.
if (!mDXVA2Manager || mStreamType != H264) {
return false;
}
if (mDXVA2Manager->SupportsConfig(aType)) {
if (!mUseHwAccel) {
// DXVA disabled, but supported for this resolution
ULONG_PTR manager = ULONG_PTR(mDXVA2Manager->GetDXVADeviceManager());
HRESULT hr = mDecoder->SendMFTMessage(MFT_MESSAGE_SET_D3D_MANAGER, manager);
if (SUCCEEDED(hr)) {
mUseHwAccel = true;
return true;
}
}
} else if (mUseHwAccel) {
// DXVA enabled, and not supported for this resolution
HRESULT hr = mDecoder->SendMFTMessage(MFT_MESSAGE_SET_D3D_MANAGER, 0);
MOZ_ASSERT(SUCCEEDED(hr), "Attempting to fall back to software failed?");
mUseHwAccel = false;
return true;
}
return false;
}
HRESULT
@ -290,6 +358,20 @@ WMFVideoMFTManager::ConfigureVideoFrameGeometry()
HRESULT hr = mDecoder->GetOutputMediaType(mediaType);
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
// If we enabled/disabled DXVA in response to a resolution
// change then we need to renegotiate our media types,
// and resubmit our previous frame (since the MFT appears
// to lose it otherwise).
if (MaybeToggleDXVA(mediaType)) {
hr = SetDecoderMediaTypes();
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
hr = mDecoder->GetOutputMediaType(mediaType);
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
mDecoder->Input(mLastInput);
}
// Verify that the video subtype is what we expect it to be.
// When using hardware acceleration/DXVA2 the video format should
// be NV12, which is DXVA2's preferred format. For software decoding
@ -555,8 +637,9 @@ WMFVideoMFTManager::Shutdown()
}
bool
WMFVideoMFTManager::IsHardwareAccelerated() const
WMFVideoMFTManager::IsHardwareAccelerated(nsACString& aFailureReason) const
{
aFailureReason = mDXVAFailureReason;
return mDecoder && mUseHwAccel;
}

View File

@ -34,7 +34,7 @@ public:
virtual void Shutdown() override;
virtual bool IsHardwareAccelerated() const override;
virtual bool IsHardwareAccelerated(nsACString& aFailureReason) const override;
virtual TrackInfo::TrackType GetType() override {
return TrackInfo::kVideoTrack;
@ -56,6 +56,10 @@ private:
int64_t aStreamOffset,
VideoData** aOutVideoData);
HRESULT SetDecoderMediaTypes();
bool MaybeToggleDXVA(IMFMediaType* aType);
// Video frame geometry.
VideoInfo mVideoInfo;
uint32_t mVideoStride;
@ -67,10 +71,14 @@ private:
RefPtr<layers::ImageContainer> mImageContainer;
nsAutoPtr<DXVA2Manager> mDXVA2Manager;
RefPtr<IMFSample> mLastInput;
const bool mDXVAEnabled;
const layers::LayersBackend mLayersBackend;
bool mUseHwAccel;
nsCString mDXVAFailureReason;
enum StreamType {
Unknown,
H264,

View File

@ -29,7 +29,6 @@ H264Converter::H264Converter(PlatformDecoderModule* aPDM,
, mCallback(aCallback)
, mDecoder(nullptr)
, mNeedAVCC(aPDM->DecoderNeedsConversion(aConfig) == PlatformDecoderModule::kNeedAVCC)
, mDecoderInitializing(false)
, mLastError(NS_OK)
{
CreateDecoder();
@ -46,8 +45,9 @@ H264Converter::Init()
return mDecoder->Init();
}
return MediaDataDecoder::InitPromise::CreateAndReject(
MediaDataDecoder::DecoderFailureReason::INIT_ERROR, __func__);
// We haven't been able to initialize a decoder due to a missing SPS/PPS.
return MediaDataDecoder::InitPromise::CreateAndResolve(
TrackType::kVideoTrack, __func__);
}
nsresult
@ -63,7 +63,7 @@ H264Converter::Input(MediaRawData* aSample)
}
}
if (mDecoderInitializing) {
if (mInitPromiseRequest.Exists()) {
mMediaRawSamples.AppendElement(aSample);
return NS_OK;
}
@ -121,12 +121,12 @@ H264Converter::Shutdown()
}
bool
H264Converter::IsHardwareAccelerated() const
H264Converter::IsHardwareAccelerated(nsACString& aFailureReason) const
{
if (mDecoder) {
return mDecoder->IsHardwareAccelerated();
return mDecoder->IsHardwareAccelerated(aFailureReason);
}
return MediaDataDecoder::IsHardwareAccelerated();
return MediaDataDecoder::IsHardwareAccelerated(aFailureReason);
}
nsresult
@ -163,7 +163,9 @@ H264Converter::CreateDecoderAndInit(MediaRawData* aSample)
nsresult rv = CreateDecoder();
if (NS_SUCCEEDED(rv)) {
mDecoderInitializing = true;
// Queue the incoming sample.
mMediaRawSamples.AppendElement(aSample);
nsRefPtr<H264Converter> self = this;
// The mVideoTaskQueue is flushable which can't be used in MediaPromise. So
@ -187,7 +189,6 @@ H264Converter::OnDecoderInitDone(const TrackType aTrackType)
}
}
mMediaRawSamples.Clear();
mDecoderInitializing = false;
}
void

View File

@ -34,7 +34,7 @@ public:
virtual nsresult Flush() override;
virtual nsresult Drain() override;
virtual nsresult Shutdown() override;
virtual bool IsHardwareAccelerated() const override;
virtual bool IsHardwareAccelerated(nsACString& aFailureReason) const override;
// Return true if mimetype is H.264.
static bool IsH264(const TrackInfo& aConfig);
@ -61,7 +61,6 @@ private:
nsRefPtr<MediaDataDecoder> mDecoder;
MozPromiseRequestHolder<InitPromise> mInitPromiseRequest;
bool mNeedAVCC;
bool mDecoderInitializing;
nsresult mLastError;
};

View File

@ -4,3 +4,4 @@ skip-if = buildapp == 'b2g'
[test_AudioNodeDevtoolsAPI.html]
[test_bug1027864.html]
[test_AudioParamDevtoolsAPI.html]
[test_ScriptProcessorCollected1.html]

View File

@ -0,0 +1,81 @@
<!DOCTYPE HTML>
<html>
<head>
<title>Test ScriptProcessorNode in cycle with no listener is collected</title>
<script type="text/javascript" src="chrome://mochikit/content/tests/SimpleTest/SimpleTest.js"></script>
<link rel="stylesheet" type="text/css" href="chrome://mochikit/content/tests/SimpleTest/test.css" />
</head>
<body>
<pre id="test">
<script class="testbody" type="text/javascript">
Components.utils.import('resource://gre/modules/Services.jsm');
SimpleTest.waitForExplicitFinish();
var observer = function(subject, topic, data) {
var id = parseInt(data);
var index = ids.indexOf(id);
if (index != -1) {
ok(true, "Collected AudioNode id " + id + " at index " + index);
ids.splice(index, 1);
}
}
Services.obs.addObserver(observer, "webaudio-node-demise", false);
SimpleTest.registerCleanupFunction(function() {
if (observer) {
Services.obs.removeObserver(observer, "webaudio-node-demise");
}
});
var ac = new AudioContext();
var testProcessor = ac.createScriptProcessor(256, 1, 0);
var delay = ac.createDelay();
testProcessor.connect(delay);
delay.connect(testProcessor);
var referenceProcessor = ac.createScriptProcessor(256, 1, 0);
var gain = ac.createGain();
gain.connect(referenceProcessor);
var processCount = 0;
testProcessor.onaudioprocess = function(event) {
++processCount;
switch (processCount) {
case 1:
// Switch to listening to referenceProcessor;
referenceProcessor.onaudioprocess = event.target.onaudioprocess;
referenceProcessor = null;
event.target.onaudioprocess = null;
break;
case 2:
// There are no references to testProcessor and so GC can begin.
SpecialPowers.forceGC();
break;
case 3:
// Another GC should not be required after testProcessor would have
// received another audioprocess event.
SpecialPowers.forceCC();
// Expect that webaudio-demise has been queued.
// Queue another event to check.
SimpleTest.executeSoon(function() {
Services.obs.removeObserver(observer, "webaudio-node-demise");
observer = null;
event.target.onaudioprocess = null;
ok(ids.length == 0, "All expected nodes should be collected");
SimpleTest.finish();
});
break;
}
};
// Nodes with these ids should be collected.
var ids = [ testProcessor.id, delay.id, gain.id ];
testProcessor = null;
delay = null;
gain = null;
</script>
</pre>
</body>
</html>

View File

@ -18,6 +18,7 @@
#include "NesteggPacketHolder.h"
#include <algorithm>
#include <stdint.h>
#define VPX_DONT_DEFINE_STDINT_TYPES
#include "vpx/vp8dx.h"
@ -33,32 +34,41 @@ extern PRLogModuleInfo* gMediaDecoderLog;
extern PRLogModuleInfo* gNesteggLog;
// Functions for reading and seeking using WebMDemuxer required for
// nestegg_io. The 'user data' passed to these functions is the demuxer
// nestegg_io. The 'user data' passed to these functions is the
// demuxer's MediaResourceIndex.
static int webmdemux_read(void* aBuffer, size_t aLength, void* aUserData)
{
MOZ_ASSERT(aUserData);
WebMDemuxer* demuxer = reinterpret_cast<WebMDemuxer*>(aUserData);
MediaResourceIndex* resource =
reinterpret_cast<MediaResourceIndex*>(aUserData);
int64_t length = resource->GetLength();
MOZ_ASSERT(aLength < UINT32_MAX);
uint32_t count = aLength;
if (length >= 0 && count + resource->Tell() > length) {
count = uint32_t(length - resource->Tell());
}
uint32_t bytes = 0;
bool eof = false;
char* p = static_cast<char*>(aBuffer);
nsresult rv = demuxer->Read(p, aLength, &bytes);
eof = bytes < aLength;
nsresult rv = resource->Read(static_cast<char*>(aBuffer), count, &bytes);
bool eof = !bytes;
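// nestegg_io read contract: return 1 on success, 0 on end of stream,
// -1 on error.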
return NS_FAILED(rv) ? -1 : eof ? 0 : 1;
}
static int webmdemux_seek(int64_t aOffset, int aWhence, void* aUserData)
{
MOZ_ASSERT(aUserData);
WebMDemuxer* demuxer = reinterpret_cast<WebMDemuxer*>(aUserData);
nsresult rv = demuxer->Seek(aWhence, aOffset);
MediaResourceIndex* resource =
reinterpret_cast<MediaResourceIndex*>(aUserData);
nsresult rv = resource->Seek(aWhence, aOffset);
return NS_SUCCEEDED(rv) ? 0 : -1;
}
static int64_t webmdemux_tell(void* aUserData)
{
MOZ_ASSERT(aUserData);
WebMDemuxer* demuxer = reinterpret_cast<WebMDemuxer*>(aUserData);
return demuxer->Tell();
MediaResourceIndex* resource =
reinterpret_cast<MediaResourceIndex*>(aUserData);
return resource->Tell();
}
static void webmdemux_log(nestegg* aContext,
@ -109,7 +119,6 @@ WebMDemuxer::WebMDemuxer(MediaResource* aResource)
, mBufferedState(nullptr)
, mInitData(nullptr)
, mContext(nullptr)
, mOffset(0)
, mVideoTrack(0)
, mAudioTrack(0)
, mSeekPreroll(0)
@ -162,7 +171,7 @@ WebMDemuxer::InitBufferedState()
already_AddRefed<MediaDataDemuxer>
WebMDemuxer::Clone() const
{
nsRefPtr<WebMDemuxer> demuxer = new WebMDemuxer(mResource);
nsRefPtr<WebMDemuxer> demuxer = new WebMDemuxer(mResource.GetResource());
demuxer->mInitData = mInitData;
if (demuxer->InitBufferedState() != NS_OK ||
demuxer->ReadMetadata() != NS_OK) {
@ -243,10 +252,10 @@ WebMDemuxer::ReadMetadata()
io.read = webmdemux_read;
io.seek = webmdemux_seek;
io.tell = webmdemux_tell;
io.userdata = this;
io.userdata = &mResource;
int64_t maxOffset = mBufferedState->GetInitEndOffset();
if (maxOffset == -1) {
maxOffset = mResource->GetLength();
maxOffset = mResource.GetLength();
}
int r = nestegg_init(&mContext, io, &webmdemux_log, maxOffset);
if (r == -1) {
@ -394,50 +403,6 @@ WebMDemuxer::ReadMetadata()
return NS_OK;
}
nsresult
WebMDemuxer::Read(char* aBuffer, uint32_t aCount, uint32_t* aBytes)
{
int64_t length = mResource->GetLength();
if (length >= 0 && aCount + mOffset > length) {
WEBM_DEBUG("requested to large amount, trying to get %ld bytes at %ld (length: %ld)", aCount, mOffset, length);
aCount = length - mOffset;
WEBM_DEBUG("will only return %ld", aCount);
}
nsRefPtr<MediaByteBuffer> bytes = mResource->MediaReadAt(mOffset, aCount);
if (!bytes) {
return NS_ERROR_FAILURE;
}
mOffset += bytes->Length();
*aBytes = bytes->Length();
memcpy(aBuffer, bytes->Elements(), bytes->Length());
return NS_OK;
}
nsresult
WebMDemuxer::Seek(int32_t aWhence, int64_t aOffset)
{
if (aWhence == SEEK_CUR) {
aOffset += mOffset;
} else if (aWhence == SEEK_END) {
int64_t length = mResource->GetLength();
if (length == -1 || length - aOffset < 0) {
return NS_ERROR_FAILURE;
}
aOffset = mResource->GetLength() - aOffset;
}
if (aOffset > mResource->GetLength()) {
return NS_ERROR_FAILURE;
}
mOffset = aOffset;
return NS_OK;
}
int64_t
WebMDemuxer::Tell()
{
return mOffset;
}
bool
WebMDemuxer::IsSeekable() const
{
@ -453,15 +418,15 @@ WebMDemuxer::EnsureUpToDateIndex()
if (mInitData && mBufferedState->GetInitEndOffset() == -1) {
mBufferedState->NotifyDataArrived(mInitData->Elements(), mInitData->Length(), 0);
}
AutoPinned<MediaResource> resource(mResource);
AutoPinned<MediaResource> resource(mResource.GetResource());
nsTArray<MediaByteRange> byteRanges;
nsresult rv = resource->GetCachedRanges(byteRanges);
if (NS_FAILED(rv) || !byteRanges.Length()) {
return;
}
mBufferedState->UpdateIndex(byteRanges, mResource);
mBufferedState->UpdateIndex(byteRanges, resource);
if (!mInitData && mBufferedState->GetInitEndOffset() != -1) {
mInitData = mResource->MediaReadAt(0, mBufferedState->GetInitEndOffset());
mInitData = mResource.MediaReadAt(0, mBufferedState->GetInitEndOffset());
}
mNeedReIndex = false;
}
@ -642,7 +607,7 @@ WebMDemuxer::DemuxPacket()
return nullptr;
}
int64_t offset = Tell();
int64_t offset = mResource.Tell();
nsRefPtr<NesteggPacketHolder> holder = new NesteggPacketHolder();
if (!holder->Init(packet, offset, track, false)) {
return nullptr;
@ -731,7 +696,7 @@ media::TimeIntervals
WebMDemuxer::GetBuffered()
{
EnsureUpToDateIndex();
AutoPinned<MediaResource> resource(mResource);
AutoPinned<MediaResource> resource(mResource.GetResource());
media::TimeIntervals buffered;

View File

@ -85,10 +85,6 @@ public:
// Pushes a packet to the front of the video packet queue.
virtual void PushVideoPacket(NesteggPacketHolder* aItem);
nsresult Read(char* aBuffer, uint32_t aCount, uint32_t * aBytes);
nsresult Seek(int32_t aWhence, int64_t aOffset);
int64_t Tell();
private:
friend class WebMTrackDemuxer;
@ -113,7 +109,7 @@ private:
// is responsible for making sure it doesn't get lost.
nsRefPtr<NesteggPacketHolder> DemuxPacket();
nsRefPtr<MediaResource> mResource;
MediaResourceIndex mResource;
MediaInfo mInfo;
nsTArray<nsRefPtr<WebMTrackDemuxer>> mDemuxers;
@ -126,7 +122,6 @@ private:
// Access on reader's thread for main demuxer,
// or main thread for cloned demuxer
nestegg* mContext;
int64_t mOffset;
// Queue of video and audio packets that have been read but not decoded.
WebMPacketQueue mVideoPackets;

View File

@ -750,11 +750,11 @@ MediaEngineGonkVideoSource::RotateImage(layers::Image* aImage, uint32_t aWidth,
layers::GrallocImage* videoImage = static_cast<layers::GrallocImage*>(image.get());
MOZ_ASSERT(mTextureClientAllocator);
RefPtr<layers::TextureClient> textureClient
= mTextureClientAllocator->CreateOrRecycleForDrawing(gfx::SurfaceFormat::YUV,
gfx::IntSize(dstWidth, dstHeight),
layers::BackendSelector::Content,
layers::TextureFlags::DEFAULT,
layers::ALLOC_DISALLOW_BUFFERTEXTURECLIENT);
= mTextureClientAllocator->CreateOrRecycle(gfx::SurfaceFormat::YUV,
gfx::IntSize(dstWidth, dstHeight),
layers::BackendSelector::Content,
layers::TextureFlags::DEFAULT,
layers::ALLOC_DISALLOW_BUFFERTEXTURECLIENT);
if (textureClient) {
RefPtr<layers::GrallocTextureClientOGL> grallocTextureClient =
static_cast<layers::GrallocTextureClientOGL*>(textureClient.get());

View File

@ -38,6 +38,8 @@ UNIFIED_SOURCES += [
'PushManager.cpp',
]
FAIL_ON_WARNINGS = True
LOCAL_INCLUDES += [
'../workers',
]

View File

@ -25,7 +25,6 @@ add_task(function* test_unregister_success() {
quota: Infinity,
});
let unregisterDefer = Promise.defer();
PushService.init({
serverURI: "wss://push.example.org/",
networkInfo: new MockDesktopNetworkInfo(),

View File

@ -54,7 +54,8 @@ add_task(function* test_notification_ack() {
]);
let acks = 0;
let ackDefer = Promise.defer();
let ackDone;
let ackPromise = new Promise(resolve => ackDone = resolve);
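// Replaces Promise.defer(): the resolver is captured out of the Promise
// constructor so the mock server can call ackDone() as acks arrive.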
PushService.init({
serverURI: "wss://push.example.org/",
networkInfo: new MockDesktopNetworkInfo(),
@ -115,7 +116,7 @@ add_task(function* test_notification_ack() {
channelID: '5477bfda-22db-45d4-9614-fee369630260',
version: 6
}], updates, 'Wrong updates for acknowledgement 3');
ackDefer.resolve();
ackDone();
break;
default:
@ -128,6 +129,6 @@ add_task(function* test_notification_ack() {
yield waitForPromise(notifyPromise, DEFAULT_TIMEOUT,
'Timed out waiting for notifications');
yield waitForPromise(ackDefer.promise, DEFAULT_TIMEOUT,
yield waitForPromise(ackPromise, DEFAULT_TIMEOUT,
'Timed out waiting for multiple acknowledgements');
});

View File

@ -41,8 +41,8 @@ add_task(function* test_notification_duplicate() {
let notifyPromise = promiseObserverNotification('push-notification');
let acks = 0;
let ackDefer = Promise.defer();
let ackDone = after(2, ackDefer.resolve);
let ackDone;
let ackPromise = new Promise(resolve => ackDone = after(2, resolve));
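// after(n, fn) is a test-harness helper that invokes fn only on its nth
// call; here the promise resolves once the second (stale) ack is seen.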
PushService.init({
serverURI: "wss://push.example.org/",
networkInfo: new MockDesktopNetworkInfo(),
@ -73,7 +73,7 @@ add_task(function* test_notification_duplicate() {
yield waitForPromise(notifyPromise, DEFAULT_TIMEOUT,
'Timed out waiting for notifications');
yield waitForPromise(ackDefer.promise, DEFAULT_TIMEOUT,
yield waitForPromise(ackPromise, DEFAULT_TIMEOUT,
'Timed out waiting for stale acknowledgement');
let staleRecord = yield db.getByKeyID(

View File

@ -58,8 +58,8 @@ add_task(function* test_notification_error() {
)
]);
let ackDefer = Promise.defer();
let ackDone = after(records.length, ackDefer.resolve);
let ackDone;
let ackPromise = new Promise(resolve => ackDone = after(records.length, resolve));
PushService.init({
serverURI: "wss://push.example.org/",
networkInfo: new MockDesktopNetworkInfo(),
@ -112,7 +112,7 @@ add_task(function* test_notification_error() {
'Wrong endpoint for notification C');
equal(cPush.version, 4, 'Wrong version for notification C');
yield waitForPromise(ackDefer.promise, DEFAULT_TIMEOUT,
yield waitForPromise(ackPromise, DEFAULT_TIMEOUT,
'Timed out waiting for acknowledgements');
let aRecord = yield db.getByIdentifiers({scope: 'https://example.com/a',

View File

@ -57,8 +57,8 @@ add_task(function* test_notification_incomplete() {
ok(false, 'Should not deliver malformed updates');
}, 'push-notification', false);
let notificationDefer = Promise.defer();
let notificationDone = after(2, notificationDefer.resolve);
let notificationDone;
let notificationPromise = new Promise(resolve => notificationDone = after(2, resolve));
let prevHandler = PushServiceWebSocket._handleNotificationReply;
PushServiceWebSocket._handleNotificationReply = function _handleNotificationReply() {
notificationDone();
@ -107,7 +107,7 @@ add_task(function* test_notification_incomplete() {
}
});
yield waitForPromise(notificationDefer.promise, DEFAULT_TIMEOUT,
yield waitForPromise(notificationPromise, DEFAULT_TIMEOUT,
'Timed out waiting for incomplete notifications');
let storeRecords = yield db.getAllKeyIDs();

View File

@ -28,7 +28,8 @@ add_task(function* test_notification_version_string() {
let notifyPromise = promiseObserverNotification('push-notification');
let ackDefer = Promise.defer();
let ackDone;
let ackPromise = new Promise(resolve => ackDone = resolve);
PushService.init({
serverURI: "wss://push.example.org/",
networkInfo: new MockDesktopNetworkInfo(),
@ -49,7 +50,7 @@ add_task(function* test_notification_version_string() {
}]
}));
},
onACK: ackDefer.resolve
onACK: ackDone
});
}
});
@ -65,7 +66,7 @@ add_task(function* test_notification_version_string() {
'Wrong push endpoint');
strictEqual(message.version, 4, 'Wrong version');
yield waitForPromise(ackDefer.promise, DEFAULT_TIMEOUT,
yield waitForPromise(ackPromise, DEFAULT_TIMEOUT,
'Timed out waiting for string acknowledgement');
let storeRecord = yield db.getByKeyID(

View File

@ -85,7 +85,9 @@ add_task(function* test_expiration_origin_threshold() {
updates++;
return updates == 6;
});
let unregisterDefer = Promise.defer();
let unregisterDone;
let unregisterPromise = new Promise(resolve => unregisterDone = resolve);
PushService.init({
serverURI: 'wss://push.example.org/',
@ -127,7 +129,7 @@ add_task(function* test_expiration_origin_threshold() {
},
onUnregister(request) {
equal(request.channelID, 'eb33fc90-c883-4267-b5cb-613969e8e349', 'Unregistered wrong channel ID');
unregisterDefer.resolve();
unregisterDone();
},
// We expect to receive acks, but don't care about their
// contents.
@ -136,7 +138,7 @@ add_task(function* test_expiration_origin_threshold() {
},
});
yield waitForPromise(unregisterDefer.promise, DEFAULT_TIMEOUT,
yield waitForPromise(unregisterPromise, DEFAULT_TIMEOUT,
'Timed out waiting for unregister request');
yield waitForPromise(notifyPromise, DEFAULT_TIMEOUT,

View File

@ -52,7 +52,8 @@ add_task(function* test_expiration_history_observer() {
}],
});
let unregisterDefer = Promise.defer();
let unregisterDone;
let unregisterPromise = new Promise(resolve => unregisterDone = resolve);
PushService.init({
serverURI: 'wss://push.example.org/',
@ -79,14 +80,14 @@ add_task(function* test_expiration_history_observer() {
},
onUnregister(request) {
equal(request.channelID, '379c0668-8323-44d2-a315-4ee83f1a9ee9', 'Dropped wrong channel ID');
unregisterDefer.resolve();
unregisterDone();
},
onACK(request) {},
});
}
});
yield waitForPromise(unregisterDefer.promise, DEFAULT_TIMEOUT,
yield waitForPromise(unregisterPromise, DEFAULT_TIMEOUT,
'Timed out waiting for unregister request');
let expiredRecord = yield db.getByKeyID('379c0668-8323-44d2-a315-4ee83f1a9ee9');

View File

@ -37,8 +37,8 @@ add_task(function* test_register_flush() {
let notifyPromise = promiseObserverNotification('push-notification');
let ackDefer = Promise.defer();
let ackDone = after(2, ackDefer.resolve);
let ackDone;
let ackPromise = new Promise(resolve => ackDone = after(2, resolve));
PushService.init({
serverURI: "wss://push.example.org/",
networkInfo: new MockDesktopNetworkInfo(),
@ -87,7 +87,7 @@ add_task(function* test_register_flush() {
'Timed out waiting for notification');
equal(scope, 'https://example.com/page/1', 'Wrong notification scope');
yield waitForPromise(ackDefer.promise, DEFAULT_TIMEOUT,
yield waitForPromise(ackPromise, DEFAULT_TIMEOUT,
'Timed out waiting for acknowledgements');
let prevRecord = yield db.getByKeyID(

View File

@ -21,8 +21,8 @@ function run_test() {
}
add_task(function* test_register_invalid_json() {
let helloDefer = Promise.defer();
let helloDone = after(2, helloDefer.resolve);
let helloDone;
let helloPromise = new Promise(resolve => helloDone = after(2, resolve));
let registers = 0;
PushServiceWebSocket._generateID = () => channelID;
@ -57,7 +57,7 @@ add_task(function* test_register_invalid_json() {
'Wrong error for invalid JSON response'
);
yield waitForPromise(helloDefer.promise, DEFAULT_TIMEOUT,
yield waitForPromise(helloPromise, DEFAULT_TIMEOUT,
'Reconnect after invalid JSON response timed out');
equal(registers, 1, 'Wrong register count');
});

View File

@ -23,8 +23,8 @@ function run_test() {
add_task(function* test_register_no_id() {
let registers = 0;
let helloDefer = Promise.defer();
let helloDone = after(2, helloDefer.resolve);
let helloDone;
let helloPromise = new Promise(resolve => helloDone = after(2, resolve));
PushServiceWebSocket._generateID = () => channelID;
PushService.init({
@ -61,7 +61,7 @@ add_task(function* test_register_no_id() {
'Wrong error for incomplete register response'
);
yield waitForPromise(helloDefer.promise, DEFAULT_TIMEOUT,
yield waitForPromise(helloPromise, DEFAULT_TIMEOUT,
'Reconnect after incomplete register response timed out');
equal(registers, 1, 'Wrong register count');
});

View File

@ -21,15 +21,16 @@ add_task(function* test_register_request_queue() {
let db = PushServiceWebSocket.newPushDB();
do_register_cleanup(() => {return db.drop().then(_ => db.close());});
let helloDefer = Promise.defer();
let onHello = after(2, function onHello(request) {
let onHello;
let helloPromise = new Promise(resolve => onHello = after(2, function onHello(request) {
this.serverSendMsg(JSON.stringify({
messageType: 'hello',
status: 200,
uaid: '54b08a9e-59c6-4ed7-bb54-f4fd60d6f606'
}));
helloDefer.resolve();
});
resolve();
}));
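// Wrapping onHello in after(2, ...) makes the mock server ignore the first
// handshake, so both queued requests time out; the reconnect's hello gets a
// reply and resolves helloPromise.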
PushService.init({
serverURI: "wss://push.example.org/",
networkInfo: new MockDesktopNetworkInfo(),
@ -62,6 +63,6 @@ add_task(function* test_register_request_queue() {
}, 'Should time out the second request')
]), DEFAULT_TIMEOUT, 'Queued requests did not time out');
yield waitForPromise(helloDefer.promise, DEFAULT_TIMEOUT,
yield waitForPromise(helloPromise, DEFAULT_TIMEOUT,
'Timed out waiting for reconnect');
});

View File

@ -27,7 +27,8 @@ add_task(function* test_register_rollback() {
let handshakes = 0;
let registers = 0;
let unregisterDefer = Promise.defer();
let unregisterDone;
let unregisterPromise = new Promise(resolve => unregisterDone = resolve);
PushServiceWebSocket._generateID = () => channelID;
PushService.init({
serverURI: "wss://push.example.org/",
@ -66,7 +67,7 @@ add_task(function* test_register_rollback() {
status: 200,
channelID
}));
unregisterDefer.resolve();
unregisterDone();
}
});
}
@ -83,7 +84,7 @@ add_task(function* test_register_rollback() {
);
// Should send an out-of-band unregister request.
yield waitForPromise(unregisterDefer.promise, DEFAULT_TIMEOUT,
yield waitForPromise(unregisterPromise, DEFAULT_TIMEOUT,
'Unregister request timed out');
equal(handshakes, 1, 'Wrong handshake count');
equal(registers, 1, 'Wrong register count');

View File

@ -22,7 +22,8 @@ function run_test() {
add_task(function* test_register_timeout() {
let handshakes = 0;
let timeoutDefer = Promise.defer();
let timeoutDone;
let timeoutPromise = new Promise(resolve => timeoutDone = resolve);
let registers = 0;
let db = PushServiceWebSocket.newPushDB();
@ -74,7 +75,7 @@ add_task(function* test_register_timeout() {
uaid: userAgentID,
pushEndpoint: 'https://example.com/update/timeout',
}));
timeoutDefer.resolve();
timeoutDone();
}, 2000);
registers++;
}
@ -95,7 +96,7 @@ add_task(function* test_register_timeout() {
ok(!record, 'Should not store records for timed-out responses');
yield waitForPromise(
timeoutDefer.promise,
timeoutPromise,
DEFAULT_TIMEOUT,
'Reconnect timed out'
);

View File

@ -25,8 +25,8 @@ function run_test() {
add_task(function* test_register_wrong_id() {
// Should reconnect after the register request times out.
let registers = 0;
let helloDefer = Promise.defer();
let helloDone = after(2, helloDefer.resolve);
let helloDone;
let helloPromise = new Promise(resolve => helloDone = after(2, resolve));
PushServiceWebSocket._generateID = () => clientChannelID;
PushService.init({
@ -67,7 +67,7 @@ add_task(function* test_register_wrong_id() {
'Wrong error for mismatched register reply'
);
yield waitForPromise(helloDefer.promise, DEFAULT_TIMEOUT,
yield waitForPromise(helloPromise, DEFAULT_TIMEOUT,
'Reconnect after mismatched register reply timed out');
equal(registers, 1, 'Wrong register count');
});

View File

@ -21,8 +21,8 @@ function run_test() {
add_task(function* test_register_wrong_type() {
let registers = 0;
let helloDefer = Promise.defer();
let helloDone = after(2, helloDefer.resolve);
let helloDone;
let helloPromise = new Promise(resolve => helloDone = after(2, resolve));
PushService._generateID = () => '1234';
PushService.init({
@ -63,7 +63,7 @@ add_task(function* test_register_wrong_type() {
'Wrong error for non-string channel ID'
);
yield waitForPromise(helloDefer.promise, DEFAULT_TIMEOUT,
yield waitForPromise(helloPromise, DEFAULT_TIMEOUT,
'Reconnect after sending non-string channel ID timed out');
equal(registers, 1, 'Wrong register count');
});

View File

@ -42,7 +42,8 @@ add_task(function* test_registration_success() {
yield db.put(record);
}
let handshakeDefer = Promise.defer();
let handshakeDone;
let handshakePromise = new Promise(resolve => handshakeDone = resolve);
PushService.init({
serverURI: "wss://push.example.org/",
networkInfo: new MockDesktopNetworkInfo(),
@ -60,14 +61,14 @@ add_task(function* test_registration_success() {
status: 200,
uaid: userAgentID
}));
handshakeDefer.resolve();
handshakeDone();
}
});
}
});
yield waitForPromise(
handshakeDefer.promise,
handshakePromise,
DEFAULT_TIMEOUT,
'Timed out waiting for handshake'
);

View File

@ -25,7 +25,8 @@ add_task(function* test_unregister_error() {
quota: Infinity,
});
let unregisterDefer = Promise.defer();
let unregisterDone;
let unregisterPromise = new Promise(resolve => unregisterDone = resolve);
PushService.init({
serverURI: "wss://push.example.org/",
networkInfo: new MockDesktopNetworkInfo(),
@ -49,7 +50,7 @@ add_task(function* test_unregister_error() {
error: 'omg, everything is exploding',
channelID
}));
unregisterDefer.resolve();
unregisterDone();
}
});
}
@ -62,6 +63,6 @@ add_task(function* test_unregister_error() {
ok(!result, 'Deleted push record exists');
// Make sure we send a request to the server.
yield waitForPromise(unregisterDefer.promise, DEFAULT_TIMEOUT,
yield waitForPromise(unregisterPromise, DEFAULT_TIMEOUT,
'Timed out waiting for unregister');
});

View File

@ -39,8 +39,8 @@ add_task(function* test_unregister_invalid_json() {
yield db.put(record);
}
let unregisterDefer = Promise.defer();
let unregisterDone = after(2, unregisterDefer.resolve);
let unregisterDone;
let unregisterPromise = new Promise(resolve => unregisterDone = after(2, resolve));
PushService.init({
serverURI: "wss://push.example.org/",
networkInfo: new MockDesktopNetworkInfo(),
@ -77,6 +77,6 @@ add_task(function* test_unregister_invalid_json() {
ok(!record,
'Failed to delete unregistered record after receiving invalid JSON');
yield waitForPromise(unregisterDefer.promise, DEFAULT_TIMEOUT,
yield waitForPromise(unregisterPromise, DEFAULT_TIMEOUT,
'Timed out waiting for unregister');
});

View File

@ -25,7 +25,8 @@ add_task(function* test_unregister_success() {
quota: Infinity,
});
let unregisterDefer = Promise.defer();
let unregisterDone;
let unregisterPromise = new Promise(resolve => unregisterDone = resolve);
PushService.init({
serverURI: "wss://push.example.org/",
networkInfo: new MockDesktopNetworkInfo(),
@ -46,7 +47,7 @@ add_task(function* test_unregister_success() {
status: 200,
channelID
}));
unregisterDefer.resolve();
unregisterDone();
}
});
}
@ -57,6 +58,6 @@ add_task(function* test_unregister_success() {
let record = yield db.getByKeyID(channelID);
ok(!record, 'Unregister did not remove record');
yield waitForPromise(unregisterDefer.promise, DEFAULT_TIMEOUT,
yield waitForPromise(unregisterPromise, DEFAULT_TIMEOUT,
'Timed out waiting for unregister');
});

View File

@ -30,6 +30,7 @@
#include "TextureGarbageBin.h"
#include "gfx2DGlue.h"
#include "gfxPrefs.h"
#include "DriverCrashGuard.h"
#include "mozilla/IntegerPrintfMacros.h"
#include "OGLShaderProgram.h" // for ShaderProgramType
@ -368,6 +369,11 @@ GLContext::InitWithPrefix(const char *prefix, bool trygl)
return true;
}
GLContextCrashGuard crashGuard;
if (crashGuard.Crashed()) {
return false;
}
mWorkAroundDriverBugs = gfxPrefs::WorkAroundDriverBugs();
SymLoadStruct symbols[] = {

View File

@ -8,6 +8,7 @@
#include "mozilla/layers/TextureD3D9.h"
#include "mozilla/layers/CompositableClient.h"
#include "mozilla/layers/CompositableForwarder.h"
#include "mozilla/layers/ImageBridgeChild.h"
#include "mozilla/gfx/Types.h"
namespace mozilla {
@ -22,63 +23,8 @@ D3D9SurfaceImage::D3D9SurfaceImage()
D3D9SurfaceImage::~D3D9SurfaceImage()
{
if (mTexture) {
gfxWindowsPlatform::sD3D9SurfaceImageUsed -= mSize.width * mSize.height * 4;
}
}
static const GUID sD3D9TextureUsage =
{ 0x631e1338, 0xdc22, 0x497f, { 0xa1, 0xa8, 0xb4, 0xfe, 0x3a, 0xf4, 0x13, 0x4d } };
/* This class gets its lifetime tied to a D3D texture
* and increments memory usage on construction and decrements
* on destruction */
class TextureMemoryMeasurer9 : public IUnknown
{
public:
TextureMemoryMeasurer9(size_t aMemoryUsed)
{
mMemoryUsed = aMemoryUsed;
gfxWindowsPlatform::sD3D9MemoryUsed += mMemoryUsed;
mRefCnt = 0;
}
~TextureMemoryMeasurer9()
{
gfxWindowsPlatform::sD3D9MemoryUsed -= mMemoryUsed;
}
STDMETHODIMP_(ULONG) AddRef() {
mRefCnt++;
return mRefCnt;
}
STDMETHODIMP QueryInterface(REFIID riid,
void **ppvObject)
{
IUnknown *punk = nullptr;
if (riid == IID_IUnknown) {
punk = this;
}
*ppvObject = punk;
if (punk) {
punk->AddRef();
return S_OK;
} else {
return E_NOINTERFACE;
}
}
STDMETHODIMP_(ULONG) Release() {
int refCnt = --mRefCnt;
if (refCnt == 0) {
delete this;
}
return refCnt;
}
private:
int mRefCnt;
int mMemoryUsed;
};
HRESULT
D3D9SurfaceImage::SetData(const Data& aData)
{
@ -108,30 +54,18 @@ D3D9SurfaceImage::SetData(const Data& aData)
// to a sharable texture so that it's accessible to the layer manager's
// device.
const gfx::IntRect& region = aData.mRegion;
RefPtr<IDirect3DTexture9> texture;
HANDLE shareHandle = nullptr;
hr = device->CreateTexture(region.width,
region.height,
1,
D3DUSAGE_RENDERTARGET,
D3DFMT_X8R8G8B8,
D3DPOOL_DEFAULT,
byRef(texture),
&shareHandle);
NS_ENSURE_TRUE(SUCCEEDED(hr) && shareHandle, hr);
// Track the lifetime of this memory
texture->SetPrivateData(sD3D9TextureUsage, new TextureMemoryMeasurer9(region.width * region.height * 4), sizeof(IUnknown *), D3DSPD_IUNKNOWN);
gfxWindowsPlatform::sD3D9SurfaceImageUsed += region.width * region.height * 4;
RefPtr<SharedTextureClientD3D9> textureClient =
aData.mAllocator->CreateOrRecycleClient(gfx::SurfaceFormat::B8G8R8X8,
region.Size());
if (!textureClient) {
return E_FAIL;
}
// Copy the image onto the texture, performing YUV -> RGB conversion if necessary.
RefPtr<IDirect3DSurface9> textureSurface;
hr = texture->GetSurfaceLevel(0, byRef(textureSurface));
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
// Stash the surface description for later use.
textureSurface->GetDesc(&mDesc);
RefPtr<IDirect3DSurface9> textureSurface = textureClient->GetD3D9Surface();
if (!textureSurface) {
return E_FAIL;
}
RECT src = { region.x, region.y, region.x+region.width, region.y+region.height };
hr = device->StretchRect(surface, &src, textureSurface, nullptr, D3DTEXF_NONE);
@ -146,9 +80,8 @@ D3D9SurfaceImage::SetData(const Data& aData)
hr = query->Issue(D3DISSUE_END);
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
mTexture = texture;
mShareHandle = shareHandle;
mSize = gfx::IntSize(region.width, region.height);
mTextureClient = textureClient;
mSize = region.Size();
mQuery = query;
return S_OK;
@ -188,7 +121,7 @@ D3D9SurfaceImage::EnsureSynchronized()
const D3DSURFACE_DESC&
D3D9SurfaceImage::GetDesc() const
{
return mDesc;
return mTextureClient->GetDesc();
}
gfx::IntSize
@ -200,22 +133,16 @@ D3D9SurfaceImage::GetSize()
TextureClient*
D3D9SurfaceImage::GetTextureClient(CompositableClient* aClient)
{
MOZ_ASSERT(mTextureClient);
MOZ_ASSERT(mTextureClient->GetAllocator() == aClient->GetForwarder());
EnsureSynchronized();
if (!mTextureClient) {
mTextureClient = SharedTextureClientD3D9::Create(aClient->GetForwarder(),
gfx::SurfaceFormat::B8G8R8X8,
TextureFlags::DEFAULT,
mTexture,
mShareHandle,
mDesc);
}
return mTextureClient;
}
already_AddRefed<gfx::SourceSurface>
D3D9SurfaceImage::GetAsSourceSurface()
{
NS_ENSURE_TRUE(mTexture, nullptr);
NS_ENSURE_TRUE(mTextureClient, nullptr);
HRESULT hr;
RefPtr<gfx::DataSourceSurface> surface = gfx::Factory::CreateDataSourceSurface(mSize, gfx::SurfaceFormat::B8G8R8X8);
@ -228,17 +155,19 @@ D3D9SurfaceImage::GetAsSourceSurface()
// Readback the texture from GPU memory into system memory, so that
// we can copy it into the Cairo image. This is expensive.
RefPtr<IDirect3DSurface9> textureSurface;
hr = mTexture->GetSurfaceLevel(0, byRef(textureSurface));
NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
RefPtr<IDirect3DSurface9> textureSurface = mTextureClient->GetD3D9Surface();
if (!textureSurface) {
return nullptr;
}
RefPtr<IDirect3DDevice9> device;
hr = mTexture->GetDevice(byRef(device));
NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
RefPtr<IDirect3DDevice9> device = mTextureClient->GetD3D9Device();
if (!device) {
return nullptr;
}
RefPtr<IDirect3DSurface9> systemMemorySurface;
hr = device->CreateOffscreenPlainSurface(mDesc.Width,
mDesc.Height,
hr = device->CreateOffscreenPlainSurface(mSize.width,
mSize.height,
D3DFMT_X8R8G8B8,
D3DPOOL_SYSTEMMEM,
byRef(systemMemorySurface),
@ -272,5 +201,36 @@ D3D9SurfaceImage::GetAsSourceSurface()
return surface.forget();
}
already_AddRefed<TextureClient>
D3D9RecycleAllocator::Allocate(gfx::SurfaceFormat aFormat,
gfx::IntSize aSize,
BackendSelector aSelector,
TextureFlags aTextureFlags,
TextureAllocationFlags aAllocFlags)
{
return SharedTextureClientD3D9::Create(mSurfaceAllocator,
aFormat,
aTextureFlags,
mDevice,
aSize);
}
already_AddRefed<SharedTextureClientD3D9>
D3D9RecycleAllocator::CreateOrRecycleClient(gfx::SurfaceFormat aFormat,
const gfx::IntSize& aSize)
{
RefPtr<TextureClient> textureClient =
CreateOrRecycle(aFormat,
aSize,
BackendSelector::Content,
layers::TextureFlags::DEFAULT);
if (!textureClient) {
return nullptr;
}
RefPtr<SharedTextureClientD3D9> textureD3D9 = static_cast<SharedTextureClientD3D9*>(textureClient.get());
return textureD3D9.forget();
}
} // namespace layers
} // namespace mozilla

View File

@ -10,10 +10,37 @@
#include "ImageContainer.h"
#include "nsAutoPtr.h"
#include "d3d9.h"
#include "mozilla/layers/TextureClientRecycleAllocator.h"
namespace mozilla {
namespace layers {
class SharedTextureClientD3D9;
class D3D9RecycleAllocator : public TextureClientRecycleAllocator
{
public:
explicit D3D9RecycleAllocator(ISurfaceAllocator* aAllocator,
IDirect3DDevice9* aDevice)
: TextureClientRecycleAllocator(aAllocator)
, mDevice(aDevice)
{}
already_AddRefed<SharedTextureClientD3D9>
CreateOrRecycleClient(gfx::SurfaceFormat aFormat,
const gfx::IntSize& aSize);
protected:
virtual already_AddRefed<TextureClient>
Allocate(gfx::SurfaceFormat aFormat,
gfx::IntSize aSize,
BackendSelector aSelector,
TextureFlags aTextureFlags,
TextureAllocationFlags aAllocFlags) override;
RefPtr<IDirect3DDevice9> mDevice;
};
// Image class that wraps a IDirect3DSurface9. This class copies the image
// passed into SetData(), so that it can be accessed from other D3D devices.
// This class also manages the synchronization of the copy, to ensure the
@ -22,10 +49,17 @@ class D3D9SurfaceImage : public Image {
public:
struct Data {
Data(IDirect3DSurface9* aSurface, const gfx::IntRect& aRegion)
: mSurface(aSurface), mRegion(aRegion) {}
Data(IDirect3DSurface9* aSurface,
const gfx::IntRect& aRegion,
D3D9RecycleAllocator* aAllocator)
: mSurface(aSurface)
, mRegion(aRegion)
, mAllocator(aAllocator)
{}
RefPtr<IDirect3DSurface9> mSurface;
gfx::IntRect mRegion;
RefPtr<D3D9RecycleAllocator> mAllocator;
};
D3D9SurfaceImage();
@ -53,11 +87,8 @@ private:
void EnsureSynchronized();
gfx::IntSize mSize;
RefPtr<IDirect3DTexture9> mTexture;
RefPtr<IDirect3DQuery9> mQuery;
RefPtr<TextureClient> mTextureClient;
HANDLE mShareHandle;
D3DSURFACE_DESC mDesc;
RefPtr<SharedTextureClientD3D9> mTextureClient;
bool mValid;
};
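With the allocator threaded through Data, a call site now looks roughly like the sketch below; 'forwarder', 'device', 'surface', and 'region' are assumed to be in scope, and the names are illustrative rather than part of this patch:

RefPtr<D3D9RecycleAllocator> allocator =
  new D3D9RecycleAllocator(forwarder, device);

// Wrap the source surface together with the allocator that will hand out
// (or recycle) the shareable copy made inside SetData().
D3D9SurfaceImage::Data data(surface, region, allocator);

nsRefPtr<D3D9SurfaceImage> image = new D3D9SurfaceImage();
HRESULT hr = image->SetData(data);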

View File

@ -621,10 +621,10 @@ CairoImage::GetTextureClient(CompositableClient *aClient)
aClient->GetTextureClientRecycler();
if (recycler) {
textureClient =
recycler->CreateOrRecycleForDrawing(surface->GetFormat(),
surface->GetSize(),
BackendSelector::Content,
aClient->GetTextureFlags());
recycler->CreateOrRecycle(surface->GetFormat(),
surface->GetSize(),
BackendSelector::Content,
aClient->GetTextureFlags());
}
#endif

View File

@ -288,8 +288,8 @@ public:
explicit ImageContainer(ImageContainer::Mode flag = SYNCHRONOUS);
typedef int32_t FrameID;
typedef int32_t ProducerID;
typedef uint32_t FrameID;
typedef uint32_t ProducerID;
/**

View File

@ -81,9 +81,15 @@ void
ImageClient::RemoveTextureWithWaiter(TextureClient* aTexture,
AsyncTransactionWaiter* aAsyncTransactionWaiter)
{
#ifdef MOZ_WIDGET_GONK
if (aAsyncTransactionWaiter ||
GetForwarder()->IsImageBridgeChild()) {
if ((aAsyncTransactionWaiter ||
GetForwarder()->IsImageBridgeChild())
#ifndef MOZ_WIDGET_GONK
// If the texture client is taking part in recycling then we should make sure
// the host has finished with it before dropping the ref and triggering
// the recycle callback.
&& aTexture->GetRecycleAllocator()
#endif
) {
RefPtr<AsyncTransactionTracker> request =
new RemoveTextureFromCompositableTracker(aAsyncTransactionWaiter);
// Hold TextureClient until the transaction complete to postpone
@ -92,7 +98,6 @@ ImageClient::RemoveTextureWithWaiter(TextureClient* aTexture,
GetForwarder()->RemoveTextureFromCompositableAsync(request, this, aTexture);
return;
}
#endif
GetForwarder()->RemoveTextureFromCompositable(this, aTexture);
}

View File

@ -249,6 +249,24 @@ TextureClient::SetAddedToCompositableClient()
}
}
/* static */ void
TextureClient::TextureClientRecycleCallback(TextureClient* aClient, void* aClosure)
{
MOZ_ASSERT(aClient->GetRecycleAllocator());
aClient->GetRecycleAllocator()->RecycleTextureClient(aClient);
}
void
TextureClient::SetRecycleAllocator(TextureClientRecycleAllocator* aAllocator)
{
mRecycleAllocator = aAllocator;
if (aAllocator) {
SetRecycleCallback(TextureClientRecycleCallback, nullptr);
} else {
ClearRecycleCallback();
}
}
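SetRecycleAllocator() wires up an owner-notification pattern: while an allocator is set, the client holds a strong reference to it, and TextureClientRecycleCallback fires once only the pool's holder reference remains. A rough sketch of the resulting round trip, with 'recycleAllocator' assumed to be in scope:

{
  // CreateOrRecycle() calls SetRecycleAllocator(this) on the client it
  // returns, so the client keeps its allocator alive while in use.
  RefPtr<TextureClient> client =
    recycleAllocator->CreateOrRecycle(gfx::SurfaceFormat::B8G8R8A8,
                                      gfx::IntSize(256, 256),
                                      BackendSelector::Content,
                                      TextureFlags::DEFAULT);
  // ... composite a frame with the client ...
}
// The last external reference is gone: the recycle callback runs,
// RecycleTextureClient() returns the holder to the pool (up to
// mMaxPooledSize entries), and SetRecycleAllocator(nullptr) drops the
// client's reference to the allocator.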
bool
TextureClient::InitIPDLActor(CompositableForwarder* aForwarder)
{

View File

@ -50,6 +50,7 @@ class PTextureChild;
class TextureChild;
class BufferTextureClient;
class TextureClient;
class TextureClientRecycleAllocator;
#ifdef GFX_DEBUG_TRACK_CLIENTS_IN_POOL
class TextureClientPool;
#endif
@ -491,7 +492,17 @@ public:
mShared = true;
}
ISurfaceAllocator* GetAllocator()
{
return mAllocator;
}
TextureClientRecycleAllocator* GetRecycleAllocator() { return mRecycleAllocator; }
void SetRecycleAllocator(TextureClientRecycleAllocator* aAllocator);
private:
static void TextureClientRecycleCallback(TextureClient* aClient, void* aClosure);
/**
* Called once, just before the destructor.
*
@ -519,13 +530,9 @@ protected:
*/
virtual bool ToSurfaceDescriptor(SurfaceDescriptor& aDescriptor) = 0;
ISurfaceAllocator* GetAllocator()
{
return mAllocator;
}
RefPtr<TextureChild> mActor;
RefPtr<ISurfaceAllocator> mAllocator;
RefPtr<TextureClientRecycleAllocator> mRecycleAllocator;
TextureFlags mFlags;
FenceHandle mReleaseFenceHandle;
FenceHandle mAcquireFenceHandle;

View File

@ -3,147 +3,62 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include <map>
#include <stack>
#include "gfxPlatform.h"
#include "mozilla/layers/GrallocTextureClient.h"
#include "mozilla/layers/ISurfaceAllocator.h"
#include "mozilla/Mutex.h"
#include "TextureClientRecycleAllocator.h"
namespace mozilla {
namespace layers {
class TextureClientRecycleAllocatorImp : public ISurfaceAllocator
// Used to keep TextureClient's reference count stable so as not to disrupt recycling.
class TextureClientHolder
{
~TextureClientRecycleAllocatorImp();
~TextureClientHolder() {}
public:
explicit TextureClientRecycleAllocatorImp(ISurfaceAllocator* aAllocator);
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(TextureClientHolder)
void SetMaxPoolSize(uint32_t aMax)
explicit TextureClientHolder(TextureClient* aClient)
: mTextureClient(aClient)
{}
TextureClient* GetTextureClient()
{
if (aMax > 0) {
mMaxPooledSize = aMax;
}
}
// Creates and allocates a TextureClient.
already_AddRefed<TextureClient>
CreateOrRecycleForDrawing(gfx::SurfaceFormat aFormat,
gfx::IntSize aSize,
BackendSelector aSelector,
TextureFlags aTextureFlags,
TextureAllocationFlags flags);
void Destroy();
void RecycleCallbackImp(TextureClient* aClient);
static void RecycleCallback(TextureClient* aClient, void* aClosure);
// ISurfaceAllocator
virtual LayersBackend GetCompositorBackendType() const override
{
return mSurfaceAllocator->GetCompositorBackendType();
}
virtual bool AllocShmem(size_t aSize,
mozilla::ipc::SharedMemory::SharedMemoryType aType,
mozilla::ipc::Shmem* aShmem) override
{
return mSurfaceAllocator->AllocShmem(aSize, aType, aShmem);
}
virtual bool AllocUnsafeShmem(size_t aSize,
mozilla::ipc::SharedMemory::SharedMemoryType aType,
mozilla::ipc::Shmem* aShmem) override
{
return mSurfaceAllocator->AllocUnsafeShmem(aSize, aType, aShmem);
}
virtual void DeallocShmem(mozilla::ipc::Shmem& aShmem) override
{
mSurfaceAllocator->DeallocShmem(aShmem);
}
virtual bool IsSameProcess() const override
{
return mSurfaceAllocator->IsSameProcess();
}
virtual MessageLoop * GetMessageLoop() const override
{
return mSurfaceAllocator->GetMessageLoop();
return mTextureClient;
}
void ClearTextureClient() { mTextureClient = nullptr; }
protected:
// ISurfaceAllocator
virtual bool IsOnCompositorSide() const override
{
return false;
}
private:
static const uint32_t kMaxPooledSized = 2;
// Used to keep TextureClient's reference count stable so as not to disrupt recycling.
class TextureClientHolder
{
~TextureClientHolder() {}
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(TextureClientHolder)
explicit TextureClientHolder(TextureClient* aClient)
: mTextureClient(aClient)
{}
TextureClient* GetTextureClient()
{
return mTextureClient;
}
void ClearTextureClient() { mTextureClient = nullptr; }
protected:
RefPtr<TextureClient> mTextureClient;
};
bool mDestroyed;
uint32_t mMaxPooledSize;
RefPtr<ISurfaceAllocator> mSurfaceAllocator;
std::map<TextureClient*, RefPtr<TextureClientHolder> > mInUseClients;
// On b2g gonk, std::queue might be a better choice.
// On ICS, fence wait happens implicitly before drawing.
// Since JB, fence wait happens explicitly when fetching a client from the pool.
// A stack is good from a graphics cache usage point of view.
std::stack<RefPtr<TextureClientHolder> > mPooledClients;
Mutex mLock;
RefPtr<TextureClient> mTextureClient;
};
TextureClientRecycleAllocatorImp::TextureClientRecycleAllocatorImp(ISurfaceAllocator *aAllocator)
: mDestroyed(false)
TextureClientRecycleAllocator::TextureClientRecycleAllocator(ISurfaceAllocator *aAllocator)
: mSurfaceAllocator(aAllocator)
, mMaxPooledSize(kMaxPooledSized)
, mSurfaceAllocator(aAllocator)
, mLock("TextureClientRecycleAllocatorImp.mLock")
{
}
TextureClientRecycleAllocatorImp::~TextureClientRecycleAllocatorImp()
TextureClientRecycleAllocator::~TextureClientRecycleAllocator()
{
MOZ_ASSERT(mDestroyed);
MOZ_ASSERT(mPooledClients.empty());
MutexAutoLock lock(mLock);
while (!mPooledClients.empty()) {
mPooledClients.pop();
}
MOZ_ASSERT(mInUseClients.empty());
}
void
TextureClientRecycleAllocator::SetMaxPoolSize(uint32_t aMax)
{
mMaxPooledSize = aMax;
}
already_AddRefed<TextureClient>
TextureClientRecycleAllocatorImp::CreateOrRecycleForDrawing(
gfx::SurfaceFormat aFormat,
gfx::IntSize aSize,
BackendSelector aSelector,
TextureFlags aTextureFlags,
TextureAllocationFlags aAllocFlags)
TextureClientRecycleAllocator::CreateOrRecycle(gfx::SurfaceFormat aFormat,
gfx::IntSize aSize,
BackendSelector aSelector,
TextureFlags aTextureFlags,
TextureAllocationFlags aAllocFlags)
{
// TextureAllocationFlags is actually used only by ContentClient.
// This class does not handle ContentClient's TextureClient allocation.
@ -156,15 +71,12 @@ TextureClientRecycleAllocatorImp::CreateOrRecycleForDrawing(
{
MutexAutoLock lock(mLock);
if (mDestroyed) {
return nullptr;
} else if (!mPooledClients.empty()) {
if (!mPooledClients.empty()) {
textureHolder = mPooledClients.top();
mPooledClients.pop();
// If a pooled TextureClient is not compatible, release it.
if (textureHolder->GetTextureClient()->GetFormat() != aFormat ||
textureHolder->GetTextureClient()->GetSize() != aSize)
{
textureHolder->GetTextureClient()->GetSize() != aSize) {
TextureClientReleaseTask* task = new TextureClientReleaseTask(textureHolder->GetTextureClient());
textureHolder->ClearTextureClient();
textureHolder = nullptr;
@ -178,9 +90,7 @@ TextureClientRecycleAllocatorImp::CreateOrRecycleForDrawing(
if (!textureHolder) {
// Allocate new TextureClient
RefPtr<TextureClient> texture;
texture = TextureClient::CreateForDrawing(this, aFormat, aSize, aSelector,
aTextureFlags, aAllocFlags);
RefPtr<TextureClient> texture = Allocate(aFormat, aSize, aSelector, aTextureFlags, aAllocFlags);
if (!texture) {
return nullptr;
}
@ -193,34 +103,39 @@ TextureClientRecycleAllocatorImp::CreateOrRecycleForDrawing(
// Register TextureClient
mInUseClients[textureHolder->GetTextureClient()] = textureHolder;
}
textureHolder->GetTextureClient()->SetRecycleCallback(TextureClientRecycleAllocatorImp::RecycleCallback, this);
RefPtr<TextureClient> client(textureHolder->GetTextureClient());
// Make sure the texture holds a reference to us, and ask it to call RecycleTextureClient when its
// ref count drops to 1.
client->SetRecycleAllocator(this);
return client.forget();
}
void
TextureClientRecycleAllocatorImp::Destroy()
already_AddRefed<TextureClient>
TextureClientRecycleAllocator::Allocate(gfx::SurfaceFormat aFormat,
gfx::IntSize aSize,
BackendSelector aSelector,
TextureFlags aTextureFlags,
TextureAllocationFlags aAllocFlags)
{
MutexAutoLock lock(mLock);
if (mDestroyed) {
return;
}
mDestroyed = true;
while (!mPooledClients.empty()) {
mPooledClients.pop();
}
return TextureClient::CreateForDrawing(mSurfaceAllocator, aFormat, aSize, aSelector,
aTextureFlags, aAllocFlags);
}
void
TextureClientRecycleAllocatorImp::RecycleCallbackImp(TextureClient* aClient)
TextureClientRecycleAllocator::RecycleTextureClient(TextureClient* aClient)
{
// Clearing the recycle allocator drops a reference, so make sure we stay alive
// for the duration of this function.
RefPtr<TextureClientRecycleAllocator> kungFuDeathGrip(this);
aClient->SetRecycleAllocator(nullptr);
RefPtr<TextureClientHolder> textureHolder;
aClient->ClearRecycleCallback();
{
MutexAutoLock lock(mLock);
if (mInUseClients.find(aClient) != mInUseClients.end()) {
textureHolder = mInUseClients[aClient]; // Keep reference count of TextureClientHolder within lock.
if (!mDestroyed && mPooledClients.size() < mMaxPooledSize) {
if (mPooledClients.size() < mMaxPooledSize) {
mPooledClients.push(textureHolder);
}
mInUseClients.erase(aClient);
@ -228,45 +143,5 @@ TextureClientRecycleAllocatorImp::RecycleCallbackImp(TextureClient* aClient)
}
}
/* static */ void
TextureClientRecycleAllocatorImp::RecycleCallback(TextureClient* aClient, void* aClosure)
{
MOZ_ASSERT(aClient && !aClient->IsDead());
TextureClientRecycleAllocatorImp* recycleAllocator = static_cast<TextureClientRecycleAllocatorImp*>(aClosure);
recycleAllocator->RecycleCallbackImp(aClient);
}
TextureClientRecycleAllocator::TextureClientRecycleAllocator(ISurfaceAllocator *aAllocator)
{
mAllocator = new TextureClientRecycleAllocatorImp(aAllocator);
}
TextureClientRecycleAllocator::~TextureClientRecycleAllocator()
{
mAllocator->Destroy();
mAllocator = nullptr;
}
void
TextureClientRecycleAllocator::SetMaxPoolSize(uint32_t aMax)
{
mAllocator->SetMaxPoolSize(aMax);
}
already_AddRefed<TextureClient>
TextureClientRecycleAllocator::CreateOrRecycleForDrawing(
gfx::SurfaceFormat aFormat,
gfx::IntSize aSize,
BackendSelector aSelector,
TextureFlags aTextureFlags,
TextureAllocationFlags aAllocFlags)
{
return mAllocator->CreateOrRecycleForDrawing(aFormat,
aSize,
aSelector,
aTextureFlags,
aAllocFlags);
}
} // namespace layers
} // namespace mozilla

View File

@ -6,15 +6,18 @@
#ifndef MOZILLA_GFX_TEXTURECLIENT_RECYCLE_ALLOCATOR_H
#define MOZILLA_GFX_TEXTURECLIENT_RECYCLE_ALLOCATOR_H
#include <map>
#include <stack>
#include "mozilla/gfx/Types.h"
#include "mozilla/RefPtr.h"
#include "TextureClient.h"
#include "mozilla/Mutex.h"
namespace mozilla {
namespace layers {
class ISurfaceAllocator;
class TextureClientRecycleAllocatorImp;
class TextureClientHolder;
/**
@ -22,10 +25,14 @@ class TextureClientRecycleAllocatorImp;
* recycling capabilities. It expects allocations of the same sizes and
* attributes. If a recycled TextureClient is different from the
* requested one, the recycled one is dropped and a new TextureClient is allocated.
*
* By default this uses TextureClient::CreateForDrawing to allocate new texture
* clients.
*/
class TextureClientRecycleAllocator
{
~TextureClientRecycleAllocator();
protected:
virtual ~TextureClientRecycleAllocator();
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(TextureClientRecycleAllocator)
@ -36,14 +43,37 @@ public:
// Creates and allocates a TextureClient.
already_AddRefed<TextureClient>
CreateOrRecycleForDrawing(gfx::SurfaceFormat aFormat,
gfx::IntSize aSize,
BackendSelector aSelector,
TextureFlags aTextureFlags,
TextureAllocationFlags flags = ALLOC_DEFAULT);
CreateOrRecycle(gfx::SurfaceFormat aFormat,
gfx::IntSize aSize,
BackendSelector aSelector,
TextureFlags aTextureFlags,
TextureAllocationFlags flags = ALLOC_DEFAULT);
protected:
virtual already_AddRefed<TextureClient>
Allocate(gfx::SurfaceFormat aFormat,
gfx::IntSize aSize,
BackendSelector aSelector,
TextureFlags aTextureFlags,
TextureAllocationFlags aAllocFlags);
RefPtr<ISurfaceAllocator> mSurfaceAllocator;
private:
RefPtr<TextureClientRecycleAllocatorImp> mAllocator;
friend class TextureClient;
void RecycleTextureClient(TextureClient* aClient);
static const uint32_t kMaxPooledSized = 2;
uint32_t mMaxPooledSize;
std::map<TextureClient*, RefPtr<TextureClientHolder> > mInUseClients;
// On b2g gonk, std::queue might be a better choice.
// On ICS, fence wait happens implicitly before drawing.
// Since JB, fence wait happens explicitly when fetching a client from the pool.
// A stack is good from a graphics cache usage point of view.
std::stack<RefPtr<TextureClientHolder> > mPooledClients;
Mutex mLock;
};
} // namespace layers
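Because Allocate() is now a protected virtual with a CreateForDrawing() default, a backend specializes the pool by overriding that single hook, exactly as D3D9RecycleAllocator does earlier in this diff. A minimal sketch; the class name is made up for illustration:

class MyBackendRecycleAllocator : public TextureClientRecycleAllocator
{
public:
  explicit MyBackendRecycleAllocator(ISurfaceAllocator* aAllocator)
    : TextureClientRecycleAllocator(aAllocator)
  {}

protected:
  already_AddRefed<TextureClient>
  Allocate(gfx::SurfaceFormat aFormat,
           gfx::IntSize aSize,
           BackendSelector aSelector,
           TextureFlags aTextureFlags,
           TextureAllocationFlags aAllocFlags) override
  {
    // A real backend would create its own TextureClient flavor here;
    // falling through to the base behavior keeps the sketch runnable.
    return TextureClient::CreateForDrawing(mSurfaceAllocator, aFormat, aSize,
                                           aSelector, aTextureFlags,
                                           aAllocFlags);
  }
};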

View File

@ -675,22 +675,45 @@ already_AddRefed<SharedTextureClientD3D9>
SharedTextureClientD3D9::Create(ISurfaceAllocator* aAllocator,
gfx::SurfaceFormat aFormat,
TextureFlags aFlags,
IDirect3DTexture9* aTexture,
HANDLE aSharedHandle,
D3DSURFACE_DESC aDesc)
IDirect3DDevice9* aDevice,
const gfx::IntSize& aSize)
{
RefPtr<SharedTextureClientD3D9> texture =
MOZ_ASSERT(aFormat == gfx::SurfaceFormat::B8G8R8X8);
RefPtr<IDirect3DTexture9> texture;
HANDLE shareHandle = nullptr;
HRESULT hr = aDevice->CreateTexture(aSize.width,
aSize.height,
1,
D3DUSAGE_RENDERTARGET,
D3DFMT_X8R8G8B8,
D3DPOOL_DEFAULT,
byRef(texture),
&shareHandle);
NS_ENSURE_TRUE(SUCCEEDED(hr) && shareHandle, nullptr);
RefPtr<SharedTextureClientD3D9> client =
new SharedTextureClientD3D9(aAllocator,
aFormat,
aFlags);
MOZ_ASSERT(!texture->mTexture);
texture->mTexture = aTexture;
texture->mHandle = aSharedHandle;
texture->mDesc = aDesc;
if (texture->mTexture) {
gfxWindowsPlatform::sD3D9SharedTextureUsed += texture->mDesc.Width * texture->mDesc.Height * 4;
}
return texture.forget();
client->mDevice = aDevice;
client->mTexture = texture;
client->mHandle = shareHandle;
texture->GetLevelDesc(0, &client->mDesc);
gfxWindowsPlatform::sD3D9SharedTextureUsed += aSize.width * aSize.height * 4;
return client.forget();
}
already_AddRefed<IDirect3DSurface9>
SharedTextureClientD3D9::GetD3D9Surface() const
{
RefPtr<IDirect3DSurface9> textureSurface;
HRESULT hr = mTexture->GetSurfaceLevel(0, byRef(textureSurface));
NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
return textureSurface.forget();
}
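Create() now owns the whole texture setup: it creates the shareable render target, stores the share handle, and caches the level-0 D3DSURFACE_DESC. A hedged usage sketch, with 'allocator' and 'device' assumed to be in scope:

RefPtr<SharedTextureClientD3D9> client =
  SharedTextureClientD3D9::Create(allocator,
                                  gfx::SurfaceFormat::B8G8R8X8, // the only format Create() accepts
                                  TextureFlags::DEFAULT,
                                  device,
                                  gfx::IntSize(640, 480));
if (client) {
  RefPtr<IDirect3DSurface9> surf = client->GetD3D9Surface();
  // Draw into surf; the share handle and surface description travel
  // with the client via GetShareHandle() / GetDesc().
}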
bool

View File

@ -244,14 +244,15 @@ public:
Create(ISurfaceAllocator* aAllocator,
gfx::SurfaceFormat aFormat,
TextureFlags aFlags,
IDirect3DTexture9* aTexture,
HANDLE aSharedHandle,
D3DSURFACE_DESC aDesc);
IDirect3DDevice9* aDevice,
const gfx::IntSize& aSize);
// TextureClient
virtual bool IsAllocated() const override { return !!mTexture; }
virtual gfx::SurfaceFormat GetFormat() const override { return mFormat; }
virtual bool Lock(OpenMode aOpenMode) override;
virtual void Unlock() override;
@ -273,7 +274,18 @@ public:
virtual already_AddRefed<TextureClient>
CreateSimilar(TextureFlags, TextureAllocationFlags) const override { return nullptr; }
IDirect3DDevice9* GetD3D9Device() { return mDevice; }
IDirect3DTexture9* GetD3D9Texture() { return mTexture; }
HANDLE GetShareHandle() const { return mHandle; }
already_AddRefed<IDirect3DSurface9> GetD3D9Surface() const;
const D3DSURFACE_DESC& GetDesc() const
{
return mDesc;
}
private:
RefPtr<IDirect3DDevice9> mDevice;
RefPtr<IDirect3DTexture9> mTexture;
gfx::SurfaceFormat mFormat;
HANDLE mHandle;

View File

@ -392,8 +392,8 @@ struct TimedTexture {
MaybeFence fence;
TimeStamp timeStamp;
IntRect picture;
int32_t frameID;
int32_t producerID;
uint32_t frameID;
uint32_t producerID;
};
/**
@ -487,8 +487,8 @@ struct ImageCompositeNotification {
PImageContainer imageContainer;
TimeStamp imageTimeStamp;
TimeStamp firstCompositeTimeStamp;
int32_t frameID;
int32_t producerID;
uint32_t frameID;
uint32_t producerID;
};
// Unit of a "changeset reply". This is a weird abstraction, probably

View File

@ -25,6 +25,7 @@ static const size_t NUM_CRASH_GUARD_TYPES = size_t(CrashGuardType::NUM_TYPES);
static const char* sCrashGuardNames[NUM_CRASH_GUARD_TYPES] = {
"d3d11layers",
"d3d9video",
"glcontext",
};
DriverCrashGuard::DriverCrashGuard(CrashGuardType aType, dom::ContentParent* aContentParent)
@ -54,6 +55,16 @@ DriverCrashGuard::InitializeIfNeeded()
void
DriverCrashGuard::Initialize()
{
// Using DriverCrashGuard off the main thread currently does not work. Under
// e10s it could conceivably work by dispatching the IPC calls via the main
// thread. In the parent process this would be harder. For now, we simply
// exit early instead.
if (!NS_IsMainThread()) {
return;
}
mGfxInfo = services::GetGfxInfo();
if (XRE_IsContentProcess()) {
// Ask the parent whether or not activating the guard is okay. The parent
// won't bother if it detected a crash.
@ -251,7 +262,6 @@ bool
DriverCrashGuard::UpdateBaseEnvironment()
{
bool changed = false;
mGfxInfo = services::GetGfxInfo();
if (mGfxInfo) {
nsString value;
@ -269,8 +279,11 @@ DriverCrashGuard::UpdateBaseEnvironment()
}
bool
DriverCrashGuard::FeatureEnabled(int aFeature)
DriverCrashGuard::FeatureEnabled(int aFeature, bool aDefault)
{
if (!mGfxInfo) {
return aDefault;
}
int32_t status;
if (!NS_SUCCEEDED(mGfxInfo->GetFeatureStatus(aFeature, &status))) {
return false;
@ -373,20 +386,18 @@ D3D11LayersCrashGuard::UpdateEnvironment()
checked = true;
if (mGfxInfo) {
// Feature status.
// Feature status.
#if defined(XP_WIN)
bool d2dEnabled = gfxPrefs::Direct2DForceEnabled() ||
(!gfxPrefs::Direct2DDisabled() && FeatureEnabled(nsIGfxInfo::FEATURE_DIRECT2D));
changed |= CheckAndUpdateBoolPref("feature-d2d", d2dEnabled);
bool d2dEnabled = gfxPrefs::Direct2DForceEnabled() ||
(!gfxPrefs::Direct2DDisabled() && FeatureEnabled(nsIGfxInfo::FEATURE_DIRECT2D));
changed |= CheckAndUpdateBoolPref("feature-d2d", d2dEnabled);
bool d3d11Enabled = !gfxPrefs::LayersPreferD3D9();
if (!FeatureEnabled(nsIGfxInfo::FEATURE_DIRECT3D_11_LAYERS)) {
d3d11Enabled = false;
}
changed |= CheckAndUpdateBoolPref("feature-d3d11", d3d11Enabled);
#endif
bool d3d11Enabled = !gfxPrefs::LayersPreferD3D9();
if (!FeatureEnabled(nsIGfxInfo::FEATURE_DIRECT3D_11_LAYERS)) {
d3d11Enabled = false;
}
changed |= CheckAndUpdateBoolPref("feature-d3d11", d3d11Enabled);
#endif
if (!changed) {
return false;
@ -453,5 +464,50 @@ D3D9VideoCrashGuard::LogFeatureDisabled()
gfxCriticalError(CriticalLog::DefaultOptions(false)) << "DXVA2D3D9 video decoding is disabled due to a previous crash.";
}
GLContextCrashGuard::GLContextCrashGuard(dom::ContentParent* aContentParent)
: DriverCrashGuard(CrashGuardType::GLContext, aContentParent)
{
}
bool
GLContextCrashGuard::UpdateEnvironment()
{
static bool checked = false;
static bool changed = false;
if (checked) {
return changed;
}
checked = true;
#if defined(XP_WIN)
changed |= CheckAndUpdateBoolPref("gfx.driver-init.webgl-angle-force-d3d11",
gfxPrefs::WebGLANGLEForceD3D11());
changed |= CheckAndUpdateBoolPref("gfx.driver-init.webgl-angle-try-d3d11",
gfxPrefs::WebGLANGLETryD3D11());
changed |= CheckAndUpdateBoolPref("gfx.driver-init.webgl-angle-force-warp",
gfxPrefs::WebGLANGLEForceWARP());
changed |= CheckAndUpdateBoolPref("gfx.driver-init.webgl-angle",
FeatureEnabled(nsIGfxInfo::FEATURE_WEBGL_ANGLE, false));
changed |= CheckAndUpdateBoolPref("gfx.driver-init.direct3d11-angle",
FeatureEnabled(nsIGfxInfo::FEATURE_DIRECT3D_11_ANGLE, false));
#endif
return changed;
}
void
GLContextCrashGuard::LogCrashRecovery()
{
gfxCriticalError(CriticalLog::DefaultOptions(false)) << "GLContext just crashed and is now disabled.";
}
void
GLContextCrashGuard::LogFeatureDisabled()
{
gfxCriticalError(CriticalLog::DefaultOptions(false)) << "GLContext is disabled due to a previous crash.";
}
} // namespace gfx
} // namespace mozilla
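The new guard follows the same contract as the existing d3d11layers and d3d9video guards: construct it on the stack before the risky driver work, bail out if a previous run crashed, and let a successful run clear the recorded state. The caller side mirrors the GLContext::InitWithPrefix hunk earlier in this diff:

GLContextCrashGuard crashGuard;
if (crashGuard.Crashed()) {
  // A previous attempt to initialize GL crashed; LogFeatureDisabled()
  // has already reported it, so skip GL initialization entirely.
  return false;
}
// ... risky GL/driver initialization; if we crash here, the guard's
// persisted state disables this path on the next startup.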

View File

@ -39,6 +39,7 @@ enum class CrashGuardType : uint32_t
{
D3D11Layers,
D3D9Video,
GLContext,
NUM_TYPES
};
@ -84,7 +85,7 @@ protected:
virtual void LogFeatureDisabled() = 0;
// Helper functions.
bool FeatureEnabled(int aFeature);
bool FeatureEnabled(int aFeature, bool aDefault=true);
bool CheckAndUpdatePref(const char* aPrefName, const nsAString& aCurrentValue);
bool CheckAndUpdateBoolPref(const char* aPrefName, bool aCurrentValue);
std::string GetFullPrefName(const char* aPref);
@ -142,6 +143,17 @@ class D3D9VideoCrashGuard final : public DriverCrashGuard
void LogFeatureDisabled() override;
};
class GLContextCrashGuard final : public DriverCrashGuard
{
public:
explicit GLContextCrashGuard(dom::ContentParent* aContentParent = nullptr);
protected:
bool UpdateEnvironment() override;
void LogCrashRecovery() override;
void LogFeatureDisabled() override;
};
} // namespace gfx
} // namespace mozilla

View File

@ -65,6 +65,8 @@ bool gfxPlatformGtk::sUseFcFontList = false;
gfxPlatformGtk::gfxPlatformGtk()
{
gtk_init(nullptr, nullptr);
sUseFcFontList = mozilla::Preferences::GetBool("gfx.font_rendering.fontconfig.fontlist.enabled");
if (!sUseFcFontList && !sFontconfigUtils) {
sFontconfigUtils = gfxFontconfigUtils::GetFontconfigUtils();

View File

@ -333,26 +333,6 @@ public:
NS_IMPL_ISUPPORTS(D3D9TextureReporter, nsIMemoryReporter)
Atomic<size_t> gfxWindowsPlatform::sD3D9SurfaceImageUsed;
class D3D9SurfaceImageReporter final : public nsIMemoryReporter
{
~D3D9SurfaceImageReporter() {}
public:
NS_DECL_ISUPPORTS
NS_IMETHOD CollectReports(nsIHandleReportCallback *aHandleReport,
nsISupports* aData, bool aAnonymize) override
{
return MOZ_COLLECT_REPORT("d3d9-surface-image", KIND_OTHER, UNITS_BYTES,
gfxWindowsPlatform::sD3D9SurfaceImageUsed,
"Memory used for D3D9 surface images");
}
};
NS_IMPL_ISUPPORTS(D3D9SurfaceImageReporter, nsIMemoryReporter)
Atomic<size_t> gfxWindowsPlatform::sD3D9SharedTextureUsed;
class D3D9SharedTextureReporter final : public nsIMemoryReporter
@ -420,7 +400,6 @@ gfxWindowsPlatform::gfxWindowsPlatform()
RegisterStrongMemoryReporter(new GPUAdapterReporter());
RegisterStrongMemoryReporter(new D3D11TextureReporter());
RegisterStrongMemoryReporter(new D3D9TextureReporter());
RegisterStrongMemoryReporter(new D3D9SurfaceImageReporter());
RegisterStrongMemoryReporter(new D3D9SharedTextureReporter());
}

View File

@ -284,7 +284,6 @@ public:
virtual already_AddRefed<mozilla::gfx::VsyncSource> CreateHardwareVsyncSource() override;
static mozilla::Atomic<size_t> sD3D11MemoryUsed;
static mozilla::Atomic<size_t> sD3D9MemoryUsed;
static mozilla::Atomic<size_t> sD3D9SurfaceImageUsed;
static mozilla::Atomic<size_t> sD3D9SharedTextureUsed;
void GetDeviceInitData(mozilla::gfx::DeviceInitData* aOut) override;

View File

@ -47,35 +47,37 @@ public:
static void Dispatch(RasterImage* aImage,
Progress aProgress,
const nsIntRect& aInvalidRect,
uint32_t aFlags)
SurfaceFlags aSurfaceFlags)
{
MOZ_ASSERT(aImage);
nsCOMPtr<nsIRunnable> worker =
new NotifyProgressWorker(aImage, aProgress, aInvalidRect, aFlags);
new NotifyProgressWorker(aImage, aProgress, aInvalidRect, aSurfaceFlags);
NS_DispatchToMainThread(worker);
}
NS_IMETHOD Run() override
{
MOZ_ASSERT(NS_IsMainThread());
mImage->NotifyProgress(mProgress, mInvalidRect, mFlags);
mImage->NotifyProgress(mProgress, mInvalidRect, mSurfaceFlags);
return NS_OK;
}
private:
NotifyProgressWorker(RasterImage* aImage, Progress aProgress,
const nsIntRect& aInvalidRect, uint32_t aFlags)
NotifyProgressWorker(RasterImage* aImage,
Progress aProgress,
const nsIntRect& aInvalidRect,
SurfaceFlags aSurfaceFlags)
: mImage(aImage)
, mProgress(aProgress)
, mInvalidRect(aInvalidRect)
, mFlags(aFlags)
, mSurfaceFlags(aSurfaceFlags)
{ }
nsRefPtr<RasterImage> mImage;
const Progress mProgress;
const nsIntRect mInvalidRect;
const uint32_t mFlags;
const SurfaceFlags mSurfaceFlags;
};
class NotifyDecodeCompleteWorker : public nsRunnable
@ -470,17 +472,17 @@ DecodePool::NotifyProgress(Decoder* aDecoder)
MOZ_ASSERT(aDecoder);
if (!NS_IsMainThread() ||
(aDecoder->GetFlags() & imgIContainer::FLAG_ASYNC_NOTIFY)) {
(aDecoder->GetDecoderFlags() & DecoderFlags::ASYNC_NOTIFY)) {
NotifyProgressWorker::Dispatch(aDecoder->GetImage(),
aDecoder->TakeProgress(),
aDecoder->TakeInvalidRect(),
aDecoder->GetDecodeFlags());
aDecoder->GetSurfaceFlags());
return;
}
aDecoder->GetImage()->NotifyProgress(aDecoder->TakeProgress(),
aDecoder->TakeInvalidRect(),
aDecoder->GetDecodeFlags());
aDecoder->GetSurfaceFlags());
}
void
@ -489,7 +491,7 @@ DecodePool::NotifyDecodeComplete(Decoder* aDecoder)
MOZ_ASSERT(aDecoder);
if (!NS_IsMainThread() ||
(aDecoder->GetFlags() & imgIContainer::FLAG_ASYNC_NOTIFY)) {
(aDecoder->GetDecoderFlags() & DecoderFlags::ASYNC_NOTIFY)) {
NotifyDecodeCompleteWorker::Dispatch(aDecoder);
return;
}

View File

@ -31,13 +31,11 @@ Decoder::Decoder(RasterImage* aImage)
, mFrameCount(0)
, mFailCode(NS_OK)
, mChunkCount(0)
, mFlags(0)
, mDecoderFlags(DefaultDecoderFlags())
, mSurfaceFlags(DefaultSurfaceFlags())
, mBytesDecoded(0)
, mInitialized(false)
, mMetadataDecode(false)
, mSendPartialInvalidations(false)
, mImageIsTransient(false)
, mFirstFrameDecode(false)
, mInFrame(false)
, mDataDone(false)
, mDecodeDone(false)
@ -235,7 +233,9 @@ Decoder::CompleteDecode()
// If this image wasn't animated and isn't a transient image, mark its frame
// as optimizable. We don't support optimizing animated images and
// optimizing transient images isn't worth it.
if (!HasAnimation() && !mImageIsTransient && mCurrentFrame) {
if (!HasAnimation() &&
!(mDecoderFlags & DecoderFlags::IMAGE_IS_TRANSIENT) &&
mCurrentFrame) {
mCurrentFrame->SetOptimizable();
}
}
@ -249,8 +249,8 @@ Decoder::AllocateFrame(uint32_t aFrameNum,
uint8_t aPaletteDepth)
{
mCurrentFrame = AllocateFrameInternal(aFrameNum, aTargetSize, aFrameRect,
GetDecodeFlags(), aFormat,
aPaletteDepth, mCurrentFrame.get());
aFormat, aPaletteDepth,
mCurrentFrame.get());
if (mCurrentFrame) {
// Gather the raw pointers the decoders will use.
@ -276,7 +276,6 @@ RawAccessFrameRef
Decoder::AllocateFrameInternal(uint32_t aFrameNum,
const nsIntSize& aTargetSize,
const nsIntRect& aFrameRect,
uint32_t aDecodeFlags,
SurfaceFormat aFormat,
uint8_t aPaletteDepth,
imgFrame* aPreviousFrame)
@ -304,8 +303,7 @@ Decoder::AllocateFrameInternal(uint32_t aFrameNum,
}
nsRefPtr<imgFrame> frame = new imgFrame();
bool nonPremult =
aDecodeFlags & imgIContainer::FLAG_DECODE_NO_PREMULTIPLY_ALPHA;
bool nonPremult = bool(mSurfaceFlags & SurfaceFlags::NO_PREMULTIPLY_ALPHA);
if (NS_FAILED(frame->InitForDecoder(aTargetSize, aFrameRect, aFormat,
aPaletteDepth, nonPremult))) {
NS_WARNING("imgFrame::Init should succeed");
@ -322,7 +320,7 @@ Decoder::AllocateFrameInternal(uint32_t aFrameNum,
InsertOutcome outcome =
SurfaceCache::Insert(frame, ImageKey(mImage.get()),
RasterSurfaceKey(aTargetSize,
aDecodeFlags,
mSurfaceFlags,
aFrameNum),
Lifetime::Persistent);
if (outcome == InsertOutcome::FAILURE) {
@ -437,7 +435,7 @@ Decoder::PostFrameStop(Opacity aFrameOpacity /* = Opacity::TRANSPARENT */,
// If we're not sending partial invalidations, then we send an invalidation
// here when the first frame is complete.
if (!mSendPartialInvalidations && !HasAnimation()) {
if (!ShouldSendPartialInvalidations() && !HasAnimation()) {
mInvalidRect.UnionRect(mInvalidRect,
gfx::IntRect(gfx::IntPoint(0, 0), GetSize()));
}
@ -454,7 +452,7 @@ Decoder::PostInvalidation(const nsIntRect& aRect,
// Record this invalidation, unless we're not sending partial invalidations
// or we're past the first frame.
if (mSendPartialInvalidations && !HasAnimation()) {
if (ShouldSendPartialInvalidations() && !HasAnimation()) {
mInvalidRect.UnionRect(mInvalidRect, aRect);
mCurrentFrame->ImageUpdated(aRectAtTargetSize.valueOr(aRect));
}

View File

@ -10,9 +10,11 @@
#include "RasterImage.h"
#include "mozilla/RefPtr.h"
#include "DecodePool.h"
#include "DecoderFlags.h"
#include "ImageMetadata.h"
#include "Orientation.h"
#include "SourceBuffer.h"
#include "SurfaceFlags.h"
namespace mozilla {
@ -137,24 +139,6 @@ public:
*/
virtual void SetResolution(const gfx::IntSize& aResolution) { }
/**
* Set whether we should send partial invalidations.
*
* If @aSend is true, we'll send partial invalidations when decoding the first
* frame of the image, so image notification observers will be able to
* gradually draw in the image as it downloads.
*
* If @aSend is false (the default), we'll only send an invalidation when we
* complete the first frame.
*
* This must be called before Init() is called.
*/
void SetSendPartialInvalidations(bool aSend)
{
MOZ_ASSERT(!mInitialized, "Shouldn't be initialized yet");
mSendPartialInvalidations = aSend;
}
/**
* Set an iterator to the SourceBuffer which will feed data to this decoder.
*
@ -171,27 +155,21 @@ public:
}
/**
* Set whether this decoder is associated with a transient image. The decoder
* may choose to avoid certain optimizations that don't pay off for
* short-lived images in this case.
* Should this decoder send partial invalidations?
*/
void SetImageIsTransient(bool aIsTransient)
bool ShouldSendPartialInvalidations() const
{
MOZ_ASSERT(!mInitialized, "Shouldn't be initialized yet");
mImageIsTransient = aIsTransient;
return !(mDecoderFlags & DecoderFlags::IS_REDECODE);
}
/**
* Set whether we should stop decoding after the first frame.
* Should we stop decoding after the first frame?
*/
void SetIsFirstFrameDecode()
bool IsFirstFrameDecode() const
{
MOZ_ASSERT(!mInitialized, "Shouldn't be initialized yet");
mFirstFrameDecode = true;
return bool(mDecoderFlags & DecoderFlags::FIRST_FRAME_ONLY);
}
bool IsFirstFrameDecode() const { return mFirstFrameDecode; }
size_t BytesDecoded() const { return mBytesDecoded; }
// The amount of time we've spent inside Write() so far for this decoder.
@ -255,9 +233,26 @@ public:
SEQUENTIAL // decode to final image immediately
};
void SetFlags(uint32_t aFlags) { mFlags = aFlags; }
uint32_t GetFlags() const { return mFlags; }
uint32_t GetDecodeFlags() const { return DecodeFlags(mFlags); }
/**
* Get or set the DecoderFlags that influence the behavior of this decoder.
*/
void SetDecoderFlags(DecoderFlags aDecoderFlags)
{
MOZ_ASSERT(!mInitialized);
mDecoderFlags = aDecoderFlags;
}
DecoderFlags GetDecoderFlags() const { return mDecoderFlags; }
/**
* Get or set the SurfaceFlags that select the kind of output this decoder
* will produce.
*/
void SetSurfaceFlags(SurfaceFlags aSurfaceFlags)
{
MOZ_ASSERT(!mInitialized);
mSurfaceFlags = aSurfaceFlags;
}
SurfaceFlags GetSurfaceFlags() const { return mSurfaceFlags; }
bool HasSize() const { return mImageMetadata.HasSize(); }
@ -405,7 +400,6 @@ protected:
RawAccessFrameRef AllocateFrameInternal(uint32_t aFrameNum,
const nsIntSize& aTargetSize,
const nsIntRect& aFrameRect,
uint32_t aDecodeFlags,
gfx::SurfaceFormat aFormat,
uint8_t aPaletteDepth,
imgFrame* aPreviousFrame);
@ -432,14 +426,12 @@ private:
TimeDuration mDecodeTime;
uint32_t mChunkCount;
uint32_t mFlags;
DecoderFlags mDecoderFlags;
SurfaceFlags mSurfaceFlags;
size_t mBytesDecoded;
bool mInitialized : 1;
bool mMetadataDecode : 1;
bool mSendPartialInvalidations : 1;
bool mImageIsTransient : 1;
bool mFirstFrameDecode : 1;
bool mInFrame : 1;
bool mDataDone : 1;
bool mDecodeDone : 1;

View File

@ -109,28 +109,26 @@ DecoderFactory::CreateDecoder(DecoderType aType,
RasterImage* aImage,
SourceBuffer* aSourceBuffer,
const Maybe<IntSize>& aTargetSize,
uint32_t aFlags,
DecoderFlags aDecoderFlags,
SurfaceFlags aSurfaceFlags,
int aSampleSize,
const IntSize& aResolution,
bool aIsRedecode,
bool aImageIsTransient)
const IntSize& aResolution)
{
if (aType == DecoderType::UNKNOWN) {
return nullptr;
}
nsRefPtr<Decoder> decoder = GetDecoder(aType, aImage, aIsRedecode);
nsRefPtr<Decoder> decoder =
GetDecoder(aType, aImage, bool(aDecoderFlags & DecoderFlags::IS_REDECODE));
MOZ_ASSERT(decoder, "Should have a decoder now");
// Initialize the decoder.
decoder->SetMetadataDecode(false);
decoder->SetIterator(aSourceBuffer->Iterator());
decoder->SetFlags(aFlags);
decoder->SetDecoderFlags(aDecoderFlags | DecoderFlags::FIRST_FRAME_ONLY);
decoder->SetSurfaceFlags(aSurfaceFlags);
decoder->SetSampleSize(aSampleSize);
decoder->SetResolution(aResolution);
decoder->SetSendPartialInvalidations(!aIsRedecode);
decoder->SetImageIsTransient(aImageIsTransient);
decoder->SetIsFirstFrameDecode();
// Set a target size for downscale-during-decode if applicable.
if (aTargetSize) {
@ -152,7 +150,8 @@ DecoderFactory::CreateDecoder(DecoderType aType,
DecoderFactory::CreateAnimationDecoder(DecoderType aType,
RasterImage* aImage,
SourceBuffer* aSourceBuffer,
uint32_t aFlags,
DecoderFlags aDecoderFlags,
SurfaceFlags aSurfaceFlags,
const IntSize& aResolution)
{
if (aType == DecoderType::UNKNOWN) {
@ -169,9 +168,9 @@ DecoderFactory::CreateAnimationDecoder(DecoderType aType,
// Initialize the decoder.
decoder->SetMetadataDecode(false);
decoder->SetIterator(aSourceBuffer->Iterator());
decoder->SetFlags(aFlags);
decoder->SetDecoderFlags(aDecoderFlags | DecoderFlags::IS_REDECODE);
decoder->SetSurfaceFlags(aSurfaceFlags);
decoder->SetResolution(aResolution);
decoder->SetSendPartialInvalidations(false);
decoder->Init();
if (NS_FAILED(decoder->GetDecoderError())) {
@ -213,7 +212,7 @@ DecoderFactory::CreateMetadataDecoder(DecoderType aType,
/* static */ already_AddRefed<Decoder>
DecoderFactory::CreateAnonymousDecoder(DecoderType aType,
SourceBuffer* aSourceBuffer,
uint32_t aFlags)
SurfaceFlags aSurfaceFlags)
{
if (aType == DecoderType::UNKNOWN) {
return nullptr;
@ -226,15 +225,20 @@ DecoderFactory::CreateAnonymousDecoder(DecoderType aType,
// Initialize the decoder.
decoder->SetMetadataDecode(false);
decoder->SetIterator(aSourceBuffer->Iterator());
decoder->SetFlags(aFlags);
decoder->SetImageIsTransient(true);
// Anonymous decoders are always transient; we don't want to optimize surfaces
// or do any other expensive work that might be wasted.
DecoderFlags decoderFlags = DecoderFlags::IMAGE_IS_TRANSIENT;
// Without an image, the decoder can't store anything in the SurfaceCache, so
// callers will only be able to retrieve the most recent frame via
// Decoder::GetCurrentFrame(). That means that anonymous decoders should
// always be first-frame-only decoders, because nobody ever wants the *last*
// frame.
decoder->SetIsFirstFrameDecode();
decoderFlags |= DecoderFlags::FIRST_FRAME_ONLY;
decoder->SetDecoderFlags(decoderFlags);
decoder->SetSurfaceFlags(aSurfaceFlags);
decoder->Init();
if (NS_FAILED(decoder->GetDecoderError())) {
@ -259,7 +263,7 @@ DecoderFactory::CreateAnonymousMetadataDecoder(DecoderType aType,
// Initialize the decoder.
decoder->SetMetadataDecode(true);
decoder->SetIterator(aSourceBuffer->Iterator());
decoder->SetIsFirstFrameDecode();
decoder->SetDecoderFlags(DecoderFlags::FIRST_FRAME_ONLY);
decoder->Init();
if (NS_FAILED(decoder->GetDecoderError())) {

View File

@ -7,9 +7,12 @@
#ifndef mozilla_image_DecoderFactory_h
#define mozilla_image_DecoderFactory_h
#include "DecoderFlags.h"
#include "mozilla/Attributes.h"
#include "mozilla/Maybe.h"
#include "mozilla/gfx/2D.h"
#include "nsCOMPtr.h"
#include "SurfaceFlags.h"
class nsACString;
@ -20,6 +23,10 @@ class Decoder;
class RasterImage;
class SourceBuffer;
/**
* The type of decoder; this is usually determined from a MIME type using
* DecoderFactory::GetDecoderType().
*/
enum class DecoderType
{
PNG,
@ -42,10 +49,6 @@ public:
* (If the image *is* animated, only the first frame will be decoded.) The
* decoder will send notifications to @aImage.
*
* XXX(seth): @aIsRedecode and @aImageIsTransient should really be part of
* @aFlags. This requires changes to the way that decoder flags work, though.
* See bug 1185800.
*
* @param aType Which type of decoder to create - JPEG, PNG, etc.
* @param aImage The image will own the decoder and which should receive
* notifications as decoding progresses.
@ -55,25 +58,23 @@ public:
* be scaled to during decoding. It's an error to specify
* a target size for a decoder type which doesn't support
* downscale-during-decode.
* @param aFlags Flags specifying what type of output the decoder should
* produce; see GetDecodeFlags() in RasterImage.h.
* @param aDecoderFlags Flags specifying the behavior of this decoder.
* @param aSurfaceFlags Flags specifying the type of output this decoder
* should produce.
* @param aSampleSize The sample size requested using #-moz-samplesize (or 0
* if none).
* @param aResolution The resolution requested using #-moz-resolution (or an
* empty rect if none).
* @param aIsRedecode Specify 'true' if this image has been decoded before.
* @param aImageIsTransient Specify 'true' if this image is transient.
*/
static already_AddRefed<Decoder>
CreateDecoder(DecoderType aType,
RasterImage* aImage,
SourceBuffer* aSourceBuffer,
const Maybe<gfx::IntSize>& aTargetSize,
uint32_t aFlags,
DecoderFlags aDecoderFlags,
SurfaceFlags aSurfaceFlags,
int aSampleSize,
const gfx::IntSize& aResolution,
bool aIsRedecode,
bool aImageIsTransient);
const gfx::IntSize& aResolution);
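With the flags split, a typical first decode passes DefaultDecoderFlags() (CreateDecoder ORs in FIRST_FRAME_ONLY itself, as the .cpp hunk above shows) together with DefaultSurfaceFlags(). A hypothetical call against the new signature; 'image' and 'sourceBuffer' are assumed to be in scope:

nsRefPtr<Decoder> decoder =
  DecoderFactory::CreateDecoder(DecoderType::PNG,
                                image,
                                sourceBuffer,
                                Nothing(),             // no downscale-during-decode
                                DefaultDecoderFlags(), // not a redecode, not transient
                                DefaultSurfaceFlags(),
                                /* aSampleSize = */ 0,
                                /* aResolution = */ gfx::IntSize());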
/**
* Creates and initializes a decoder for animated images of type @aType.
@ -84,8 +85,9 @@ public:
* notifications as decoding progresses.
* @param aSourceBuffer The SourceBuffer which the decoder will read its data
* from.
* @param aFlags Flags specifying what type of output the decoder should
* produce; see GetDecodeFlags() in RasterImage.h.
* @param aDecoderFlags Flags specifying the behavior of this decoder.
* @param aSurfaceFlags Flags specifying the type of output this decoder
* should produce.
* @param aResolution The resolution requested using #-moz-resolution (or an
* empty rect if none).
*/
@ -93,7 +95,8 @@ public:
CreateAnimationDecoder(DecoderType aType,
RasterImage* aImage,
SourceBuffer* aSourceBuffer,
uint32_t aFlags,
DecoderFlags aDecoderFlags,
SurfaceFlags aSurfaceFlags,
const gfx::IntSize& aResolution);
/**
@ -126,13 +129,13 @@ public:
* @param aType Which type of decoder to create - JPEG, PNG, etc.
* @param aSourceBuffer The SourceBuffer which the decoder will read its data
* from.
* @param aFlags Flags specifying what type of output the decoder should
* produce; see GetDecodeFlags() in RasterImage.h.
* @param aSurfaceFlags Flags specifying the type of output this decoder
* should produce.
*/
static already_AddRefed<Decoder>
CreateAnonymousDecoder(DecoderType aType,
SourceBuffer* aSourceBuffer,
uint32_t aFlags);
SurfaceFlags aSurfaceFlags);
/**
* Creates and initializes an anonymous metadata decoder (one which isn't
@ -143,8 +146,6 @@ public:
* @param aType Which type of decoder to create - JPEG, PNG, etc.
* @param aSourceBuffer The SourceBuffer which the decoder will read its data
* from.
* @param aFlags Flags specifying what type of output the decoder should
* produce; see GetDecodeFlags() in RasterImage.h.
*/
static already_AddRefed<Decoder>
CreateAnonymousMetadataDecoder(DecoderType aType,

image/DecoderFlags.h (new file, 42 lines)
View File

@ -0,0 +1,42 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_image_DecoderFlags_h
#define mozilla_image_DecoderFlags_h
#include "mozilla/TypedEnumBits.h"
namespace mozilla {
namespace image {
/**
* Flags that influence decoder behavior. Note that these flags *don't*
* influence the logical content of the surfaces that the decoder generates, so
* they're not a factor in SurfaceCache lookups and the like. These flags
* instead either influence which surfaces are generated at all or tune the
* decoder's behavior for a particular scenario.
*/
enum class DecoderFlags : uint8_t
{
FIRST_FRAME_ONLY = 1 << 0,
IS_REDECODE = 1 << 1,
IMAGE_IS_TRANSIENT = 1 << 2,
ASYNC_NOTIFY = 1 << 3
};
MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(DecoderFlags)
/**
* @return the default set of decode flags.
*/
inline DecoderFlags
DefaultDecoderFlags()
{
return DecoderFlags();
}
} // namespace image
} // namespace mozilla
#endif // mozilla_image_DecoderFlags_h
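MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS is what lets the rest of this patch treat the scoped enum as a bitfield without casts. A couple of hedged lines showing the idioms the later hunks rely on:

// Combine with |, accumulate with |=, and test with & plus an explicit
// bool(), since enum classes don't convert to bool implicitly.
DecoderFlags flags = DefaultDecoderFlags() | DecoderFlags::IMAGE_IS_TRANSIENT;
flags |= DecoderFlags::FIRST_FRAME_ONLY;
if (bool(flags & DecoderFlags::IS_REDECODE)) {
  // Not reached: IS_REDECODE was never set.
}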

View File

@ -281,7 +281,7 @@ FrameAnimator::GetCompositedFrame(uint32_t aFrameNum)
LookupResult result =
SurfaceCache::Lookup(ImageKey(mImage),
RasterSurfaceKey(mSize,
0, // Default decode flags.
DefaultSurfaceFlags(),
aFrameNum));
MOZ_ASSERT(!result || !result.DrawableRef()->GetIsPaletted(),
"About to return a paletted frame");
@ -332,7 +332,7 @@ DoCollectSizeOfCompositingSurfaces(const RawAccessFrameRef& aSurface,
{
// Concoct a SurfaceKey for this surface.
SurfaceKey key = RasterSurfaceKey(aSurface->GetImageSize(),
imgIContainer::DECODE_FLAGS_DEFAULT,
DefaultSurfaceFlags(),
/* aFrameNum = */ 0);
// Create a counter for this surface.
@ -374,7 +374,7 @@ FrameAnimator::GetRawFrame(uint32_t aFrameNum) const
LookupResult result =
SurfaceCache::Lookup(ImageKey(mImage),
RasterSurfaceKey(mSize,
0, // Default decode flags.
DefaultSurfaceFlags(),
aFrameNum));
return result ? result.DrawableRef()->RawAccessRef()
: RawAccessFrameRef();

View File

@ -116,7 +116,9 @@ ImageOps::DecodeToSurface(nsIInputStream* aInputStream,
DecoderType decoderType =
DecoderFactory::GetDecoderType(PromiseFlatCString(aMimeType).get());
nsRefPtr<Decoder> decoder =
DecoderFactory::CreateAnonymousDecoder(decoderType, sourceBuffer, aFlags);
DecoderFactory::CreateAnonymousDecoder(decoderType,
sourceBuffer,
ToSurfaceFlags(aFlags));
if (!decoder) {
return nullptr;
}

Some files were not shown because too many files have changed in this diff.