Merge m-c to b2g-inbound

This commit is contained in:
Carsten "Tomcat" Book 2014-05-05 14:33:21 +02:00
commit ff011f4341
49 changed files with 1982 additions and 465 deletions

View File

@ -1490,7 +1490,13 @@ CanvasRenderingContext2D::CreatePattern(const HTMLImageOrCanvasOrVideoElement& e
return pat.forget();
}
} else if (element.IsHTMLImageElement()) {
htmlElement = &element.GetAsHTMLImageElement();
HTMLImageElement* img = &element.GetAsHTMLImageElement();
if (img->IntrinsicState().HasState(NS_EVENT_STATE_BROKEN)) {
error.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
return nullptr;
}
htmlElement = img;
} else {
htmlElement = &element.GetAsHTMLVideoElement();
}
@ -1862,7 +1868,10 @@ bool CanvasRenderingContext2D::DrawCustomFocusRing(mozilla::dom::Element& aEleme
nsCOMPtr<nsIDOMElement> focusedElement;
fm->GetFocusedElement(getter_AddRefs(focusedElement));
if (SameCOMIdentity(aElement.AsDOMNode(), focusedElement)) {
return true;
nsPIDOMWindow *window = aElement.OwnerDoc()->GetWindow();
if (window) {
return window->ShouldShowFocusRing();
}
}
}

View File

@ -213,3 +213,4 @@ skip-if = (buildapp == 'b2g' && toolkit != 'gonk')
[test_toDataURL_parameters.html]
[test_windingRuleUndefined.html]
[test_2d.fillText.gradient.html]
[test_createPattern_broken.html]

View File

@ -15071,49 +15071,6 @@ isPixel(ctx, 50,25, 0,255,0,255, 0);
</script>
<img src="image_green.png" id="green_10.png" class="resource">
<!-- [[[ test_2d.pattern.image.broken.html ]]] -->
<p>Canvas test: 2d.pattern.image.broken</p>
<canvas id="c465" width="100" height="50"><p class="fallback">FAIL (fallback content)</p></canvas>
<script>
function test_2d_pattern_image_broken() {
var canvas = document.getElementById('c465');
var ctx = canvas.getContext('2d');
var img = document.getElementById('broken_2.png');
todo(img.complete === false, "img.complete === false");
var _thrown = undefined; try {
ctx.createPattern(img, 'repeat');
} catch (e) { _thrown = e }; todo(_thrown && _thrown.name == "InvalidStateError" && _thrown.code == DOMException.INVALID_STATE_ERR, "should throw InvalidStateError");
}
</script>
<img src="image_broken.png" id="broken_2.png" class="resource">
<!-- [[[ test_2d.pattern.image.incomplete.html ]]] -->
<p>Canvas test: 2d.pattern.image.incomplete</p>
<canvas id="c466" width="100" height="50"><p class="fallback">FAIL (fallback content)</p></canvas>
<script>
function test_2d_pattern_image_incomplete() {
var canvas = document.getElementById('c466');
var ctx = canvas.getContext('2d');
var img = new Image();
todo(img.complete === false, "img.complete === false");
var _thrown = undefined; try {
ctx.createPattern(img, 'repeat');
} catch (e) { _thrown = e }; todo(_thrown && _thrown.name == "InvalidStateError" && _thrown.code == DOMException.INVALID_STATE_ERR, "should throw InvalidStateError");
}
</script>
<!-- [[[ test_2d.pattern.image.null.html ]]] -->
<p>Canvas test: 2d.pattern.image.null</p>
@ -23784,16 +23741,6 @@ function runTests() {
} catch (e) {
ok(false, "unexpected exception thrown in: test_2d_pattern_crosscanvas");
}
try {
test_2d_pattern_image_broken();
} catch (e) {
ok(false, "unexpected exception thrown in: test_2d_pattern_image_broken");
}
try {
test_2d_pattern_image_incomplete();
} catch (e) {
ok(false, "unexpected exception thrown in: test_2d_pattern_image_incomplete");
}
try {
test_2d_pattern_image_null();
} catch (e) {

View File

@ -0,0 +1,35 @@
<!DOCTYPE HTML>
<html>
<head>
<meta charset="utf-8">
<title>Test for createPattern with a broken image</title>
<script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<script type="application/javascript" src="/tests/SimpleTest/WindowSnapshot.js"></script>
<script type="application/javascript" src="file_drawWindow_common.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
<script type="application/javascript">
SimpleTest.waitForExplicitFinish();
window.addEventListener("load", function(){
var _thrown = undefined;
try{
var img = document.getElementById('broken.png');
var ctx = document.getElementById('c').getContext('2d');
var p = ctx.createPattern(img, 'repeat');
} catch (e) {
_thrown = e
};
ok(_thrown && _thrown.name == "InvalidStateError" && _thrown.code == DOMException.INVALID_STATE_ERR, "should throw InvalidStateError");
SimpleTest.finish();
});
</script>
</head>
<body>
<canvas id="c" class="output" width="100" height="50"></canvas>
<img src="/images/broken.png" id="broken.png" class="resource"/>
</body>
</html>

View File

@ -1386,16 +1386,26 @@ nsresult OggReader::Seek(int64_t aTarget,
// When doing fastSeek we display the first frame after the seek, so
// we need to advance the decode to the keyframe otherwise we'll get
// visual artifacts in the first frame output after the seek.
bool skip = true;
while (DecodeVideoFrame(skip, 0) && skip) {
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
if (mDecoder->IsShutdown()) {
return NS_ERROR_FAILURE;
// First, we must check to see if there's already a keyframe in the frames
// that we may have already decoded, and discard frames up to the
// keyframe.
VideoData* v;
while ((v = mVideoQueue.PeekFront()) && !v->mKeyframe) {
delete mVideoQueue.PopFront();
}
if (mVideoQueue.GetSize() == 0) {
// We didn't find a keyframe in the frames already here, so decode
// forwards until we find a keyframe.
bool skip = true;
while (DecodeVideoFrame(skip, 0) && skip) {
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
if (mDecoder->IsShutdown()) {
return NS_ERROR_FAILURE;
}
}
}
#ifdef DEBUG
const VideoData* v = mVideoQueue.PeekFront();
v = mVideoQueue.PeekFront();
if (!v || !v->mKeyframe) {
NS_WARNING("Ogg seek didn't end up before a key frame!");
}

View File

@ -196,18 +196,21 @@ public:
{
for (uint32_t j = 0; j < WEBAUDIO_BLOCK_SIZE*blocks; ++j) {
// Index into the curve array based on the amplitude of the
// incoming signal by clamping the amplitude to [-1, 1] and
// incoming signal by using an amplitude range of [-1, 1] and
// performing a linear interpolation of the neighbor values.
float index = std::max(0.0f, std::min(float(mCurve.Length() - 1),
mCurve.Length() * (aInputBuffer[j] + 1) / 2));
uint32_t indexLower = uint32_t(index);
uint32_t indexHigher = uint32_t(index + 1.0f);
if (indexHigher == mCurve.Length()) {
aOutputBuffer[j] = mCurve[indexLower];
float index = (mCurve.Length() - 1) * (aInputBuffer[j] + 1.0f) / 2.0f;
if (index < 0.0f) {
aOutputBuffer[j] = mCurve[0];
} else {
float interpolationFactor = index - indexLower;
aOutputBuffer[j] = (1.0f - interpolationFactor) * mCurve[indexLower] +
interpolationFactor * mCurve[indexHigher];
int32_t indexLower = index;
if (static_cast<uint32_t>(indexLower) >= mCurve.Length() - 1) {
aOutputBuffer[j] = mCurve[mCurve.Length() - 1];
} else {
uint32_t indexHigher = indexLower + 1;
float interpolationFactor = index - indexLower;
aOutputBuffer[j] = (1.0f - interpolationFactor) * mCurve[indexLower] +
interpolationFactor * mCurve[indexHigher];
}
}
}
}

View File

@ -17,6 +17,7 @@ function test() {
var node = context.createMediaStreamSource(audio.mozCaptureStreamUntilEnded());
var sp = context.createScriptProcessor(2048, 1);
node.connect(sp);
sp.connect(context.destination); // work around bug 916387
var expectedMinNonzeroSampleCount;
var expectedMaxNonzeroSampleCount;
var nonzeroSampleCount = 0;

View File

@ -11,6 +11,11 @@
* elevated privileges; the method implementations should contain the
* necessary security checks. Access this interface by calling
* getInterface on a DOMWindow.
*
* WARNING: Do not use 'out jsval' parameters in this file.
* SpecialPowers, which is used to access nsIDOMWindowUtils
* in plain mochitests, does not know how to handle them.
* (Use 'jsval' return values instead.)
*/
%{C++

View File

@ -295,7 +295,7 @@ public:
virtual TemporaryRef<gfx::DataSourceSurface> GetAsSurface() { return nullptr; }
#endif
virtual void PrintInfo(nsACString& aTo, const char* aPrefix) { }
virtual void PrintInfo(nsACString& aTo, const char* aPrefix) = 0;
virtual void UseTextureHost(TextureHost* aTexture);
virtual void UseComponentAlphaTextures(TextureHost* aTextureOnBlack,

View File

@ -657,6 +657,17 @@ ContentHostIncremental::TextureUpdateRequest::Execute(ContentHostIncremental* aH
}
}
void
ContentHostIncremental::PrintInfo(nsACString& aTo, const char* aPrefix)
{
aTo += aPrefix;
aTo += nsPrintfCString("ContentHostIncremental (0x%p)", this);
if (PaintWillResample()) {
aTo += " [paint-will-resample]";
}
}
void
ContentHostTexture::PrintInfo(nsACString& aTo, const char* aPrefix)
{
@ -669,10 +680,10 @@ ContentHostTexture::PrintInfo(nsACString& aTo, const char* aPrefix)
aTo += " [paint-will-resample]";
}
nsAutoCString pfx(aPrefix);
pfx += " ";
if (mTextureHost) {
nsAutoCString pfx(aPrefix);
pfx += " ";
aTo += "\n";
mTextureHost->PrintInfo(aTo, pfx.get());
}

View File

@ -279,6 +279,8 @@ public:
return false;
}
virtual void PrintInfo(nsACString& aTo, const char* aPrefix) MOZ_OVERRIDE;
virtual void DestroyTextures();
virtual bool Lock() {

View File

@ -164,8 +164,8 @@ nsACString&
ImageLayerComposite::PrintInfo(nsACString& aTo, const char* aPrefix)
{
ImageLayer::PrintInfo(aTo, aPrefix);
aTo += "\n";
if (mImageHost && mImageHost->IsAttached()) {
aTo += "\n";
nsAutoCString pfx(aPrefix);
pfx += " ";
mImageHost->PrintInfo(aTo, pfx.get());

View File

@ -131,9 +131,6 @@ LayerManagerComposite::Destroy()
RootLayer()->Destroy();
}
mRoot = nullptr;
mCompositor->Destroy();
mDestroyed = true;
}
}

View File

@ -195,8 +195,8 @@ nsACString&
ThebesLayerComposite::PrintInfo(nsACString& aTo, const char* aPrefix)
{
ThebesLayer::PrintInfo(aTo, aPrefix);
aTo += "\n";
if (mBuffer && mBuffer->IsAttached()) {
aTo += "\n";
nsAutoCString pfx(aPrefix);
pfx += " ";
mBuffer->PrintInfo(aTo, pfx.get());

View File

@ -241,7 +241,11 @@ CompositorParent::Destroy()
// Ensure that the layer manager is destructed on the compositor thread.
mLayerManager = nullptr;
if (mCompositor) {
mCompositor->Destroy();
}
mCompositor = nullptr;
mCompositionManager = nullptr;
mApzcTreeManager->ClearTree();
mApzcTreeManager = nullptr;
@ -273,7 +277,6 @@ CompositorParent::RecvWillStop()
}
mLayerManager->Destroy();
mLayerManager = nullptr;
mCompositor = nullptr;
mCompositionManager = nullptr;
}

View File

@ -40,8 +40,9 @@
#include "mozilla/layers/TextureClient.h"
struct nsIntRect;
using namespace base;
using base::Thread;
using base::ProcessHandle;
using namespace mozilla::ipc;
using namespace mozilla::gfx;

View File

@ -204,6 +204,7 @@ private:
DECL_GFX_PREF(Once, "layout.css.touch_action.enabled", TouchActionEnabled, bool, false);
DECL_GFX_PREF(Once, "layout.frame_rate", LayoutFrameRate, int32_t, -1);
DECL_GFX_PREF(Live, "layout.display-list.dump", LayoutDumpDisplayList, bool, false);
DECL_GFX_PREF(Live, "nglayout.debug.widget_update_flashing", WidgetUpdateFlashing, bool, false);

View File

@ -15,6 +15,7 @@
#include "GeckoProfiler.h"
#include "ImageContainer.h"
#include "gfx2DGlue.h"
#include "gfxPrefs.h"
#ifdef XP_WIN
#include "gfxWindowsPlatform.h"
@ -1075,7 +1076,13 @@ gfxUtils::CopyAsDataURL(RefPtr<gfx::SourceSurface> aSourceSurface)
gfxUtils::CopyAsDataURL(dt.get());
}
bool gfxUtils::sDumpPaintList = getenv("MOZ_DUMP_PAINT_LIST") != 0;
static bool sDumpPaintList = getenv("MOZ_DUMP_PAINT_LIST") != 0;
/* static */ bool
gfxUtils::DumpPaintList() {
return sDumpPaintList || gfxPrefs::LayoutDumpDisplayList();
}
bool gfxUtils::sDumpPainting = getenv("MOZ_DUMP_PAINT") != 0;
bool gfxUtils::sDumpPaintingToFile = getenv("MOZ_DUMP_PAINT_TO_FILE") != 0;
FILE *gfxUtils::sDumpPaintFile = nullptr;

View File

@ -226,7 +226,8 @@ public:
*/
static void CopyAsDataURL(mozilla::gfx::DrawTarget* aDT);
static bool sDumpPaintList;
static bool DumpPaintList();
static bool sDumpPainting;
static bool sDumpPaintingToFile;
static FILE* sDumpPaintFile;

View File

@ -0,0 +1,2 @@
var result = "D1D1D1D1D1D1D1D1D1D1".replace(/d1/ig,1);
assertEq(result, "1111111111");

View File

@ -1,3 +1,4 @@
// The SavedFrame constructor shouldn't have been exposed to JS on the global.
saveStack();
assertEq(typeof SavedFrame, "undefined");

View File

@ -0,0 +1,4 @@
// This test case was found by the fuzzer and crashed the js shell.
Object.preventExtensions(this);
saveStack();

View File

@ -6,11 +6,13 @@ load(libdir + "asserts.js");
let proto = Object.getPrototypeOf(saveStack());
// Can't create new SavedFrame instances by hand.
print("Testing constructor");
assertThrowsInstanceOf(() => {
new proto.constructor();
}, TypeError);
for (let p of ["source", "line", "column", "functionDisplayName", "parent"]) {
print("Testing getter: " + p);
// The getters shouldn't work on the prototype.
assertThrowsInstanceOf(() => proto[p], TypeError);

View File

@ -1173,6 +1173,13 @@ js::StringHasPattern(const jschar *text, uint32_t textlen,
return StringMatch(text, textlen, pat, patlen) != -1;
}
int
js::StringFindPattern(const jschar *text, uint32_t textlen,
const jschar *pat, uint32_t patlen)
{
return StringMatch(text, textlen, pat, patlen);
}
// When an algorithm does not need a string represented as a single linear
// array of characters, this range utility may be used to traverse the string a
// sequence of linear arrays of characters. This avoids flattening ropes.
@ -1737,6 +1744,12 @@ HasRegExpMetaChars(const jschar *chars, size_t length)
return false;
}
bool
js::StringHasRegExpMetaChars(const jschar *chars, size_t length)
{
return HasRegExpMetaChars(chars, length);
}
namespace {
/*

View File

@ -213,6 +213,13 @@ extern bool
StringHasPattern(const jschar *text, uint32_t textlen,
const jschar *pat, uint32_t patlen);
extern int
StringFindPattern(const jschar *text, uint32_t textlen,
const jschar *pat, uint32_t patlen);
extern bool
StringHasRegExpMetaChars(const jschar *chars, size_t length);
} /* namespace js */
extern size_t

View File

@ -8,6 +8,8 @@
#include "mozilla/MemoryReporting.h"
#include "jsstr.h"
#include "frontend/TokenStream.h"
#include "vm/MatchPairs.h"
#include "vm/RegExpStatics.h"
@ -377,7 +379,7 @@ RegExpObject::toString(JSContext *cx) const
/* RegExpShared */
RegExpShared::RegExpShared(JSAtom *source, RegExpFlag flags, uint64_t gcNumber)
: source(source), flags(flags), parenCount(0),
: source(source), flags(flags), parenCount(0), canStringMatch(false),
#if ENABLE_YARR_JIT
codeBlock(),
#endif
@ -438,6 +440,9 @@ RegExpShared::checkSyntax(ExclusiveContext *cx, TokenStream *tokenStream, JSLine
bool
RegExpShared::compile(JSContext *cx, bool matchOnly)
{
TraceLogger *logger = TraceLoggerForMainThread(cx->runtime());
AutoTraceLog logCompile(logger, TraceLogger::YarrCompile);
if (!sticky())
return compile(cx, *source, matchOnly);
@ -466,6 +471,12 @@ RegExpShared::compile(JSContext *cx, bool matchOnly)
bool
RegExpShared::compile(JSContext *cx, JSLinearString &pattern, bool matchOnly)
{
if (!ignoreCase() && !StringHasRegExpMetaChars(pattern.chars(), pattern.length())) {
canStringMatch = true;
parenCount = 0;
return true;
}
/* Parse the pattern. */
ErrorCode yarrError;
YarrPattern yarrPattern(pattern, ignoreCase(), multiline(), &yarrError);
@ -507,7 +518,7 @@ RegExpShared::compile(JSContext *cx, JSLinearString &pattern, bool matchOnly)
bool
RegExpShared::compileIfNecessary(JSContext *cx)
{
if (hasCode() || hasBytecode())
if (hasCode() || hasBytecode() || canStringMatch)
return true;
return compile(cx, false);
}
@ -515,7 +526,7 @@ RegExpShared::compileIfNecessary(JSContext *cx)
bool
RegExpShared::compileMatchOnlyIfNecessary(JSContext *cx)
{
if (hasMatchOnlyCode() || hasBytecode())
if (hasMatchOnlyCode() || hasBytecode() || canStringMatch)
return true;
return compile(cx, true);
}
@ -526,12 +537,9 @@ RegExpShared::execute(JSContext *cx, const jschar *chars, size_t length,
{
TraceLogger *logger = TraceLoggerForMainThread(cx->runtime());
{
/* Compile the code at point-of-use. */
AutoTraceLog logCompile(logger, TraceLogger::YarrCompile);
if (!compileIfNecessary(cx))
return RegExpRunStatus_Error;
}
/* Compile the code at point-of-use. */
if (!compileIfNecessary(cx))
return RegExpRunStatus_Error;
/* Ensure sufficient memory for output vector. */
if (!matches.initArray(pairCount()))
@ -555,6 +563,20 @@ RegExpShared::execute(JSContext *cx, const jschar *chars, size_t length,
unsigned *outputBuf = matches.rawBuf();
unsigned result;
if (canStringMatch) {
int res = StringFindPattern(chars+start, length-start, source->chars(), source->length());
if (res == -1)
return RegExpRunStatus_Success_NotFound;
outputBuf[0] = res + start;
outputBuf[1] = outputBuf[0] + source->length();
matches.displace(displacement);
matches.checkAgainst(origLength);
*lastIndex = matches[0].limit;
return RegExpRunStatus_Success;
}
#if ENABLE_YARR_JIT
if (codeBlock.isFallBack()) {
AutoTraceLog logInterpret(logger, TraceLogger::YarrInterpret);
@ -590,12 +612,9 @@ RegExpShared::executeMatchOnly(JSContext *cx, const jschar *chars, size_t length
{
TraceLogger *logger = js::TraceLoggerForMainThread(cx->runtime());
{
/* Compile the code at point-of-use. */
AutoTraceLog logCompile(logger, TraceLogger::YarrCompile);
if (!compileMatchOnlyIfNecessary(cx))
return RegExpRunStatus_Error;
}
/* Compile the code at point-of-use. */
if (!compileMatchOnlyIfNecessary(cx))
return RegExpRunStatus_Error;
#ifdef DEBUG
const size_t origLength = length;
@ -610,6 +629,17 @@ RegExpShared::executeMatchOnly(JSContext *cx, const jschar *chars, size_t length
start = 0;
}
if (canStringMatch) {
int res = StringFindPattern(chars+start, length-start, source->chars(), source->length());
if (res == -1)
return RegExpRunStatus_Success_NotFound;
match = MatchPair(res + start, res + start + source->length());
match.displace(displacement);
*lastIndex = match.limit;
return RegExpRunStatus_Success;
}
#if ENABLE_YARR_JIT
if (!codeBlock.isFallBack()) {
AutoTraceLog logJIT(logger, TraceLogger::YarrJIT);

View File

@ -145,6 +145,7 @@ class RegExpShared
RegExpFlag flags;
unsigned parenCount;
bool canStringMatch;
#if ENABLE_YARR_JIT
/* Note: Native code is valid only if |codeBlock.isFallBack() == false|. */
@ -204,7 +205,11 @@ class RegExpShared
/* Accessors */
size_t getParenCount() const { JS_ASSERT(isCompiled()); return parenCount; }
size_t getParenCount() const {
JS_ASSERT(isCompiled() || canStringMatch);
return parenCount;
}
void incRef() { activeUseCount++; }
void decRef() { JS_ASSERT(activeUseCount > 0); activeUseCount--; }

View File

@ -7,6 +7,7 @@
#include "vm/SavedStacks.h"
#include "jsapi.h"
#include "jscompartment.h"
#include "jsnum.h"
@ -339,6 +340,7 @@ SavedFrame::toStringMethod(JSContext *cx, unsigned argc, Value *vp)
}
/* static */ const JSFunctionSpec SavedFrame::methods[] = {
JS_FN("constructor", SavedFrame::construct, 0, 0),
JS_FN("toString", SavedFrame::toStringMethod, 0, 0),
JS_FS_END
};
@ -445,7 +447,7 @@ SavedStacks::insertFrames(JSContext *cx, ScriptFrameIter &iter, MutableHandle<Sa
thisFrame.compartment()->principals);
frame.set(getOrCreateSavedFrame(cx, lookup));
return frame.address() != nullptr;
return frame.get() != nullptr;
}
SavedFrame *
@ -475,9 +477,15 @@ SavedStacks::getOrCreateSavedFramePrototype(JSContext *cx)
if (!global)
return nullptr;
savedFrameProto = js_InitClass(cx, global, global->getOrCreateObjectPrototype(cx),
&SavedFrame::class_, SavedFrame::construct, 0,
SavedFrame::properties, SavedFrame::methods, nullptr, nullptr);
RootedObject proto(cx, NewObjectWithGivenProto(cx, &SavedFrame::class_,
global->getOrCreateObjectPrototype(cx),
global));
if (!proto
|| !JS_DefineProperties(cx, proto, SavedFrame::properties)
|| !JS_DefineFunctions(cx, proto, SavedFrame::methods))
return nullptr;
savedFrameProto = proto;
// The only object with the SavedFrame::class_ that doesn't have a source
// should be the prototype.
savedFrameProto->setReservedSlot(SavedFrame::JSSLOT_SOURCE, NullValue());

View File

@ -717,6 +717,11 @@ static void RecordFrameMetrics(nsIFrame* aForFrame,
}
}
LayoutDeviceToParentLayerScale layoutToParentLayerScale =
// The ScreenToParentLayerScale should be mTransformScale which is not calculated yet,
// but we don't yet handle CSS transforms, so we assume it's 1 here.
metrics.mCumulativeResolution * LayerToScreenScale(1.0) * ScreenToParentLayerScale(1.0);
// Calculate the composition bounds as the size of the scroll frame and
// its origin relative to the reference frame.
// If aScrollFrame is null, we are in a document without a root scroll frame,
@ -725,7 +730,8 @@ static void RecordFrameMetrics(nsIFrame* aForFrame,
nsRect compositionBounds(frameForCompositionBoundsCalculation->GetOffsetToCrossDoc(aReferenceFrame),
frameForCompositionBoundsCalculation->GetSize());
metrics.mCompositionBounds = RoundedToInt(LayoutDeviceRect::FromAppUnits(compositionBounds, auPerDevPixel)
* metrics.GetParentResolution());
* layoutToParentLayerScale);
// For the root scroll frame of the root content document, the above calculation
// will yield the size of the viewport frame as the composition bounds, which
@ -742,7 +748,8 @@ static void RecordFrameMetrics(nsIFrame* aForFrame,
if (nsView* view = rootFrame->GetView()) {
nsRect viewBoundsAppUnits = view->GetBounds() + rootFrame->GetOffsetToCrossDoc(aReferenceFrame);
ParentLayerIntRect viewBounds = RoundedToInt(LayoutDeviceRect::FromAppUnits(viewBoundsAppUnits, auPerDevPixel)
* metrics.GetParentResolution());
* layoutToParentLayerScale);
// On Android, we need to do things a bit differently to get things
// right (see bug 983208, bug 988882). We use the bounds of the nearest
// widget, but clamp the height to the view bounds height. This clamping

View File

@ -2758,7 +2758,7 @@ nsLayoutUtils::PaintFrame(nsRenderingContext* aRenderingContext, nsIFrame* aFram
#ifdef MOZ_DUMP_PAINTING
FILE* savedDumpFile = gfxUtils::sDumpPaintFile;
if (gfxUtils::sDumpPaintList || gfxUtils::sDumpPainting) {
if (gfxUtils::DumpPaintList() || gfxUtils::sDumpPainting) {
if (gfxUtils::sDumpPaintingToFile) {
nsCString string("dump-");
string.AppendInt(gPaintCount);
@ -2817,7 +2817,7 @@ nsLayoutUtils::PaintFrame(nsRenderingContext* aRenderingContext, nsIFrame* aFram
list.PaintRoot(&builder, aRenderingContext, flags);
#ifdef MOZ_DUMP_PAINTING
if (gfxUtils::sDumpPaintList || gfxUtils::sDumpPainting) {
if (gfxUtils::DumpPaintList() || gfxUtils::sDumpPainting) {
if (gfxUtils::sDumpPaintingToFile) {
fprintf_stderr(gfxUtils::sDumpPaintFile, "</script>");
}

View File

@ -346,7 +346,7 @@ public:
nscoord GetBaselineOffsetFromOuterCrossEdge(AxisOrientationType aCrossAxis,
AxisEdgeType aEdge) const;
float GetShareOfFlexWeightSoFar() const { return mShareOfFlexWeightSoFar; }
float GetShareOfWeightSoFar() const { return mShareOfWeightSoFar; }
bool IsFrozen() const { return mIsFrozen; }
@ -368,16 +368,33 @@ public:
uint8_t GetAlignSelf() const { return mAlignSelf; }
// Returns the flex weight that we should use in the "resolving flexible
// lengths" algorithm. If we're using flex grow, we just return that;
// otherwise, we use the "scaled flex shrink weight" (scaled by our flex
// base size, so that when both large and small items are shrinking,
// the large items shrink more).
float GetFlexWeightToUse(bool aIsUsingFlexGrow)
// Returns the flex factor (flex-grow or flex-shrink), depending on
// 'aIsUsingFlexGrow'.
//
// Asserts fatally if called on a frozen item (since frozen items are not
// flexible).
float GetFlexFactor(bool aIsUsingFlexGrow)
{
if (IsFrozen()) {
return 0.0f;
}
MOZ_ASSERT(!IsFrozen(), "shouldn't need flex factor after item is frozen");
return aIsUsingFlexGrow ? mFlexGrow : mFlexShrink;
}
// Returns the weight that we should use in the "resolving flexible lengths"
// algorithm. If we're using the flex grow factor, we just return that;
// otherwise, we return the "scaled flex shrink factor" (scaled by our flex
// base size, so that when both large and small items are shrinking, the large
// items shrink more).
//
// I'm calling this a "weight" instead of a "[scaled] flex-[grow|shrink]
// factor", to more clearly distinguish it from the actual flex-grow &
// flex-shrink factors.
//
// Asserts fatally if called on a frozen item (since frozen items are not
// flexible).
float GetWeight(bool aIsUsingFlexGrow)
{
MOZ_ASSERT(!IsFrozen(), "shouldn't need weight after item is frozen");
if (aIsUsingFlexGrow) {
return mFlexGrow;
@ -466,12 +483,12 @@ public:
mMainSize = aNewMainSize;
}
void SetShareOfFlexWeightSoFar(float aNewShare)
void SetShareOfWeightSoFar(float aNewShare)
{
MOZ_ASSERT(!mIsFrozen || aNewShare == 0.0f,
"shouldn't be giving this item any share of the weight "
"after it's frozen");
mShareOfFlexWeightSoFar = aNewShare;
mShareOfWeightSoFar = aNewShare;
}
void Freeze() { mIsFrozen = true; }
@ -574,7 +591,7 @@ protected:
// overlay the same memory as some other member vars that aren't touched
// until after main-size has been resolved. In particular, these could share
// memory with mMainPosn through mAscent, and mIsStretched.
float mShareOfFlexWeightSoFar;
float mShareOfWeightSoFar;
bool mIsFrozen;
bool mHadMinViolation;
bool mHadMaxViolation;
@ -599,6 +616,7 @@ class nsFlexContainerFrame::FlexLine : public LinkedListElement<FlexLine>
public:
FlexLine()
: mNumItems(0),
mNumFrozenItems(0),
mTotalInnerHypotheticalMainSize(0),
mTotalOuterHypotheticalMainSize(0),
mLineCrossSize(0),
@ -654,7 +672,12 @@ public:
} else {
mItems.insertBack(aItem);
}
// Update our various bookkeeping member-vars:
mNumItems++;
if (aItem->IsFrozen()) {
mNumFrozenItems++;
}
mTotalInnerHypotheticalMainSize += aItemInnerHypotheticalMainSize;
mTotalOuterHypotheticalMainSize += aItemOuterHypotheticalMainSize;
}
@ -712,6 +735,11 @@ private:
// for splitting lines across continuations. Then we can
// update this count carefully.)
// Number of *frozen* FlexItems in this line, based on FlexItem::IsFrozen().
// Mostly used for optimization purposes, e.g. to bail out early from loops
// when we can tell they have nothing left to do.
uint32_t mNumFrozenItems;
nscoord mTotalInnerHypotheticalMainSize;
nscoord mTotalOuterHypotheticalMainSize;
nscoord mLineCrossSize;
@ -1133,7 +1161,7 @@ FlexItem::FlexItem(nsIFrame* aChildFrame,
mCrossSize(0),
mCrossPosn(0),
mAscent(0),
mShareOfFlexWeightSoFar(0.0f),
mShareOfWeightSoFar(0.0f),
mIsFrozen(false),
mHadMinViolation(false),
mHadMaxViolation(false),
@ -1205,7 +1233,7 @@ FlexItem::FlexItem(nsIFrame* aChildFrame, nscoord aCrossSize)
mCrossSize(aCrossSize),
mCrossPosn(0),
mAscent(0),
mShareOfFlexWeightSoFar(0.0f),
mShareOfWeightSoFar(0.0f),
mIsFrozen(true),
mHadMinViolation(false),
mHadMaxViolation(false),
@ -1587,11 +1615,18 @@ FlexLine::FreezeOrRestoreEachFlexibleSize(const nscoord aTotalViolation,
freezeType = eFreezeMaxViolations;
}
for (FlexItem* item = mItems.getFirst(); item; item = item->getNext()) {
MOZ_ASSERT(!item->HadMinViolation() || !item->HadMaxViolation(),
"Can have either min or max violation, but not both");
// Since this loop only operates on unfrozen flex items, we can break as
// soon as we have seen all of them.
uint32_t numUnfrozenItemsToBeSeen = mNumItems - mNumFrozenItems;
for (FlexItem* item = mItems.getFirst();
numUnfrozenItemsToBeSeen > 0; item = item->getNext()) {
MOZ_ASSERT(item, "numUnfrozenItemsToBeSeen says items remain to be seen");
if (!item->IsFrozen()) {
numUnfrozenItemsToBeSeen--;
MOZ_ASSERT(!item->HadMinViolation() || !item->HadMaxViolation(),
"Can have either min or max violation, but not both");
if (eFreezeEverything == freezeType ||
(eFreezeMinViolations == freezeType && item->HadMinViolation()) ||
(eFreezeMaxViolations == freezeType && item->HadMaxViolation())) {
@ -1602,12 +1637,14 @@ FlexLine::FreezeOrRestoreEachFlexibleSize(const nscoord aTotalViolation,
"Freezing item at a size above its maximum");
item->Freeze();
mNumFrozenItems++;
} else if (MOZ_UNLIKELY(aIsFinalIteration)) {
// XXXdholbert If & when bug 765861 is fixed, we should upgrade this
// assertion to be fatal except in documents with enormous lengths.
NS_ERROR("Final iteration still has unfrozen items, this shouldn't"
" happen unless there was nscoord under/overflow.");
item->Freeze();
mNumFrozenItems++;
} // else, we'll reset this item's main size to its flex base size on the
// next iteration of this algorithm.
@ -1621,10 +1658,13 @@ void
FlexLine::ResolveFlexibleLengths(nscoord aFlexContainerMainSize)
{
PR_LOG(GetFlexContainerLog(), PR_LOG_DEBUG, ("ResolveFlexibleLengths\n"));
if (IsEmpty()) {
if (mNumFrozenItems == mNumItems) {
// All our items are frozen, so we have no flexible lengths to resolve.
return;
}
MOZ_ASSERT(!IsEmpty(), "empty lines should take the early-return above");
// Subtract space occupied by our items' margins/borders/padding, so we can
// just be dealing with the space available for our flex items' content
// boxes.
@ -1638,6 +1678,9 @@ FlexLine::ResolveFlexibleLengths(nscoord aFlexContainerMainSize)
const bool isUsingFlexGrow =
(mTotalOuterHypotheticalMainSize < aFlexContainerMainSize);
nscoord origAvailableFreeSpace;
bool isOrigAvailFreeSpaceInitialized = false;
// NOTE: I claim that this chunk of the algorithm (the looping part) needs to
// run the loop at MOST mNumItems times. This claim should hold up
// because we'll freeze at least one item on each loop iteration, and once
@ -1666,65 +1709,123 @@ FlexLine::ResolveFlexibleLengths(nscoord aFlexContainerMainSize)
if ((availableFreeSpace > 0 && isUsingFlexGrow) ||
(availableFreeSpace < 0 && !isUsingFlexGrow)) {
// The first time we do this, we initialize origAvailableFreeSpace.
if (!isOrigAvailFreeSpaceInitialized) {
origAvailableFreeSpace = availableFreeSpace;
isOrigAvailFreeSpaceInitialized = true;
}
// STRATEGY: On each item, we compute & store its "share" of the total
// flex weight that we've seen so far:
// curFlexWeight / runningFlexWeightSum
// weight that we've seen so far:
// curWeight / weightSum
//
// Then, when we go to actually distribute the space (in the next loop),
// we can simply walk backwards through the elements and give each item
// its "share" multiplied by the remaining available space.
//
// SPECIAL CASE: If the sum of the flex weights is larger than the
// SPECIAL CASE: If the sum of the weights is larger than the
// maximum representable float (overflowing to infinity), then we can't
// sensibly divide out proportional shares anymore. In that case, we
// simply treat the flex item(s) with the largest flex weights as if
// simply treat the flex item(s) with the largest weights as if
// their weights were infinite (dwarfing all the others), and we
// distribute all of the available space among them.
float runningFlexWeightSum = 0.0f;
float largestFlexWeight = 0.0f;
uint32_t numItemsWithLargestFlexWeight = 0;
for (FlexItem* item = mItems.getFirst(); item; item = item->getNext()) {
float curFlexWeight = item->GetFlexWeightToUse(isUsingFlexGrow);
MOZ_ASSERT(curFlexWeight >= 0.0f, "weights are non-negative");
float weightSum = 0.0f;
float flexFactorSum = 0.0f;
float largestWeight = 0.0f;
uint32_t numItemsWithLargestWeight = 0;
runningFlexWeightSum += curFlexWeight;
if (NS_finite(runningFlexWeightSum)) {
if (curFlexWeight == 0.0f) {
item->SetShareOfFlexWeightSoFar(0.0f);
} else {
item->SetShareOfFlexWeightSoFar(curFlexWeight /
runningFlexWeightSum);
// Since this loop only operates on unfrozen flex items, we can break as
// soon as we have seen all of them.
uint32_t numUnfrozenItemsToBeSeen = mNumItems - mNumFrozenItems;
for (FlexItem* item = mItems.getFirst();
numUnfrozenItemsToBeSeen > 0; item = item->getNext()) {
MOZ_ASSERT(item,
"numUnfrozenItemsToBeSeen says items remain to be seen");
if (!item->IsFrozen()) {
numUnfrozenItemsToBeSeen--;
float curWeight = item->GetWeight(isUsingFlexGrow);
float curFlexFactor = item->GetFlexFactor(isUsingFlexGrow);
MOZ_ASSERT(curWeight >= 0.0f, "weights are non-negative");
MOZ_ASSERT(curFlexFactor >= 0.0f, "flex factors are non-negative");
weightSum += curWeight;
flexFactorSum += curFlexFactor;
if (NS_finite(weightSum)) {
if (curWeight == 0.0f) {
item->SetShareOfWeightSoFar(0.0f);
} else {
item->SetShareOfWeightSoFar(curWeight / weightSum);
}
} // else, the sum of weights overflows to infinity, in which
// case we don't bother with "SetShareOfWeightSoFar" since
// we know we won't use it. (instead, we'll just give every
// item with the largest weight an equal share of space.)
// Update our largest-weight tracking vars
if (curWeight > largestWeight) {
largestWeight = curWeight;
numItemsWithLargestWeight = 1;
} else if (curWeight == largestWeight) {
numItemsWithLargestWeight++;
}
} // else, the sum of weights overflows to infinity, in which
// case we don't bother with "SetShareOfFlexWeightSoFar" since
// we know we won't use it. (instead, we'll just give every
// item with the largest flex weight an equal share of space.)
// Update our largest-flex-weight tracking vars
if (curFlexWeight > largestFlexWeight) {
largestFlexWeight = curFlexWeight;
numItemsWithLargestFlexWeight = 1;
} else if (curFlexWeight == largestFlexWeight) {
numItemsWithLargestFlexWeight++;
}
}
if (runningFlexWeightSum != 0.0f) { // no distribution if no flexibility
if (weightSum != 0.0f) {
MOZ_ASSERT(flexFactorSum != 0.0f,
"flex factor sum can't be 0, if a weighted sum "
"of its components (weightSum) is nonzero");
if (flexFactorSum < 1.0f) {
// Our unfrozen flex items don't want all of the original free space!
// (Their flex factors add up to something less than 1.)
// Hence, make sure we don't distribute any more than the portion of
// our original free space that these items actually want.
nscoord totalDesiredPortionOfOrigFreeSpace =
NSToCoordRound(origAvailableFreeSpace * flexFactorSum);
// Clamp availableFreeSpace to be no larger than that ^^.
// (using min or max, depending on sign).
// This should not change the sign of availableFreeSpace (except
// possibly by setting it to 0), as enforced by this assertion:
MOZ_ASSERT(totalDesiredPortionOfOrigFreeSpace == 0 ||
((totalDesiredPortionOfOrigFreeSpace > 0) ==
(availableFreeSpace > 0)),
"When we reduce available free space for flex factors < 1,"
"we shouldn't change the sign of the free space...");
if (availableFreeSpace > 0) {
availableFreeSpace = std::min(availableFreeSpace,
totalDesiredPortionOfOrigFreeSpace);
} else {
availableFreeSpace = std::max(availableFreeSpace,
totalDesiredPortionOfOrigFreeSpace);
}
}
PR_LOG(GetFlexContainerLog(), PR_LOG_DEBUG,
(" Distributing available space:"));
// Since this loop only operates on unfrozen flex items, we can break as
// soon as we have seen all of them.
numUnfrozenItemsToBeSeen = mNumItems - mNumFrozenItems;
// NOTE: It's important that we traverse our items in *reverse* order
// here, for correct width distribution according to the items'
// "ShareOfFlexWeightSoFar" progressively-calculated values.
for (FlexItem* item = mItems.getLast(); item;
item = item->getPrevious()) {
// "ShareOfWeightSoFar" progressively-calculated values.
for (FlexItem* item = mItems.getLast();
numUnfrozenItemsToBeSeen > 0; item = item->getPrevious()) {
MOZ_ASSERT(item,
"numUnfrozenItemsToBeSeen says items remain to be seen");
if (!item->IsFrozen()) {
numUnfrozenItemsToBeSeen--;
// To avoid rounding issues, we compute the change in size for this
// item, and then subtract it from the remaining available space.
nscoord sizeDelta = 0;
if (NS_finite(runningFlexWeightSum)) {
if (NS_finite(weightSum)) {
float myShareOfRemainingSpace =
item->GetShareOfFlexWeightSoFar();
item->GetShareOfWeightSoFar();
MOZ_ASSERT(myShareOfRemainingSpace >= 0.0f &&
myShareOfRemainingSpace <= 1.0f,
@ -1738,15 +1839,14 @@ FlexLine::ResolveFlexibleLengths(nscoord aFlexContainerMainSize)
sizeDelta = NSToCoordRound(availableFreeSpace *
myShareOfRemainingSpace);
}
} else if (item->GetFlexWeightToUse(isUsingFlexGrow) ==
largestFlexWeight) {
} else if (item->GetWeight(isUsingFlexGrow) == largestWeight) {
// Total flexibility is infinite, so we're just distributing
// the available space equally among the items that are tied for
// having the largest weight (and this is one of those items).
sizeDelta =
NSToCoordRound(availableFreeSpace /
float(numItemsWithLargestFlexWeight));
numItemsWithLargestFlexWeight--;
float(numItemsWithLargestWeight));
numItemsWithLargestWeight--;
}
availableFreeSpace -= sizeDelta;
@ -1765,8 +1865,15 @@ FlexLine::ResolveFlexibleLengths(nscoord aFlexContainerMainSize)
PR_LOG(GetFlexContainerLog(), PR_LOG_DEBUG,
(" Checking for violations:"));
for (FlexItem* item = mItems.getFirst(); item; item = item->getNext()) {
// Since this loop only operates on unfrozen flex items, we can break as
// soon as we have seen all of them.
uint32_t numUnfrozenItemsToBeSeen = mNumItems - mNumFrozenItems;
for (FlexItem* item = mItems.getFirst();
numUnfrozenItemsToBeSeen > 0; item = item->getNext()) {
MOZ_ASSERT(item, "numUnfrozenItemsToBeSeen says items remain to be seen");
if (!item->IsFrozen()) {
numUnfrozenItemsToBeSeen--;
if (item->GetMainSize() < item->GetMainMinSize()) {
// min violation
totalViolation += item->GetMainMinSize() - item->GetMainSize();
@ -1787,16 +1894,22 @@ FlexLine::ResolveFlexibleLengths(nscoord aFlexContainerMainSize)
PR_LOG(GetFlexContainerLog(), PR_LOG_DEBUG,
(" Total violation: %d\n", totalViolation));
if (totalViolation == 0) {
if (mNumFrozenItems == mNumItems) {
break;
}
MOZ_ASSERT(totalViolation != 0,
"Zero violation should've made us freeze all items & break");
}
// Post-condition: all lengths should've been frozen.
#ifdef DEBUG
// Post-condition: all items should've been frozen.
// Make sure the counts match:
MOZ_ASSERT(mNumFrozenItems == mNumItems, "All items should be frozen");
// For good measure, check each item directly, in case our counts are busted:
for (const FlexItem* item = mItems.getFirst(); item; item = item->getNext()) {
MOZ_ASSERT(item->IsFrozen(),
"All flexible lengths should've been resolved");
MOZ_ASSERT(item->IsFrozen(), "All items should be frozen");
}
#endif // DEBUG
}

View File

@ -1781,7 +1781,7 @@ skip-if(B2G&&browserIsRemote) == 858803-1.html 858803-1-ref.html # bug 974780
fuzzy(1,10000) fuzzy-if(Android&&AndroidVersion>=15,5,10000) == 902330-1.html 902330-1-ref.html
fuzzy-if(Android,8,400) == 906199-1.html 906199-1-ref.html
== 921716-1.html 921716-1-ref.html
== 926155-1.html 926155-1-ref.html
test-pref(layout.css.sticky.enabled,true) == 926155-1.html 926155-1-ref.html
fuzzy-if(cocoaWidget,1,40) == 928607-1.html 928607-1-ref.html
== 931464-1.html 931464-1-ref.html
== 931853.html 931853-ref.html

View File

@ -1695,7 +1695,7 @@ CSS_PROP_POSITION(
VARIANT_HN,
nullptr,
offsetof(nsStylePosition, mFlexGrow),
eStyleAnimType_float) // float, except animations to/from 0 shouldn't work
eStyleAnimType_float)
CSS_PROP_POSITION(
flex-shrink,
flex_shrink,
@ -1709,7 +1709,7 @@ CSS_PROP_POSITION(
VARIANT_HN,
nullptr,
offsetof(nsStylePosition, mFlexShrink),
eStyleAnimType_float) // float, except animations to/from 0 shouldn't work
eStyleAnimType_float)
CSS_PROP_POSITION(
flex-wrap,
flex_wrap,

View File

@ -463,16 +463,6 @@ nsStyleAnimation::ComputeDistance(nsCSSProperty aProperty,
return true;
}
case eUnit_Float: {
// Special case for flex-grow and flex-shrink: animations are
// disallowed between 0 and other values.
if ((aProperty == eCSSProperty_flex_grow ||
aProperty == eCSSProperty_flex_shrink) &&
(aStartValue.GetFloatValue() == 0.0f ||
aEndValue.GetFloatValue() == 0.0f) &&
aStartValue.GetFloatValue() != aEndValue.GetFloatValue()) {
return false;
}
float startFloat = aStartValue.GetFloatValue();
float endFloat = aEndValue.GetFloatValue();
aDistance = Abs(double(endFloat) - double(startFloat));
@ -1948,16 +1938,6 @@ nsStyleAnimation::AddWeighted(nsCSSProperty aProperty,
return true;
}
case eUnit_Float: {
// Special case for flex-grow and flex-shrink: animations are
// disallowed between 0 and other values.
if ((aProperty == eCSSProperty_flex_grow ||
aProperty == eCSSProperty_flex_shrink) &&
(aValue1.GetFloatValue() == 0.0f ||
aValue2.GetFloatValue() == 0.0f) &&
aValue1.GetFloatValue() != aValue2.GetFloatValue()) {
return false;
}
aResultValue.SetFloatValue(RestrictValue(aProperty,
aCoeff1 * aValue1.GetFloatValue() +
aCoeff2 * aValue2.GetFloatValue()));

View File

@ -835,4 +835,548 @@ var gFlexboxTestcases =
},
]
},
// Test cases where flex-grow sums to less than 1:
// ===============================================
// This makes us treat the flexibilities like "fraction of free space"
// instead of weights, so that e.g. a single item with "flex-grow: 0.1"
// will only get 10% of the free space instead of all of the free space.
// Basic cases where flex-grow sum is less than 1:
{
items:
[
{
"flex": "0.1 100px",
"_main-size": [ null, "110px" ] // +10% of free space
},
]
},
{
items:
[
{
"flex": "0.8 0px",
"_main-size": [ null, "160px" ] // +80% of free space
},
]
},
// ... and now with two flex items:
{
items:
[
{
"flex": "0.4 70px",
"_main-size": [ null, "110px" ] // +40% of free space
},
{
"flex": "0.2 30px",
"_main-size": [ null, "50px" ] // +20% of free space
},
]
},
// ...and now with max-size modifying how much free space one item can take:
{
items:
[
{
"flex": "0.4 70px",
"_main-size": [ null, "110px" ] // +40% of free space
},
{
"flex": "0.2 30px",
"_max-main-size": "35px",
"_main-size": [ null, "35px" ] // +20% free space, then clamped
},
]
},
// ...and now with a max-size smaller than our flex-basis:
{
items:
[
{
"flex": "0.4 70px",
"_main-size": [ null, "110px" ] // +40% of free space
},
{
"flex": "0.2 30px",
"_max-main-size": "10px",
"_main-size": [ null, "10px" ] // +20% free space, then clamped
},
]
},
// ...and now with a max-size and a huge flex-basis, such that we initially
// have negative free space, which makes the "% of [original] free space"
// calculations a bit more subtle. We set the "original free space" after
// we've clamped the second item (the first time the free space is positive).
{
items:
[
{
"flex": "0.4 70px",
"_main-size": [ null, "118px" ] // +40% of free space _after freezing
// the other item_
},
{
"flex": "0.2 150px",
"_max-main-size": "10px",
"_main-size": [ null, "10px" ] // clamped immediately
},
]
},
// Now with min-size modifying how much free space our items take:
{
items:
[
{
"flex": "0.4 70px",
"_main-size": [ null, "110px" ] // +40% of free space
},
{
"flex": "0.2 30px",
"_min-main-size": "70px",
"_main-size": [ null, "70px" ] // +20% free space, then clamped
},
]
},
// ...and now with a large enough min-size that it prevents the other flex
// item from taking its full desired portion of the original free space:
{
items:
[
{
"flex": "0.4 70px",
"_main-size": [ null, "80px" ] // (Can't take my full +40% of
// free space due to other item's
// large min-size.)
},
{
"flex": "0.2 30px",
"_min-main-size": "120px",
"_main-size": [ null, "120px" ] // +20% free space, then clamped
},
]
},
// ...and now with a large-enough min-size that it pushes the other flex item
// to actually shrink a bit (with default "flex-shrink:1"):
{
items:
[
{
"flex": "0.3 30px",
"_main-size": [ null, "20px" ] // -10px, instead of desired +45px
},
{
"flex": "0.2 20px",
"_min-main-size": "180px",
"_main-size": [ null, "180px" ] // +160px, instead of desired +30px
},
]
},
// In this case, the items' flexibilities don't initially sum to < 1, but they
// do after we freeze the third item for violating its max-size.
{
items:
[
{
"flex": "0.3 30px",
"_main-size": [ null, "75px" ]
// 1st loop: desires (0.3 / 5) * 150px = 9px. Tentatively granted.
// 2nd loop: desires 0.3 * 150px = 45px. Tentatively granted.
// 3rd loop: desires 0.3 * 150px = 45px. Granted +45px.
},
{
"flex": "0.2 20px",
"_max-main-size": "30px",
"_main-size": [ null, "30px" ]
// First loop: desires (0.2 / 5) * 150px = 6px. Tentatively granted.
// Second loop: desires 0.2 * 150px = 30px. Frozen at +10px.
},
{
"flex": "4.5 0px",
"_max-main-size": "20px",
"_main-size": [ null, "20px" ]
// First loop: desires (4.5 / 5) * 150px = 135px. Frozen at +20px.
},
]
},
// XXXdholbert The algorithm we're currently using has an unfortunate
// discontinuity between the following two cases, as described in bug 985304
// comment 28, due to when we determine the "original free space". We could
// fix this by always determining "original free space" up-front, but that
// causes other discontinuities. I'm waiting until the discussion sorts out a
// bit on www-style before deciding how to resolve this.
{
// First example:
// Here, we have an "original free space" of 2px, so our first item ends up
// getting 0.5 * 2px = 1px.
items:
[
{
"flex": "0.5 100px",
"_main-size": [ null, "101px" ]
},
{
"flex": "1 98px",
"_max-main-size": "40px",
"_main-size": [ null, "40px" ]
},
]
},
{
// Second example (with 2nd flex item having 3px larger flex-basis):
// Here, our "original free space" is negative, but we're using flex-grow
// based on the sum of the items' hypothetical main sizes -- so we wait to
// establish the "original free space" until after we've frozen the second
// item. At that point, we have 60px free space. So our first item ends up
// getting 0.5 * 60px = 30px.
items:
[
{
"flex": "0.5 100px",
"_main-size": [ null, "130px" ]
},
{
"flex": "1 101px",
"_max-main-size": "40px",
"_main-size": [ null, "40px" ]
},
]
},
// XXXdholbert Here's another pair of testcases where we have another
// discontinuity, mentioned at the end of bug 985304 comment 28. Here, the
// "original free space" is small, and then a flex item gets clamped, making
// more free space available. If our flex items' sum is < 1, then this new
// free space won't be distributed (since it's not part of the *original* free
// space). But if we tweak a flex-grow value to push the sum over 1, then
// suddenly this extra free space *will* be distributed. Hence, discontinuity.
{
// First example: flex items' sum is 0.9 (just under 1)
// We only distribute shares of the "original free space", which is 10px.
items:
[
{
"flex": "0.4 50px",
"_main-size": [ null, "54px" ]
},
{
"flex": "0.5 50px",
"_main-size": [ null, "55px" ]
},
{
"flex": "0 90px",
"_max-main-size": "0px",
"_main-size": [ null, "0px" ]
},
]
},
{
// Second example: flex items' sum is exactly 1.0
// We distribute all of the current free space, in each loop of the
// algorithm. (In particular, after we've clamped the third item & freed up
// some more space.) So, the first and second item end up substantially
// larger than in the previous example.
items:
[
{
"flex": "0.45 50px",
"_main-size": [ null, "95px" ]
},
{
"flex": "0.55 50px",
"_main-size": [ null, "105px" ]
},
{
"flex": "0 90px",
"_max-main-size": "0px",
"_main-size": [ null, "0px" ]
},
]
},
// Test cases where flex-shrink sums to less than 1:
// =================================================
// This makes us treat the flexibilities more like "fraction of (negative)
// free space" instead of weights, so that e.g. a single item with
// "flex-shrink: 0.1" will only shrink by 10% of amount that it overflows
// its container by.
//
// It gets a bit more complex when there are multiple flex items, because
// flex-shrink is scaled by the flex-basis before it's used as a weight. But
// even with that scaling, the general principal is that e.g. if the
// flex-shrink values *sum* to 0.6, then the items will collectively only
// shrink by 60% (and hence will still overflow).
// Basic cases where flex-grow sum is less than 1:
{
items:
[
{
"flex": "0 0.1 300px",
"_main-size": [ null, "290px" ] // +10% of (negative) free space
},
]
},
{
items:
[
{
"flex": "0 0.8 400px",
"_main-size": [ null, "240px" ] // +80% of (negative) free space
},
]
},
// ...now with two flex items, with the same flex-basis value:
{
items:
[
{
"flex": "0 0.4 150px",
"_main-size": [ null, "110px" ] // +40% of (negative) free space
},
{
"flex": "0 0.2 150px",
"_main-size": [ null, "130px" ] // +20% of (negative) free space
},
]
},
// ...now with two flex items, with different flex-basis values (and hence
// differently-scaled flex factors):
{
items:
[
{
"flex": "0 0.3 100px",
"_main-size": [ null, "76px" ]
},
{
"flex": "0 0.1 200px",
"_main-size": [ null, "184px" ]
}
]
// Notes:
// - Free space: -100px
// - Sum of flex-shrink factors: 0.3 + 0.1 = 0.4
// - Since that sum ^ is < 1, we'll only distribute that fraction of
// the free space. We'll distribute: -100px * 0.4 = -40px
//
// - 1st item's scaled flex factor: 0.3 * 100px = 30
// - 2nd item's scaled flex factor: 0.1 * 200px = 20
// - 1st item's share of distributed free space: 30/(30+20) = 60%
// - 2nd item's share of distributed free space: 20/(30+20) = 40%
//
// SO:
// - 1st item gets 60% * -40px = -24px. 100px-24px = 76px
// - 2nd item gets 40% * -40px = -16px. 200px-16px = 184px
},
// ...now with min-size modifying how much one item can shrink:
{
items:
[
{
"flex": "0 0.3 100px",
"_main-size": [ null, "70px" ]
},
{
"flex": "0 0.1 200px",
"_min-main-size": "190px",
"_main-size": [ null, "190px" ]
}
]
// Notes:
// - We proceed as in previous testcase, but clamp the second flex item
// at its min main size.
// - After that point, we have a total flex-shrink of = 0.3, so we
// distribute 0.3 * -100px = -30px to the remaining unfrozen flex
// items. Since there's only one unfrozen item left, it gets all of it.
},
// ...now with min-size larger than our flex-basis:
{
items:
[
{
"flex": "0 0.3 100px",
"_main-size": [ null, "70px" ]
},
{
"flex": "0 0.1 200px",
"_min-main-size": "250px",
"_main-size": [ null, "250px" ]
}
]
// (Same as previous example, except the min-main-size prevents the
// second item from shrinking at all)
},
// ...and now with a min-size and a small flex-basis, such that we initially
// have positive free space, which makes the "% of [original] free space"
// calculations a bit more subtle. We set the "original free space" after
// we've clamped the second item (the first time the free space is negative).
{
items:
[
{
"flex": "0 0.3 100px",
"_main-size": [ null, "70px" ]
},
{
"flex": "0 0.1 50px",
"_min-main-size": "200px",
"_main-size": [ null, "200px" ]
}
]
},
// Now with max-size making an item shrink more than its flex-shrink value
// calls for:
{
items:
[
{
"flex": "0 0.3 100px",
"_main-size": [ null, "70px" ]
},
{
"flex": "0 0.1 200px",
"_max-main-size": "150px",
"_main-size": [ null, "150px" ]
}
]
// Notes:
// - We proceed as in an earlier testcase, but clamp the second flex item
// at its max main size.
// - After that point, we have a total flex-shrink of = 0.3, so we
// distribute 0.3 * -100px = -30px to the remaining unfrozen flex
// items. Since there's only one unfrozen item left, it gets all of it.
},
// ...and now with a small enough max-size that it prevents the other flex
// item from taking its full desired portion of the (negative) original free
// space:
{
items:
[
{
"flex": "0 0.3 100px",
"_main-size": [ null, "90px" ]
},
{
"flex": "0 0.1 200px",
"_max-main-size": "110px",
"_main-size": [ null, "110px" ]
}
]
// Notes:
// - We proceed as in an earlier testcase, but clamp the second flex item
// at its max main size.
// - After that point, we have a total flex-shrink of 0.3, which would
// have us distribute 0.3 * -100px = -30px to the (one) remaining
// unfrozen flex item. But our remaining free space is only -10px at
// that point, so we distribute that instead.
},
// ...and now with a small enough max-size that it pushes the other flex item
// to actually grow a bit (with custom "flex-grow: 1" for this testcase):
{
items:
[
{
"flex": "1 0.3 100px",
"_main-size": [ null, "120px" ]
},
{
"flex": "1 0.1 200px",
"_max-main-size": "80px",
"_main-size": [ null, "80px" ]
}
]
},
// In this case, the items' flexibilities don't initially sum to < 1, but they
// do after we freeze the third item for violating its min-size.
{
items:
[
{
"flex": "0 0.3 100px",
"_main-size": [ null, "84px" ]
},
{
"flex": "0 0.1 150px",
"_main-size": [ null, "142px" ]
},
{
"flex": "0 0.8 10px",
"_min-main-size": "40px",
"_main-size": [ null, "40px" ]
}
]
// Notes:
// - For the first round of flexing, we shrink everything and trivially
// violate the third items' min-size. So we freeze it and restart.
// We also establish a "original free space" of -60px.
//
// - For the second round, we have -40px of free space, and a total
// flex-shrink of 0.4, and -60px *original* free space.
// So our remaining items will collectively shrink by
// 0.4 * -60px = -24px.
//
// - 1st item's scaled flex factor: 0.3 * 100px = 30
// - 2nd item's scaled flex factor: 0.1 * 150px = 15
//
// - 1st item's share of distributed free space: 30/(30+15) = 2/3
// - 2nd item's share of distributed free space: 15/(30+15) = 1/3
//
// SO:
// - 1st item gets 2/3 * -24px = -16px. 100px - 16px = 84px
// - 2nd item gets 1/3 * -24px = -8px. 150px - 8px = 142px
},
// In this case, the items' flexibilities sum to > 1, in part due to an item
// that *can't actually shrink* due to its 0 flex-basis (which gives it a
// "scaled flex factor" of 0). So that item can't shrink, but it does prevent
// the others from getting the "flex-shrink sum less than 1" code-path.
{
items:
[
{
"flex": "0 .3 150px",
"_main-size": [ null, "90px" ]
},
{
"flex": "0 .2 150px",
"_main-size": [ null, "110px" ]
},
{
"flex": "0 2 0px",
"_main-size": [ null, "0px" ]
}
]
},
// For comparison, the above testcase should behave just like this one with
// all >1 flex-shrink values (it shouldn't trigger any special <1 behavior):
{
items:
[
{
"flex": "0 3 150px",
"_main-size": [ null, "90px" ]
},
{
"flex": "0 2 150px",
"_main-size": [ null, "110px" ]
},
]
}
];

View File

@ -16,7 +16,7 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=696253
* { flex-grow: 10; flex-shrink: 20 }
/* These animations SHOULD affect computed style */
/* Animations that we'll test (individually) in the script below: */
@keyframes flexGrowTwoToThree {
0% { flex-grow: 2 }
100% { flex-grow: 3 }
@ -33,9 +33,6 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=696253
0% { flex-shrink: 0 }
100% { flex-shrink: 0 }
}
/* These animations SHOULD NOT affect computed style. (flex-grow and
flex-shrink are animatable "except between '0' and other values") */
@keyframes flexGrowZeroToOne {
0% { flex-grow: 0 }
100% { flex-grow: 1 }
@ -136,41 +133,43 @@ advance_clock(1000);
is(cs.flexShrink, 20, "flexShrinkZeroToZero at 1.5s");
done_div();
// ANIMATIONS THAT SHOULD NOT AFFECT COMPUTED STYLE
// ------------------------------------------------
// ANIMATIONS THAT DIDN'T USED TO AFFECT COMPUTED STYLE, BUT NOW DO
// ----------------------------------------------------------------
// (In an older version of the flexbox spec, flex-grow & flex-shrink were not
// allowed to animate between 0 and other values. But now that's allowed.)
// flexGrowZeroToOne: no effect on computed style. 10 all the way through.
// flexGrowZeroToOne: 0 at 0%, 0.5 at 50%, 10 after animation is over.
new_div("animation: flexGrowZeroToOne linear 1s");
is(cs.flexGrow, 10, "flexGrowZeroToOne at 0.0s");
is(cs.flexGrow, 0, "flexGrowZeroToOne at 0.0s");
advance_clock(500);
is(cs.flexGrow, 10, "flexGrowZeroToOne at 0.5s");
is(cs.flexGrow, 0.5, "flexGrowZeroToOne at 0.5s");
advance_clock(1000);
is(cs.flexGrow, 10, "flexGrowZeroToOne at 1.5s");
done_div();
// flexShrinkZeroToOne: no effect on computed style. 20 all the way through.
// flexShrinkZeroToOne: 0 at 0%, 0.5 at 50%, 20 after animation is over.
new_div("animation: flexShrinkZeroToOne linear 1s");
is(cs.flexShrink, 20, "flexShrinkZeroToOne at 0.0s");
is(cs.flexShrink, 0, "flexShrinkZeroToOne at 0.0s");
advance_clock(500);
is(cs.flexShrink, 20, "flexShrinkZeroToOne at 0.5s");
is(cs.flexShrink, 0.5, "flexShrinkZeroToOne at 0.5s");
advance_clock(1000);
is(cs.flexShrink, 20, "flexShrinkZeroToOne at 1.5s");
done_div();
// flexGrowOneToZero: no effect on computed style. 10 all the way through.
// flexGrowOneToZero: 1 at 0%, 0.5 at 50%, 10 after animation is over.
new_div("animation: flexGrowOneToZero linear 1s");
is(cs.flexGrow, 10, "flexGrowOneToZero at 0.0s");
is(cs.flexGrow, 1, "flexGrowOneToZero at 0.0s");
advance_clock(500);
is(cs.flexGrow, 10, "flexGrowOneToZero at 0.5s");
is(cs.flexGrow, 0.5, "flexGrowOneToZero at 0.5s");
advance_clock(1000);
is(cs.flexGrow, 10, "flexGrowOneToZero at 1.5s");
done_div();
// flexShrinkOneToZero: no effect on computed style. 20 all the way through.
// flexShrinkOneToZero: 1 at 0%, 0.5 at 50%, 20 after animation is over.
new_div("animation: flexShrinkOneToZero linear 1s");
is(cs.flexShrink, 20, "flexShrinkOneToZero at 0.0s");
is(cs.flexShrink, 1, "flexShrinkOneToZero at 0.0s");
advance_clock(500);
is(cs.flexShrink, 20, "flexShrinkOneToZero at 0.5s");
is(cs.flexShrink, 0.5, "flexShrinkOneToZero at 0.5s");
advance_clock(1000);
is(cs.flexShrink, 20, "flexShrinkOneToZero at 1.5s");
done_div();

View File

@ -1801,7 +1801,7 @@ pref("layout.css.masking.enabled", true);
#endif
// Is support for mix-blend-mode enabled?
pref("layout.css.mix-blend-mode.enabled", false);
pref("layout.css.mix-blend-mode.enabled", true);
// Is support for the the @supports rule enabled?
pref("layout.css.supports-rule.enabled", true);
@ -1921,6 +1921,11 @@ pref("layout.interruptible-reflow.enabled", true);
// specific information is available).
pref("layout.frame_rate", -1);
// pref to dump the display list to the log. Useful for debugging invalidation problems.
#ifdef MOZ_DUMP_PAINTING
pref("layout.display-list.dump", false);
#endif
// pref to control precision of the frame rate timer. When true,
// we use a "precise" timer, which means each notification fires
// Nms after the start of the last notification. That means if the

View File

@ -16,6 +16,8 @@ from mach.mixin.logging import LoggingMixin
import mozpack.path as mozpath
import manifestparser
import mozinfo
from .data import (
ConfigFileSubstitution,
Defines,
@ -69,12 +71,16 @@ class TreeMetadataEmitter(LoggingMixin):
self.config = config
# TODO add mozinfo into config or somewhere else.
mozinfo_path = mozpath.join(config.topobjdir, 'mozinfo.json')
if os.path.exists(mozinfo_path):
self.mozinfo = json.load(open(mozinfo_path, 'rt'))
else:
self.mozinfo = {}
mozinfo.find_and_update_from_json(config.topobjdir)
# Python 2.6 doesn't allow unicode keys to be used for keyword
# arguments. This gross hack works around the problem until we
# rid ourselves of 2.6.
self.info = {}
for k, v in mozinfo.info.items():
if isinstance(k, unicode):
k = k.encode('ascii')
self.info[k] = v
self._libs = {}
self._final_libs = []
@ -476,7 +482,7 @@ class TreeMetadataEmitter(LoggingMixin):
# We return tests that don't exist because we want manifests
# defining tests that don't exist to result in error.
filtered = m.active_tests(exists=False, disabled=False,
**self.mozinfo)
**self.info)
missing = [t['name'] for t in filtered if not os.path.exists(t['path'])]
if missing:

View File

@ -191,15 +191,10 @@ typedef struct {
} TransportSecurityPreload;
static const TransportSecurityPreload kPublicKeyPinningPreloadList[] = {
{ "addons.mozilla.net", true, &kPinSet_mozilla },
{ "addons.mozilla.org", true, &kPinSet_mozilla },
{ "cdn.mozilla.net", true, &kPinSet_mozilla_cdn },
{ "cdn.mozilla.org", true, &kPinSet_mozilla_cdn },
{ "exclude-subdomains.pinning.example.com", false, &kPinSet_mozilla_test },
{ "include-subdomains.pinning.example.com", true, &kPinSet_mozilla_test },
{ "media.mozilla.com", true, &kPinSet_mozilla_cdn },
};
static const int kPublicKeyPinningPreloadListLength = 7;
static const int kPublicKeyPinningPreloadListLength = 2;
const PRTime kPreloadPKPinsExpirationTime = INT64_C(1409867186821000);
const PRTime kPreloadPKPinsExpirationTime = INT64_C(1410109244157000);

View File

@ -50,36 +50,41 @@ GetOCSPResponseForType(OCSPResponseType aORT, CERTCertificate *aCert,
}
}
// XXX CERT_FindCertIssuer uses the old, deprecated path-building logic
context.issuerCert = CERT_FindCertIssuer(aCert, now, certUsageSSLCA);
if (!context.issuerCert) {
ScopedCERTCertificate issuerCert(CERT_FindCertIssuer(aCert, now,
certUsageSSLCA));
if (!issuerCert) {
PrintPRError("CERT_FindCertIssuer failed");
return nullptr;
}
context.issuerNameDER = &issuerCert->derSubject;
context.issuerSPKI = &issuerCert->subjectPublicKeyInfo;
ScopedCERTCertificate signerCert;
if (aORT == ORTGoodOtherCA || aORT == ORTDelegatedIncluded ||
aORT == ORTDelegatedIncludedLast || aORT == ORTDelegatedMissing ||
aORT == ORTDelegatedMissingMultiple) {
context.signerCert = PK11_FindCertFromNickname(aAdditionalCertName,
nullptr);
if (!context.signerCert) {
signerCert = PK11_FindCertFromNickname(aAdditionalCertName, nullptr);
if (!signerCert) {
PrintPRError("PK11_FindCertFromNickname failed");
return nullptr;
}
}
const SECItem* certs[5] = { nullptr, nullptr, nullptr, nullptr, nullptr };
if (aORT == ORTDelegatedIncluded) {
context.includedCertificates[0] =
CERT_DupCertificate(context.signerCert.get());
certs[0] = &signerCert->derCert;
context.certs = certs;
}
if (aORT == ORTDelegatedIncludedLast || aORT == ORTDelegatedMissingMultiple) {
context.includedCertificates[0] =
CERT_DupCertificate(context.issuerCert.get());
context.includedCertificates[1] = CERT_DupCertificate(context.cert.get());
context.includedCertificates[2] =
CERT_DupCertificate(context.issuerCert.get());
certs[0] = &issuerCert->derCert;
certs[1] = &context.cert->derCert;
certs[2] = &issuerCert->derCert;
if (aORT != ORTDelegatedMissingMultiple) {
context.includedCertificates[3] =
CERT_DupCertificate(context.signerCert.get());
certs[3] = &signerCert->derCert;
}
context.certs = certs;
}
switch (aORT) {
case ORTMalformed:
context.responseStatus = 1;
@ -148,8 +153,13 @@ GetOCSPResponseForType(OCSPResponseType aORT, CERTCertificate *aCert,
context.includeEmptyExtensions = true;
}
if (!context.signerCert) {
context.signerCert = CERT_DupCertificate(context.issuerCert.get());
if (!signerCert) {
signerCert = CERT_DupCertificate(issuerCert.get());
}
context.signerPrivateKey = PK11_FindKeyByAnyCert(signerCert.get(), nullptr);
if (!context.signerPrivateKey) {
PrintPRError("PK11_FindKeyByAnyCert failed");
return nullptr;
}
SECItem* response = CreateEncodedOCSPResponse(context);

View File

@ -93,11 +93,12 @@
],
"entries": [
{ "name": "addons.mozilla.org", "include_subdomains": true, "pins": "mozilla" },
{ "name": "addons.mozilla.net", "include_subdomains": true, "pins": "mozilla" },
{ "name": "cdn.mozilla.net", "include_subdomains": true, "pins": "mozilla_cdn" },
{ "name": "cdn.mozilla.org", "include_subdomains": true, "pins": "mozilla_cdn" },
{ "name": "media.mozilla.com", "include_subdomains": true, "pins": "mozilla_cdn" },
// Disable until bug 1005653 is fixed.
// { "name": "addons.mozilla.org", "include_subdomains": true, "pins": "mozilla" },
// { "name": "addons.mozilla.net", "include_subdomains": true, "pins": "mozilla" },
// { "name": "cdn.mozilla.net", "include_subdomains": true, "pins": "mozilla_cdn" },
// { "name": "cdn.mozilla.org", "include_subdomains": true, "pins": "mozilla_cdn" },
// { "name": "media.mozilla.com", "include_subdomains": true, "pins": "mozilla_cdn" },
{ "name": "include-subdomains.pinning.example.com", "include_subdomains": true, "pins": "mozilla_test" },
{ "name": "exclude-subdomains.pinning.example.com", "include_subdomains": false, "pins": "mozilla_test" }
]

View File

@ -183,21 +183,24 @@ public:
{
private:
friend class Input;
explicit Mark(const uint8_t* mark) : mMark(mark) { }
const uint8_t* const mMark;
Mark(const Input& input, const uint8_t* mark) : input(input), mark(mark) { }
const Input& input;
const uint8_t* const mark;
void operator=(const Mark&) /* = delete */;
};
Mark GetMark() const { return Mark(input); }
Mark GetMark() const { return Mark(*this, input); }
bool GetSECItem(SECItemType type, const Mark& mark, /*out*/ SECItem& item)
Result GetSECItem(SECItemType type, const Mark& mark, /*out*/ SECItem& item)
{
PR_ASSERT(mark.mMark < input);
if (&mark.input != this || mark.mark > input) {
PR_NOT_REACHED("invalid mark");
return Fail(SEC_ERROR_INVALID_ARGS);
}
item.type = type;
item.data = const_cast<uint8_t*>(mark.mMark);
// TODO: Return false if bounds check fails
item.len = input - mark.mMark;
return true;
item.data = const_cast<uint8_t*>(mark.mark);
item.len = static_cast<decltype(item.len)>(input - mark.mark);
return Success;
}
private:

View File

@ -80,6 +80,25 @@ private:
void operator=(const Context&); // delete
};
static der::Result
HashBuf(const SECItem& item, /*out*/ uint8_t *hashBuf, size_t hashBufLen)
{
if (hashBufLen != SHA1_LENGTH) {
PR_NOT_REACHED("invalid hash length");
return der::Fail(SEC_ERROR_INVALID_ARGS);
}
if (item.len >
static_cast<decltype(item.len)>(std::numeric_limits<int32_t>::max())) {
PR_NOT_REACHED("large OCSP responses should have already been rejected");
return der::Fail(SEC_ERROR_INVALID_ARGS);
}
if (PK11_HashBuf(SEC_OID_SHA1, hashBuf, item.data,
static_cast<int32_t>(item.len)) != SECSuccess) {
return der::Fail(PR_GetError());
}
return der::Success;
}
// Verify that potentialSigner is a valid delegated OCSP response signing cert
// according to RFC 6960 section 4.2.2.2.
static Result
@ -169,9 +188,9 @@ static inline der::Result CheckExtensionsForCriticality(der::Input&);
static inline der::Result CertID(der::Input& input,
const Context& context,
/*out*/ bool& match);
static der::Result MatchIssuerKey(const SECItem& issuerKeyHash,
const CERTCertificate& issuer,
/*out*/ bool& match);
static der::Result MatchKeyHash(const SECItem& issuerKeyHash,
const CERTCertificate& issuer,
/*out*/ bool& match);
// RFC 6960 section 4.2.2.2: The OCSP responder must either be the issuer of
// the cert or it must be a delegated OCSP response signing cert directly
@ -240,12 +259,11 @@ GetOCSPSignerCertificate(TrustDomain& trustDomain,
!= der::Success) {
return nullptr;
}
SECItem issuerKeyHash;
if (der::Skip(responderID, der::OCTET_STRING, issuerKeyHash) != der::Success) {
SECItem keyHash;
if (der::Skip(responderID, der::OCTET_STRING, keyHash) != der::Success) {
return nullptr;
}
if (MatchIssuerKey(issuerKeyHash, *potentialSigner.get(), match)
!= der::Success) {
if (MatchKeyHash(keyHash, *potentialSigner.get(), match) != der::Success) {
return nullptr;
}
break;
@ -442,7 +460,9 @@ BasicResponse(der::Input& input, Context& context)
CERTSignedData signedData;
input.GetSECItem(siBuffer, mark, signedData.data);
if (input.GetSECItem(siBuffer, mark, signedData.data) != der::Success) {
return der::Failure;
}
if (der::Nested(input, der::SEQUENCE,
bind(der::AlgorithmIdentifier, _1,
@ -503,7 +523,9 @@ BasicResponse(der::Input& input, Context& context)
return der::Failure;
}
input.GetSECItem(siBuffer, mark, certs[numCerts]);
if (input.GetSECItem(siBuffer, mark, certs[numCerts]) != der::Success) {
return der::Failure;
}
++numCerts;
}
}
@ -774,8 +796,7 @@ CertID(der::Input& input, const Context& context, /*out*/ bool& match)
// "The hash shall be calculated over the DER encoding of the
// issuer's name field in the certificate being checked."
uint8_t hashBuf[SHA1_LENGTH];
if (PK11_HashBuf(SEC_OID_SHA1, hashBuf, cert.derIssuer.data,
cert.derIssuer.len) != SECSuccess) {
if (HashBuf(cert.derIssuer, hashBuf, sizeof(hashBuf)) != der::Success) {
return der::Failure;
}
if (memcmp(hashBuf, issuerNameHash.data, issuerNameHash.len)) {
@ -784,17 +805,17 @@ CertID(der::Input& input, const Context& context, /*out*/ bool& match)
return der::Success;
}
return MatchIssuerKey(issuerKeyHash, issuerCert, match);
return MatchKeyHash(issuerKeyHash, issuerCert, match);
}
// From http://tools.ietf.org/html/rfc6960#section-4.1.1:
// "The hash shall be calculated over the value (excluding tag and length) of
// the subject public key field in the issuer's certificate."
static der::Result
MatchIssuerKey(const SECItem& issuerKeyHash, const CERTCertificate& issuer,
/*out*/ bool& match)
MatchKeyHash(const SECItem& keyHash, const CERTCertificate& cert,
/*out*/ bool& match)
{
if (issuerKeyHash.len != SHA1_LENGTH) {
if (keyHash.len != SHA1_LENGTH) {
return der::Fail(SEC_ERROR_OCSP_MALFORMED_RESPONSE);
}
@ -803,15 +824,15 @@ MatchIssuerKey(const SECItem& issuerKeyHash, const CERTCertificate& issuer,
// Copy just the length and data pointer (nothing needs to be freed) of the
// subject public key so we can convert the length from bits to bytes, which
// is what the digest function expects.
SECItem spk = issuer.subjectPublicKeyInfo.subjectPublicKey;
SECItem spk = cert.subjectPublicKeyInfo.subjectPublicKey;
DER_ConvertBitString(&spk);
static uint8_t hashBuf[SHA1_LENGTH];
if (PK11_HashBuf(SEC_OID_SHA1, hashBuf, spk.data, spk.len) != SECSuccess) {
if (HashBuf(spk, hashBuf, sizeof(hashBuf)) != der::Success) {
return der::Failure;
}
match = !memcmp(hashBuf, issuerKeyHash.data, issuerKeyHash.len);
match = !memcmp(hashBuf, keyHash.data, keyHash.len);
return der::Success;
}
@ -944,11 +965,11 @@ CreateEncodedOCSPRequest(PLArenaPool* arena,
}
uint8_t* d = encodedRequest->data;
*d++ = 0x30; *d++ = totalLen - 2; // OCSPRequest (SEQUENCE)
*d++ = 0x30; *d++ = totalLen - 4; // tbsRequest (SEQUENCE)
*d++ = 0x30; *d++ = totalLen - 6; // requestList (SEQUENCE OF)
*d++ = 0x30; *d++ = totalLen - 8; // Request (SEQUENCE)
*d++ = 0x30; *d++ = totalLen - 10; // reqCert (CertID SEQUENCE)
*d++ = 0x30; *d++ = totalLen - 2u; // OCSPRequest (SEQUENCE)
*d++ = 0x30; *d++ = totalLen - 4u; // tbsRequest (SEQUENCE)
*d++ = 0x30; *d++ = totalLen - 6u; // requestList (SEQUENCE OF)
*d++ = 0x30; *d++ = totalLen - 8u; // Request (SEQUENCE)
*d++ = 0x30; *d++ = totalLen - 10u; // reqCert (CertID SEQUENCE)
// reqCert.hashAlgorithm
for (size_t i = 0; i < PR_ARRAY_SIZE(hashAlgorithm); ++i) {
@ -958,8 +979,7 @@ CreateEncodedOCSPRequest(PLArenaPool* arena,
// reqCert.issuerNameHash (OCTET STRING)
*d++ = 0x04;
*d++ = hashLen;
if (PK11_HashBuf(SEC_OID_SHA1, d, issuerCert->derSubject.data,
issuerCert->derSubject.len) != SECSuccess) {
if (HashBuf(issuerCert->derSubject, d, hashLen) != der::Success) {
return nullptr;
}
d += hashLen;
@ -969,7 +989,7 @@ CreateEncodedOCSPRequest(PLArenaPool* arena,
*d++ = hashLen;
SECItem key = issuerCert->subjectPublicKeyInfo.subjectPublicKey;
DER_ConvertBitString(&key);
if (PK11_HashBuf(SEC_OID_SHA1, d, key.data, key.len) != SECSuccess) {
if (HashBuf(key, d, hashLen) != der::Success) {
return nullptr;
}
d += hashLen;

View File

@ -22,6 +22,17 @@ TEST_DIRS += [
'test/lib',
]
CXXFLAGS += ['-Wall']
# -Wall with Visual C++ enables too many problematic warnings
if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'windows':
CXXFLAGS += [
'-wd4514', # 'function': unreferenced inline function has been removed
'-wd4668', # 'symbol' is not defined as a preprocessor macro...
'-wd4710', # 'function': function not inlined
'-wd4711', # function 'function' selected for inline expansion
'-wd4820', # 'bytes' bytes padding added after construct 'member_name'
]
FAIL_ON_WARNINGS = True
LIBRARY_NAME = 'mozillapkix'

View File

@ -417,13 +417,32 @@ TEST_F(pkixder_input_tests, MarkAndGetSECItem)
SECItem item;
memset(&item, 0x00, sizeof item);
ASSERT_TRUE(input.GetSECItem(siBuffer, mark, item));
ASSERT_EQ(Success, input.GetSECItem(siBuffer, mark, item));
ASSERT_EQ(siBuffer, item.type);
ASSERT_EQ(sizeof expectedItemData, item.len);
ASSERT_TRUE(item.data);
ASSERT_EQ(0, memcmp(item.data, expectedItemData, sizeof expectedItemData));
}
// Cannot run this test on debug builds because of the PR_NOT_REACHED
#ifndef DEBUG
TEST_F(pkixder_input_tests, MarkAndGetSECItemDifferentInput)
{
Input input;
const uint8_t der[] = { 0x11, 0x22, 0x33, 0x44 };
ASSERT_EQ(Success, input.Init(der, sizeof der));
Input another;
Input::Mark mark = another.GetMark();
ASSERT_EQ(Success, input.Skip(3));
SECItem item;
ASSERT_EQ(Failure, input.GetSECItem(siBuffer, mark, item));
ASSERT_EQ(SEC_ERROR_INVALID_ARGS, PR_GetError());
}
#endif
TEST_F(pkixder_input_tests, ExpectTagAndLength)
{
Input input;

View File

@ -15,18 +15,77 @@
* limitations under the License.
*/
#include "pkixcheck.h"
#include "pkixder.h"
#include "pkixtestutil.h"
#include <cerrno>
#include <limits>
#include <new>
#include "cryptohi.h"
#include "hasht.h"
#include "pk11pub.h"
#include "pkixcheck.h"
#include "pkixder.h"
#include "prinit.h"
#include "prprf.h"
#include "secder.h"
using namespace std;
namespace mozilla { namespace pkix { namespace test {
const PRTime ONE_DAY = PRTime(24) * PRTime(60) * PRTime(60) * PR_USEC_PER_SEC;
namespace {
inline void
deleteCharArray(char* chars)
{
delete[] chars;
}
} // unnamed namespace
FILE*
OpenFile(const char* dir, const char* filename, const char* mode)
{
PR_ASSERT(dir);
PR_ASSERT(*dir);
PR_ASSERT(filename);
PR_ASSERT(*filename);
ScopedPtr<char, deleteCharArray>
path(new (nothrow) char[strlen(dir) + 1 + strlen(filename) + 1]);
if (!path) {
PR_SetError(SEC_ERROR_NO_MEMORY, 0);
return nullptr;
}
strcpy(path.get(), dir);
strcat(path.get(), "/");
strcat(path.get(), filename);
ScopedFILE file;
#ifdef _MSC_VER
{
FILE* rawFile;
errno_t error = fopen_s(&rawFile, path.get(), mode);
if (error) {
// TODO: map error to NSPR error code
PR_SetError(PR_FILE_NOT_FOUND_ERROR, error);
rawFile = nullptr;
}
file = rawFile;
}
#else
file = fopen(path.get(), mode);
if (!file) {
// TODO: map errno to NSPR error code
PR_SetError(PR_FILE_NOT_FOUND_ERROR, errno);
}
#endif
return file.release();
}
class Output
{
public:
@ -102,7 +161,7 @@ private:
}
}
static const size_t MaxSequenceItems = 5;
static const size_t MaxSequenceItems = 10;
const SECItem* contents[MaxSequenceItems];
size_t numItems;
size_t length;
@ -116,25 +175,24 @@ OCSPResponseContext::OCSPResponseContext(PLArenaPool* arena,
PRTime time)
: arena(arena)
, cert(CERT_DupCertificate(cert))
, issuerCert(nullptr)
, signerCert(nullptr)
, responseStatus(0)
, responseStatus(successful)
, skipResponseBytes(false)
, issuerNameDER(nullptr)
, issuerSPKI(nullptr)
, signerNameDER(nullptr)
, producedAt(time)
, extensions(nullptr)
, includeEmptyExtensions(false)
, badSignature(false)
, certs(nullptr)
, certIDHashAlg(SEC_OID_SHA1)
, certStatus(good)
, revocationTime(0)
, thisUpdate(time)
, nextUpdate(time + 10 * PR_USEC_PER_SEC)
, includeNextUpdate(true)
, certIDHashAlg(SEC_OID_SHA1)
, certStatus(0)
, revocationTime(0)
, badSignature(false)
, responderIDType(ByKeyHash)
, extensions(nullptr)
, includeEmptyExtensions(false)
{
for (size_t i = 0; i < MaxIncludedCertificates; i++) {
includedCertificates[i] = nullptr;
}
}
static SECItem* ResponseBytes(OCSPResponseContext& context);
@ -145,10 +203,9 @@ static SECItem* KeyHash(OCSPResponseContext& context);
static SECItem* SingleResponse(OCSPResponseContext& context);
static SECItem* CertID(OCSPResponseContext& context);
static SECItem* CertStatus(OCSPResponseContext& context);
static SECItem* Certificates(OCSPResponseContext& context);
static SECItem*
EncodeNested(PLArenaPool* arena, uint8_t tag, SECItem* inner)
EncodeNested(PLArenaPool* arena, uint8_t tag, const SECItem* inner)
{
Output output;
if (output.Add(inner) != der::Success) {
@ -198,10 +255,10 @@ HashedOctetString(PLArenaPool* arena, const SECItem* bytes, SECOidTag hashAlg)
}
static SECItem*
KeyHashHelper(PLArenaPool* arena, const CERTCertificate* cert)
KeyHashHelper(PLArenaPool* arena, const CERTSubjectPublicKeyInfo* spki)
{
// We only need a shallow copy here.
SECItem spk = cert->subjectPublicKeyInfo.subjectPublicKey;
SECItem spk = spki->subjectPublicKey;
DER_ConvertBitString(&spk); // bits to bytes
return HashedOctetString(arena, &spk, SEC_OID_SHA1);
}
@ -229,24 +286,597 @@ AlgorithmIdentifier(PLArenaPool* arena, SECOidTag algTag)
}
static SECItem*
PRTimeToEncodedTime(PLArenaPool* arena, PRTime time)
BitString(PLArenaPool* arena, const SECItem* rawBytes, bool corrupt)
{
SECItem derTime;
if (DER_TimeToGeneralizedTimeArena(arena, &derTime, time) != SECSuccess) {
// We have to add a byte at the beginning indicating no unused bits.
// TODO: add ability to have bit strings of bit length not divisible by 8,
// resulting in unused bits in the bitstring encoding
SECItem* prefixed = SECITEM_AllocItem(arena, nullptr, rawBytes->len + 1);
if (!prefixed) {
return nullptr;
}
return EncodeNested(arena, der::GENERALIZED_TIME, &derTime);
prefixed->data[0] = 0;
memcpy(prefixed->data + 1, rawBytes->data, rawBytes->len);
if (corrupt) {
PR_ASSERT(prefixed->len > 8);
prefixed->data[8]++;
}
return EncodeNested(arena, der::BIT_STRING, prefixed);
}
static SECItem*
Boolean(PLArenaPool* arena, bool value)
{
PR_ASSERT(arena);
SECItem* result(SECITEM_AllocItem(arena, nullptr, 3));
if (!result) {
return nullptr;
}
result->data[0] = der::BOOLEAN;
result->data[1] = 1; // length
result->data[2] = value ? 0xff : 0x00;
return result;
}
static SECItem*
Integer(PLArenaPool* arena, long value)
{
if (value < 0 || value > 127) {
// TODO: add encoding of larger values
PR_SetError(PR_NOT_IMPLEMENTED_ERROR, 0);
return nullptr;
}
SECItem* encoded = SECITEM_AllocItem(arena, nullptr, 3);
if (!encoded) {
return nullptr;
}
encoded->data[0] = der::INTEGER;
encoded->data[1] = 1; // length
encoded->data[2] = value;
return encoded;
}
static SECItem*
OID(PLArenaPool* arena, SECOidTag tag)
{
const SECOidData* extnIDData(SECOID_FindOIDByTag(tag));
if (!extnIDData) {
return nullptr;
}
return EncodeNested(arena, der::OIDTag, &extnIDData->oid);
}
enum TimeEncoding { UTCTime = 0, GeneralizedTime = 1 };
// http://tools.ietf.org/html/rfc5280#section-4.1.2.5
// UTCTime: YYMMDDHHMMSSZ (years 1950-2049 only)
// GeneralizedTime: YYYYMMDDHHMMSSZ
static SECItem*
PRTimeToEncodedTime(PLArenaPool* arena, PRTime time, TimeEncoding encoding)
{
PR_ASSERT(encoding == UTCTime || encoding == GeneralizedTime);
PRExplodedTime exploded;
PR_ExplodeTime(time, PR_GMTParameters, &exploded);
if (exploded.tm_sec >= 60) {
// round down for leap seconds
exploded.tm_sec = 59;
}
if (encoding == UTCTime &&
(exploded.tm_year < 1950 || exploded.tm_year >= 2050)) {
PR_SetError(SEC_ERROR_INVALID_ARGS, 0);
return nullptr;
}
SECItem* derTime = SECITEM_AllocItem(arena, nullptr,
encoding == UTCTime ? 15 : 17);
if (!derTime) {
return nullptr;
}
size_t i = 0;
derTime->data[i++] = encoding == GeneralizedTime ? 0x18 : 0x17; // tag
derTime->data[i++] = static_cast<uint8_t>(derTime->len - 2); // length
if (encoding == GeneralizedTime) {
derTime->data[i++] = '0' + (exploded.tm_year / 1000);
derTime->data[i++] = '0' + ((exploded.tm_year % 1000) / 100);
}
derTime->data[i++] = '0' + ((exploded.tm_year % 100) / 10);
derTime->data[i++] = '0' + (exploded.tm_year % 10);
derTime->data[i++] = '0' + ((exploded.tm_month + 1) / 10);
derTime->data[i++] = '0' + ((exploded.tm_month + 1) % 10);
derTime->data[i++] = '0' + (exploded.tm_mday / 10);
derTime->data[i++] = '0' + (exploded.tm_mday % 10);
derTime->data[i++] = '0' + (exploded.tm_hour / 10);
derTime->data[i++] = '0' + (exploded.tm_hour % 10);
derTime->data[i++] = '0' + (exploded.tm_min / 10);
derTime->data[i++] = '0' + (exploded.tm_min % 10);
derTime->data[i++] = '0' + (exploded.tm_sec / 10);
derTime->data[i++] = '0' + (exploded.tm_sec % 10);
derTime->data[i++] = 'Z';
return derTime;
}
static SECItem*
PRTimeToGeneralizedTime(PLArenaPool* arena, PRTime time)
{
return PRTimeToEncodedTime(arena, time, GeneralizedTime);
}
// http://tools.ietf.org/html/rfc5280#section-4.1.2.5: "CAs conforming to this
// profile MUST always encode certificate validity dates through the year 2049
// as UTCTime; certificate validity dates in 2050 or later MUST be encoded as
// GeneralizedTime." (This is a special case of the rule that we must always
// use the shortest possible encoding.)
static SECItem*
PRTimeToTimeChoice(PLArenaPool* arena, PRTime time)
{
PRExplodedTime exploded;
PR_ExplodeTime(time, PR_GMTParameters, &exploded);
return PRTimeToEncodedTime(arena, time,
(exploded.tm_year >= 1950 && exploded.tm_year < 2050) ? UTCTime
: GeneralizedTime);
}
static SECItem*
SignedData(PLArenaPool* arena, const SECItem* tbsData,
SECKEYPrivateKey* privKey, SECOidTag hashAlg,
bool corrupt, /*optional*/ SECItem const* const* certs)
{
PR_ASSERT(arena);
PR_ASSERT(tbsData);
PR_ASSERT(privKey);
if (!arena || !tbsData || !privKey) {
PR_SetError(SEC_ERROR_INVALID_ARGS, 0);
return nullptr;
}
SECOidTag signatureAlgTag = SEC_GetSignatureAlgorithmOidTag(privKey->keyType,
hashAlg);
if (signatureAlgTag == SEC_OID_UNKNOWN) {
return nullptr;
}
SECItem* signatureAlgorithm = AlgorithmIdentifier(arena, signatureAlgTag);
if (!signatureAlgorithm) {
return nullptr;
}
// SEC_SignData doesn't take an arena parameter, so we have to manage
// the memory allocated in signature.
SECItem signature;
if (SEC_SignData(&signature, tbsData->data, tbsData->len, privKey,
signatureAlgTag) != SECSuccess)
{
return nullptr;
}
// TODO: add ability to have signatures of bit length not divisible by 8,
// resulting in unused bits in the bitstring encoding
SECItem* signatureNested = BitString(arena, &signature, corrupt);
SECITEM_FreeItem(&signature, false);
if (!signatureNested) {
return nullptr;
}
SECItem* certsNested = nullptr;
if (certs) {
Output certsOutput;
while (*certs) {
certsOutput.Add(*certs);
++certs;
}
SECItem* certsSequence = certsOutput.Squash(arena, der::SEQUENCE);
if (!certsSequence) {
return nullptr;
}
certsNested = EncodeNested(arena,
der::CONSTRUCTED | der::CONTEXT_SPECIFIC | 0,
certsSequence);
if (!certsNested) {
return nullptr;
}
}
Output output;
if (output.Add(tbsData) != der::Success) {
return nullptr;
}
if (output.Add(signatureAlgorithm) != der::Success) {
return nullptr;
}
if (output.Add(signatureNested) != der::Success) {
return nullptr;
}
if (certsNested) {
if (output.Add(certsNested) != der::Success) {
return nullptr;
}
}
return output.Squash(arena, der::SEQUENCE);
}
// Extension ::= SEQUENCE {
// extnID OBJECT IDENTIFIER,
// critical BOOLEAN DEFAULT FALSE,
// extnValue OCTET STRING
// -- contains the DER encoding of an ASN.1 value
// -- corresponding to the extension type identified
// -- by extnID
// }
static SECItem*
Extension(PLArenaPool* arena, SECOidTag extnIDTag,
ExtensionCriticality criticality, Output& value)
{
PR_ASSERT(arena);
if (!arena) {
PR_SetError(SEC_ERROR_INVALID_ARGS, 0);
return nullptr;
}
Output output;
const SECItem* extnID(OID(arena, extnIDTag));
if (!extnID) {
return nullptr;
}
if (output.Add(extnID) != der::Success) {
return nullptr;
}
if (criticality == ExtensionCriticality::Critical) {
SECItem* critical(Boolean(arena, true));
if (output.Add(critical) != der::Success) {
return nullptr;
}
}
SECItem* extnValueBytes(value.Squash(arena, der::SEQUENCE));
if (!extnValueBytes) {
return nullptr;
}
SECItem* extnValue(EncodeNested(arena, der::OCTET_STRING, extnValueBytes));
if (!extnValue) {
return nullptr;
}
if (output.Add(extnValue) != der::Success) {
return nullptr;
}
return output.Squash(arena, der::SEQUENCE);
}
SECItem*
MaybeLogOutput(SECItem* result, const char* suffix)
{
PR_ASSERT(suffix);
if (!result) {
return nullptr;
}
// This allows us to more easily debug the generated output, by creating a
// file in the directory given by MOZILLA_PKIX_TEST_LOG_DIR for each
// NOT THREAD-SAFE!!!
const char* logPath = getenv("MOZILLA_PKIX_TEST_LOG_DIR");
if (logPath) {
static int counter = 0;
ScopedPtr<char, PR_smprintf_free>
filename(PR_smprintf("%u-%s.der", counter, suffix));
++counter;
if (filename) {
ScopedFILE file(OpenFile(logPath, filename.get(), "wb"));
if (file) {
(void) fwrite(result->data, result->len, 1, file.get());
}
}
}
return result;
}
///////////////////////////////////////////////////////////////////////////////
// Key Pairs
SECStatus
GenerateKeyPair(/*out*/ ScopedSECKEYPublicKey& publicKey,
/*out*/ ScopedSECKEYPrivateKey& privateKey)
{
ScopedPtr<PK11SlotInfo, PK11_FreeSlot> slot(PK11_GetInternalSlot());
if (!slot) {
return SECFailure;
}
PK11RSAGenParams params;
params.keySizeInBits = 2048;
params.pe = 3;
SECKEYPublicKey* publicKeyTemp = nullptr;
privateKey = PK11_GenerateKeyPair(slot.get(), CKM_RSA_PKCS_KEY_PAIR_GEN,
&params, &publicKeyTemp, false, true,
nullptr);
if (!privateKey) {
PR_ASSERT(!publicKeyTemp);
return SECFailure;
}
publicKey = publicKeyTemp;
PR_ASSERT(publicKey);
return SECSuccess;
}
///////////////////////////////////////////////////////////////////////////////
// Certificates
static SECItem* TBSCertificate(PLArenaPool* arena, long version,
long serialNumber, SECOidTag signature,
const SECItem* issuer, PRTime notBefore,
PRTime notAfter, const SECItem* subject,
const SECKEYPublicKey* subjectPublicKey,
/*optional*/ SECItem const* const* extensions);
// Certificate ::= SEQUENCE {
// tbsCertificate TBSCertificate,
// signatureAlgorithm AlgorithmIdentifier,
// signatureValue BIT STRING }
SECItem*
CreateEncodedCertificate(PLArenaPool* arena, long version,
SECOidTag signature, long serialNumber,
const SECItem* issuerNameDER, PRTime notBefore,
PRTime notAfter, const SECItem* subjectNameDER,
/*optional*/ SECItem const* const* extensions,
/*optional*/ SECKEYPrivateKey* issuerPrivateKey,
SECOidTag signatureHashAlg,
/*out*/ ScopedSECKEYPrivateKey& privateKey)
{
PR_ASSERT(arena);
PR_ASSERT(issuerNameDER);
PR_ASSERT(subjectNameDER);
if (!arena || !issuerNameDER || !subjectNameDER) {
PR_SetError(SEC_ERROR_INVALID_ARGS, 0);
return nullptr;
}
ScopedSECKEYPublicKey publicKey;
if (GenerateKeyPair(publicKey, privateKey) != SECSuccess) {
return nullptr;
}
SECItem* tbsCertificate(TBSCertificate(arena, version, serialNumber,
signature, issuerNameDER, notBefore,
notAfter, subjectNameDER,
publicKey.get(), extensions));
if (!tbsCertificate) {
return nullptr;
}
return MaybeLogOutput(SignedData(arena, tbsCertificate,
issuerPrivateKey ? issuerPrivateKey
: privateKey.get(),
signatureHashAlg, false, nullptr), "cert");
}
// TBSCertificate ::= SEQUENCE {
// version [0] Version DEFAULT v1,
// serialNumber CertificateSerialNumber,
// signature AlgorithmIdentifier,
// issuer Name,
// validity Validity,
// subject Name,
// subjectPublicKeyInfo SubjectPublicKeyInfo,
// issuerUniqueID [1] IMPLICIT UniqueIdentifier OPTIONAL,
// -- If present, version MUST be v2 or v3
// subjectUniqueID [2] IMPLICIT UniqueIdentifier OPTIONAL,
// -- If present, version MUST be v2 or v3
// extensions [3] Extensions OPTIONAL
// -- If present, version MUST be v3 -- }
static SECItem*
TBSCertificate(PLArenaPool* arena, long versionValue,
long serialNumberValue, SECOidTag signatureOidTag,
const SECItem* issuer, PRTime notBeforeTime,
PRTime notAfterTime, const SECItem* subject,
const SECKEYPublicKey* subjectPublicKey,
/*optional*/ SECItem const* const* extensions)
{
PR_ASSERT(arena);
PR_ASSERT(issuer);
PR_ASSERT(subject);
PR_ASSERT(subjectPublicKey);
if (!arena || !issuer || !subject || !subjectPublicKey) {
PR_SetError(SEC_ERROR_INVALID_ARGS, 0);
return nullptr;
}
Output output;
if (versionValue != der::v1) {
SECItem* versionInteger(Integer(arena, versionValue));
if (!versionInteger) {
return nullptr;
}
SECItem* version(EncodeNested(arena,
der::CONSTRUCTED | der::CONTEXT_SPECIFIC | 0,
versionInteger));
if (!version) {
return nullptr;
}
if (output.Add(version) != der::Success) {
return nullptr;
}
}
SECItem* serialNumber(Integer(arena, serialNumberValue));
if (!serialNumber) {
return nullptr;
}
if (output.Add(serialNumber) != der::Success) {
return nullptr;
}
SECItem* signature(AlgorithmIdentifier(arena, signatureOidTag));
if (!signature) {
return nullptr;
}
if (output.Add(signature) != der::Success) {
return nullptr;
}
if (output.Add(issuer) != der::Success) {
return nullptr;
}
// Validity ::= SEQUENCE {
// notBefore Time,
// notAfter Time }
SECItem* validity;
{
SECItem* notBefore(PRTimeToTimeChoice(arena, notBeforeTime));
if (!notBefore) {
return nullptr;
}
SECItem* notAfter(PRTimeToTimeChoice(arena, notAfterTime));
if (!notAfter) {
return nullptr;
}
Output validityOutput;
if (validityOutput.Add(notBefore) != der::Success) {
return nullptr;
}
if (validityOutput.Add(notAfter) != der::Success) {
return nullptr;
}
validity = validityOutput.Squash(arena, der::SEQUENCE);
if (!validity) {
return nullptr;
}
}
if (output.Add(validity) != der::Success) {
return nullptr;
}
if (output.Add(subject) != der::Success) {
return nullptr;
}
// SubjectPublicKeyInfo ::= SEQUENCE {
// algorithm AlgorithmIdentifier,
// subjectPublicKey BIT STRING }
ScopedSECItem subjectPublicKeyInfo(
SECKEY_EncodeDERSubjectPublicKeyInfo(subjectPublicKey));
if (!subjectPublicKeyInfo) {
return nullptr;
}
if (output.Add(subjectPublicKeyInfo.get()) != der::Success) {
return nullptr;
}
if (extensions) {
Output extensionsOutput;
while (*extensions) {
if (extensionsOutput.Add(*extensions) != der::Success) {
return nullptr;
}
++extensions;
}
SECItem* allExtensions(extensionsOutput.Squash(arena, der::SEQUENCE));
if (!allExtensions) {
return nullptr;
}
SECItem* extensionsWrapped(
EncodeNested(arena, der::CONSTRUCTED | der::CONTEXT_SPECIFIC | 3,
allExtensions));
if (!extensions) {
return nullptr;
}
if (output.Add(extensionsWrapped) != der::Success) {
return nullptr;
}
}
return output.Squash(arena, der::SEQUENCE);
}
// BasicConstraints ::= SEQUENCE {
// cA BOOLEAN DEFAULT FALSE,
// pathLenConstraint INTEGER (0..MAX) OPTIONAL }
SECItem*
CreateEncodedBasicConstraints(PLArenaPool* arena, bool isCA,
long pathLenConstraintValue,
ExtensionCriticality criticality)
{
PR_ASSERT(arena);
if (!arena) {
PR_SetError(SEC_ERROR_INVALID_ARGS, 0);
return nullptr;
}
Output value;
if (isCA) {
if (value.Add(Boolean(arena, true)) != der::Success) {
return nullptr;
}
}
SECItem* pathLenConstraint(Integer(arena, pathLenConstraintValue));
if (!pathLenConstraint) {
return nullptr;
}
if (value.Add(pathLenConstraint) != der::Success) {
return nullptr;
}
return Extension(arena, SEC_OID_X509_BASIC_CONSTRAINTS, criticality, value);
}
// ExtKeyUsageSyntax ::= SEQUENCE SIZE (1..MAX) OF KeyPurposeId
// KeyPurposeId ::= OBJECT IDENTIFIER
SECItem*
CreateEncodedEKUExtension(PLArenaPool* arena, SECOidTag const* ekus,
size_t ekusCount, ExtensionCriticality criticality)
{
PR_ASSERT(arena);
PR_ASSERT(ekus);
if (!arena || (!ekus && ekusCount != 0)) {
PR_SetError(SEC_ERROR_INVALID_ARGS, 0);
return nullptr;
}
Output value;
for (size_t i = 0; i < ekusCount; ++i) {
SECItem* encodedEKUOID = OID(arena, ekus[i]);
if (!encodedEKUOID) {
return nullptr;
}
if (value.Add(encodedEKUOID) != der::Success) {
return nullptr;
}
}
return Extension(arena, SEC_OID_X509_EXT_KEY_USAGE, criticality, value);
}
///////////////////////////////////////////////////////////////////////////////
// OCSP responses
SECItem*
CreateEncodedOCSPResponse(OCSPResponseContext& context)
{
if (!context.arena || !context.cert || !context.issuerCert ||
!context.signerCert) {
if (!context.arena) {
PR_SetError(SEC_ERROR_INVALID_ARGS, 0);
return nullptr;
}
if (!context.skipResponseBytes) {
if (!context.cert || !context.issuerNameDER || !context.issuerSPKI ||
!context.signerPrivateKey) {
PR_SetError(SEC_ERROR_INVALID_ARGS, 0);
return nullptr;
}
}
// OCSPResponse ::= SEQUENCE {
// responseStatus OCSPResponseStatus,
// responseBytes [0] EXPLICIT ResponseBytes OPTIONAL }
@ -293,7 +923,7 @@ CreateEncodedOCSPResponse(OCSPResponseContext& context)
return nullptr;
}
}
return output.Squash(context.arena, der::SEQUENCE);
return MaybeLogOutput(output.Squash(context.arena, der::SEQUENCE), "ocsp");
}
// ResponseBytes ::= SEQUENCE {
@ -344,84 +974,10 @@ BasicOCSPResponse(OCSPResponseContext& context)
return nullptr;
}
pkix::ScopedPtr<SECKEYPrivateKey, SECKEY_DestroyPrivateKey> privKey(
PK11_FindKeyByAnyCert(context.signerCert.get(), nullptr));
if (!privKey) {
return nullptr;
}
SECOidTag signatureAlgTag = SEC_GetSignatureAlgorithmOidTag(privKey->keyType,
SEC_OID_SHA1);
if (signatureAlgTag == SEC_OID_UNKNOWN) {
return nullptr;
}
SECItem* signatureAlgorithm = AlgorithmIdentifier(context.arena,
signatureAlgTag);
if (!signatureAlgorithm) {
return nullptr;
}
// SEC_SignData doesn't take an arena parameter, so we have to manage
// the memory allocated in signature.
SECItem signature;
if (SEC_SignData(&signature, tbsResponseData->data, tbsResponseData->len,
privKey.get(), signatureAlgTag) != SECSuccess)
{
return nullptr;
}
// We have to add a byte at the beginning indicating no unused bits.
// TODO: add ability to have signatures of bit length not divisible by 8,
// resulting in unused bits in the bitstring encoding
SECItem* prefixedSignature = SECITEM_AllocItem(context.arena, nullptr,
signature.len + 1);
if (!prefixedSignature) {
SECITEM_FreeItem(&signature, false);
return nullptr;
}
prefixedSignature->data[0] = 0;
memcpy(prefixedSignature->data + 1, signature.data, signature.len);
SECITEM_FreeItem(&signature, false);
if (context.badSignature) {
PR_ASSERT(prefixedSignature->len > 8);
prefixedSignature->data[8]++;
}
SECItem* signatureNested = EncodeNested(context.arena, der::BIT_STRING,
prefixedSignature);
if (!signatureNested) {
return nullptr;
}
SECItem* certificatesNested = nullptr;
if (context.includedCertificates[0]) {
SECItem* certificates = Certificates(context);
if (!certificates) {
return nullptr;
}
certificatesNested = EncodeNested(context.arena,
der::CONSTRUCTED |
der::CONTEXT_SPECIFIC |
0,
certificates);
if (!certificatesNested) {
return nullptr;
}
}
Output output;
if (output.Add(tbsResponseData) != der::Success) {
return nullptr;
}
if (output.Add(signatureAlgorithm) != der::Success) {
return nullptr;
}
if (output.Add(signatureNested) != der::Success) {
return nullptr;
}
if (certificatesNested) {
if (output.Add(certificatesNested) != der::Success) {
return nullptr;
}
}
return output.Squash(context.arena, der::SEQUENCE);
// TODO(bug 980538): certs
return SignedData(context.arena, tbsResponseData,
context.signerPrivateKey.get(), SEC_OID_SHA1,
context.badSignature, context.certs);
}
// Extension ::= SEQUENCE {
@ -499,8 +1055,8 @@ ResponseData(OCSPResponseContext& context)
if (!responderID) {
return nullptr;
}
SECItem* producedAtEncoded = PRTimeToEncodedTime(context.arena,
context.producedAt);
SECItem* producedAtEncoded = PRTimeToGeneralizedTime(context.arena,
context.producedAt);
if (!producedAtEncoded) {
return nullptr;
}
@ -543,22 +1099,23 @@ ResponseData(OCSPResponseContext& context)
SECItem*
ResponderID(OCSPResponseContext& context)
{
SECItem* contents = nullptr;
if (context.responderIDType == OCSPResponseContext::ByName) {
contents = &context.signerCert->derSubject;
} else if (context.responderIDType == OCSPResponseContext::ByKeyHash) {
contents = KeyHash(context);
if (!contents) {
return nullptr;
}
const SECItem* contents;
uint8_t responderIDType;
if (context.signerNameDER) {
contents = context.signerNameDER;
responderIDType = 1; // byName
} else {
contents = KeyHash(context);
responderIDType = 2; // byKey
}
if (!contents) {
return nullptr;
}
return EncodeNested(context.arena,
der::CONSTRUCTED |
der::CONTEXT_SPECIFIC |
context.responderIDType,
responderIDType,
contents);
}
@ -570,7 +1127,17 @@ ResponderID(OCSPResponseContext& context)
SECItem*
KeyHash(OCSPResponseContext& context)
{
return KeyHashHelper(context.arena, context.signerCert.get());
ScopedSECKEYPublicKey
signerPublicKey(SECKEY_ConvertToPublicKey(context.signerPrivateKey.get()));
if (!signerPublicKey) {
return nullptr;
}
ScopedPtr<CERTSubjectPublicKeyInfo, SECKEY_DestroySubjectPublicKeyInfo>
signerSPKI(SECKEY_CreateSubjectPublicKeyInfo(signerPublicKey.get()));
if (!signerSPKI) {
return nullptr;
}
return KeyHashHelper(context.arena, signerSPKI.get());
}
// SingleResponse ::= SEQUENCE {
@ -590,15 +1157,15 @@ SingleResponse(OCSPResponseContext& context)
if (!certStatus) {
return nullptr;
}
SECItem* thisUpdateEncoded = PRTimeToEncodedTime(context.arena,
context.thisUpdate);
SECItem* thisUpdateEncoded = PRTimeToGeneralizedTime(context.arena,
context.thisUpdate);
if (!thisUpdateEncoded) {
return nullptr;
}
SECItem* nextUpdateEncodedNested = nullptr;
if (context.includeNextUpdate) {
SECItem* nextUpdateEncoded = PRTimeToEncodedTime(context.arena,
context.nextUpdate);
SECItem* nextUpdateEncoded = PRTimeToGeneralizedTime(context.arena,
context.nextUpdate);
if (!nextUpdateEncoded) {
return nullptr;
}
@ -644,13 +1211,12 @@ CertID(OCSPResponseContext& context)
return nullptr;
}
SECItem* issuerNameHash = HashedOctetString(context.arena,
&context.issuerCert->derSubject,
context.issuerNameDER,
context.certIDHashAlg);
if (!issuerNameHash) {
return nullptr;
}
SECItem* issuerKeyHash = KeyHashHelper(context.arena,
context.issuerCert.get());
SECItem* issuerKeyHash = KeyHashHelper(context.arena, context.issuerSPKI);
if (!issuerKeyHash) {
return nullptr;
}
@ -711,8 +1277,8 @@ CertStatus(OCSPResponseContext& context)
}
case 1:
{
SECItem* revocationTime = PRTimeToEncodedTime(context.arena,
context.revocationTime);
SECItem* revocationTime = PRTimeToGeneralizedTime(context.arena,
context.revocationTime);
if (!revocationTime) {
return nullptr;
}
@ -728,19 +1294,4 @@ CertStatus(OCSPResponseContext& context)
return nullptr;
}
// SEQUENCE OF Certificate
SECItem*
Certificates(OCSPResponseContext& context)
{
Output output;
for (size_t i = 0; i < context.MaxIncludedCertificates; i++) {
CERTCertificate* cert = context.includedCertificates[i].get();
if (!cert) {
break;
}
output.Add(&cert->derCert);
}
return output.Squash(context.arena, der::SEQUENCE);
}
} } } // namespace mozilla::pkix::test

View File

@ -18,12 +18,86 @@
#ifndef mozilla_pkix_test__pkixtestutils_h
#define mozilla_pkix_test__pkixtestutils_h
#include "pkix/ScopedPtr.h"
#include <stdint.h>
#include <stdio.h>
#include "pkix/enumclass.h"
#include "pkix/pkixtypes.h"
#include "pkix/ScopedPtr.h"
#include "seccomon.h"
namespace mozilla { namespace pkix { namespace test {
namespace {
inline void
fclose_void(FILE* file) {
(void) fclose(file);
}
inline void
SECITEM_FreeItem_true(SECItem* item)
{
SECITEM_FreeItem(item, true);
}
} // unnamed namespace
typedef mozilla::pkix::ScopedPtr<FILE, fclose_void> ScopedFILE;
typedef mozilla::pkix::ScopedPtr<SECItem, SECITEM_FreeItem_true> ScopedSECItem;
typedef mozilla::pkix::ScopedPtr<SECKEYPrivateKey, SECKEY_DestroyPrivateKey>
ScopedSECKEYPrivateKey;
FILE* OpenFile(const char* dir, const char* filename, const char* mode);
extern const PRTime ONE_DAY;
SECStatus GenerateKeyPair(/*out*/ ScopedSECKEYPublicKey& publicKey,
/*out*/ ScopedSECKEYPrivateKey& privateKey);
///////////////////////////////////////////////////////////////////////////////
// Encode Certificates
enum Version { v1 = 0, v2 = 1, v3 = 2 };
// If extensions is null, then no extensions will be encoded. Otherwise,
// extensions must point to a null-terminated array of SECItem*. If the first
// item of the array is null then an empty Extensions sequence will be encoded.
//
// If issuerPrivateKey is null, then the certificate will be self-signed.
// Parameter order is based on the order of the attributes of the certificate
// in RFC 5280.
//
// The return value, if non-null, is owned by the arena in the context and
// MUST NOT be freed.
SECItem* CreateEncodedCertificate(PLArenaPool* arena, long version,
SECOidTag signature, long serialNumber,
const char* issuerASCII, PRTime notBefore,
PRTime notAfter, const char* subjectASCII,
/*optional*/ SECItem const* const* extensions,
/*optional*/ SECKEYPrivateKey* issuerPrivateKey,
SECOidTag signatureHashAlg,
/*out*/ ScopedSECKEYPrivateKey& privateKey);
MOZILLA_PKIX_ENUM_CLASS ExtensionCriticality { NotCritical = 0, Critical = 1 };
// The return value, if non-null, is owned by the arena and MUST NOT be freed.
SECItem* CreateEncodedBasicConstraints(PLArenaPool* arena, bool isCA,
long pathLenConstraint,
ExtensionCriticality criticality);
// ekus must be non-null and must point to a SEC_OID_UNKNOWN-terminated
// array of SECOidTags. If the first item of the array is SEC_OID_UNKNOWN then
// an empty EKU extension will be encoded.
//
// The return value, if non-null, is owned by the arena and MUST NOT be freed.
SECItem* CreateEncodedEKUExtension(PLArenaPool* arena,
const SECOidTag* ekus, size_t ekusCount,
ExtensionCriticality criticality);
///////////////////////////////////////////////////////////////////////////////
// Encode OCSP responses
class OCSPResponseExtension
{
public:
@ -41,35 +115,62 @@ public:
PLArenaPool* arena;
// TODO(bug 980538): add a way to specify what certificates are included.
pkix::ScopedCERTCertificate cert; // The subject of the OCSP response
pkix::ScopedCERTCertificate issuerCert; // The issuer of the subject
pkix::ScopedCERTCertificate signerCert; // This cert signs the response
uint8_t responseStatus; // See the OCSPResponseStatus enum in rfc 6960
// The fields below are in the order that they appear in an OCSP response.
// By directly using the issuer name & SPKI and signer name & private key,
// instead of extracting those things out of CERTCertificate objects, we
// avoid poor interactions with the NSS CERTCertificate caches. In
// particular, there are some tests in which it is important that we know
// that the issuer and/or signer certificates are NOT in the NSS caches
// because we want to make sure that our path building logic will find them
// or we want to test what happens when those certificates cannot be found.
// This concern doesn't apply to |cert| above because our verification code
// for certificate chains and for OCSP responses take the end-entity cert
// as a CERTCertificate anyway.
enum OCSPResponseStatus {
successful = 0,
malformedRequest = 1,
internalError = 2,
tryLater = 3,
// 4 is not used
sigRequired = 5,
unauthorized = 6,
};
uint8_t responseStatus; // an OCSPResponseStatus or an invalid value
bool skipResponseBytes; // If true, don't include responseBytes
static const uint32_t MaxIncludedCertificates = 4;
pkix::ScopedCERTCertificate includedCertificates[MaxIncludedCertificates];
// responderID
const SECItem* issuerNameDER; // non-owning
const CERTSubjectPublicKeyInfo* issuerSPKI; // non-owning pointer
const SECItem* signerNameDER; // If set, responderID will use the byName
// form; otherwise responderID will use the
// byKeyHash form.
// The following fields are on a per-SingleResponse basis. In the future we
// may support including multiple SingleResponses per response.
PRTime producedAt;
PRTime thisUpdate;
PRTime nextUpdate;
bool includeNextUpdate;
SECOidTag certIDHashAlg;
uint8_t certStatus; // See the CertStatus choice in rfc 6960
PRTime revocationTime; // For certStatus == revoked
bool badSignature; // If true, alter the signature to fail verification
enum ResponderIDType {
ByName = 1,
ByKeyHash = 2
};
ResponderIDType responderIDType;
OCSPResponseExtension* extensions;
bool includeEmptyExtensions; // If true, include the extension wrapper
// regardless of if there are any actual
// extensions.
ScopedSECKEYPrivateKey signerPrivateKey;
bool badSignature; // If true, alter the signature to fail verification
SECItem const* const* certs; // non-owning pointer to certs to embed
// The following fields are on a per-SingleResponse basis. In the future we
// may support including multiple SingleResponses per response.
SECOidTag certIDHashAlg;
enum CertStatus {
good = 0,
revoked = 1,
unknown = 2,
};
uint8_t certStatus; // CertStatus or an invalid value
PRTime revocationTime; // For certStatus == revoked
PRTime thisUpdate;
PRTime nextUpdate;
bool includeNextUpdate;
};
// The return value, if non-null, is owned by the arena in the context

View File

@ -31,6 +31,7 @@ unknown = unknown() # singleton
info = {'os': unknown,
'processor': unknown,
'version': unknown,
'os_version': unknown,
'bits': unknown }
(system, node, release, version, machine, processor) = platform.uname()
(bits, linkage) = platform.architecture()
@ -45,28 +46,34 @@ if system in ["Microsoft", "Windows"]:
else:
processor = os.environ.get('PROCESSOR_ARCHITECTURE', processor)
system = os.environ.get("OS", system).replace('_', ' ')
service_pack = os.sys.getwindowsversion()[4]
(major, minor, _, _, service_pack) = os.sys.getwindowsversion()
info['service_pack'] = service_pack
os_version = "%d.%d" % (major, minor)
elif system == "Linux":
if hasattr(platform, "linux_distribution"):
(distro, version, codename) = platform.linux_distribution()
(distro, os_version, codename) = platform.linux_distribution()
else:
(distro, version, codename) = platform.dist()
version = "%s %s" % (distro, version)
(distro, os_version, codename) = platform.dist()
if not processor:
processor = machine
version = "%s %s" % (distro, os_version)
info['os'] = 'linux'
info['linux_distro'] = distro
elif system in ['DragonFly', 'FreeBSD', 'NetBSD', 'OpenBSD']:
info['os'] = 'bsd'
version = sys.platform
version = os_version = sys.platform
elif system == "Darwin":
(release, versioninfo, machine) = platform.mac_ver()
version = "OS X %s" % release
versionNums = release.split('.')[:2]
os_version = "%s.%s" % (versionNums[0], versionNums[1])
info['os'] = 'mac'
elif sys.platform in ('solaris', 'sunos5'):
info['os'] = 'unix'
version = sys.platform
info['version'] = version # os version
os_version = version = sys.platform
info['version'] = version
info['os_version'] = os_version
# processor type and bits
if processor in ["i386", "i686"]:
@ -140,7 +147,7 @@ def find_and_update_from_json(*dirs):
"""
# First, see if we're in an objdir
try:
from mozbuild.base import MozbuildObject
from mozbuild.base import MozbuildObject, BuildEnvironmentNotFoundException
build = MozbuildObject.from_environment()
json_path = _os.path.join(build.topobjdir, "mozinfo.json")
if _os.path.isfile(json_path):
@ -148,6 +155,8 @@ def find_and_update_from_json(*dirs):
return json_path
except ImportError:
pass
except BuildEnvironmentNotFoundException:
pass
for d in dirs:
d = _os.path.abspath(d)

View File

@ -76,6 +76,43 @@ NS_IMPL_CI_INTERFACE_GETTER(nsMultiplexInputStream,
nsIInputStream,
nsISeekableStream)
static nsresult
AvailableMaybeSeek(nsIInputStream *stream, uint64_t *_retval)
{
nsresult rv = stream->Available(_retval);
if (rv == NS_BASE_STREAM_CLOSED) {
// Blindly seek to the current position if Available() returns
// NS_BASE_STREAM_CLOSED.
// If nsIFileInputStream is closed in Read() due to CLOSE_ON_EOF flag,
// Seek() could reopen the file if REOPEN_ON_REWIND flag is set.
nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(stream);
if (seekable) {
nsresult rv = seekable->Seek(nsISeekableStream::NS_SEEK_CUR, 0);
if (NS_SUCCEEDED(rv)) {
rv = stream->Available(_retval);
}
}
}
return rv;
}
static nsresult
TellMaybeSeek(nsISeekableStream *seekable, int64_t *_retval)
{
nsresult rv = seekable->Tell(_retval);
if (rv == NS_BASE_STREAM_CLOSED) {
// Blindly seek to the current position if Tell() returns
// NS_BASE_STREAM_CLOSED.
// If nsIFileInputStream is closed in Read() due to CLOSE_ON_EOF flag,
// Seek() could reopen the file if REOPEN_ON_REWIND flag is set.
nsresult rv = seekable->Seek(nsISeekableStream::NS_SEEK_CUR, 0);
if (NS_SUCCEEDED(rv)) {
rv = seekable->Tell(_retval);
}
}
return rv;
}
nsMultiplexInputStream::nsMultiplexInputStream()
: mCurrentStream(0),
mStartedReadingCurrent(false),
@ -180,7 +217,7 @@ nsMultiplexInputStream::Available(uint64_t *_retval)
uint32_t len = mStreams.Length();
for (uint32_t i = mCurrentStream; i < len; i++) {
uint64_t streamAvail;
rv = mStreams[i]->Available(&streamAvail);
rv = AvailableMaybeSeek(mStreams[i], &streamAvail);
if (NS_WARN_IF(NS_FAILED(rv)))
return rv;
avail += streamAvail;
@ -385,7 +422,7 @@ nsMultiplexInputStream::Seek(int32_t aWhence, int64_t aOffset)
streamPos = 0;
}
else {
rv = stream->Tell(&streamPos);
rv = TellMaybeSeek(stream, &streamPos);
if (NS_WARN_IF(NS_FAILED(rv)))
return rv;
}
@ -409,7 +446,7 @@ nsMultiplexInputStream::Seek(int32_t aWhence, int64_t aOffset)
}
else {
uint64_t avail;
rv = mStreams[i]->Available(&avail);
rv = AvailableMaybeSeek(mStreams[i], &avail);
if (NS_WARN_IF(NS_FAILED(rv)))
return rv;
@ -442,7 +479,7 @@ nsMultiplexInputStream::Seek(int32_t aWhence, int64_t aOffset)
do_QueryInterface(mStreams[i]);
uint64_t avail;
rv = mStreams[i]->Available(&avail);
rv = AvailableMaybeSeek(mStreams[i], &avail);
if (NS_WARN_IF(NS_FAILED(rv)))
return rv;
@ -468,7 +505,7 @@ nsMultiplexInputStream::Seek(int32_t aWhence, int64_t aOffset)
do_QueryInterface(mStreams[i]);
int64_t pos;
rv = stream->Tell(&pos);
rv = TellMaybeSeek(stream, &pos);
if (NS_WARN_IF(NS_FAILED(rv)))
return rv;
@ -520,7 +557,7 @@ nsMultiplexInputStream::Seek(int32_t aWhence, int64_t aOffset)
streamPos = 0;
} else {
uint64_t avail;
rv = mStreams[i]->Available(&avail);
rv = AvailableMaybeSeek(mStreams[i], &avail);
if (NS_WARN_IF(NS_FAILED(rv)))
return rv;
@ -544,7 +581,7 @@ nsMultiplexInputStream::Seek(int32_t aWhence, int64_t aOffset)
remaining += streamPos;
} else {
int64_t avail;
rv = stream->Tell(&avail);
rv = TellMaybeSeek(stream, &avail);
if (NS_WARN_IF(NS_FAILED(rv)))
return rv;
@ -590,7 +627,7 @@ nsMultiplexInputStream::Tell(int64_t *_retval)
return NS_ERROR_NO_INTERFACE;
int64_t pos;
rv = stream->Tell(&pos);
rv = TellMaybeSeek(stream, &pos);
if (NS_WARN_IF(NS_FAILED(rv)))
return rv;
ret64 += pos;