Bug 975930 - [LayerScope] All textured buffers show RB swapped which is incorrect. r=jgilbert, r=kamidphish

Morris Tseng 2014-03-11 11:14:50 -04:00
parent 9229a9cf21
commit 39d993582d
3 changed files with 216 additions and 22 deletions


@@ -231,6 +231,71 @@ static void SwapRAndBComponents(DataSourceSurface* surf)
}
}
static uint16_t PackRGB565(uint8_t r, uint8_t g, uint8_t b)
{
uint16_t pixel = ((r << 8) & 0xf800) |
((g << 3) & 0x07e0) |
((b >> 3) & 0x001f);
return pixel;
}
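(Aside: a quick sanity check of this packing as a standalone sketch, not part of the patch; it assumes full-range 8-bit inputs, of which only the top 5/6/5 bits survive quantization.)

#include <cstdint>
#include <cstdio>

static uint16_t Pack565(uint8_t r, uint8_t g, uint8_t b)
{
    // Top 5 bits of r -> bits 15-11, top 6 of g -> bits 10-5, top 5 of b -> bits 4-0.
    return ((r << 8) & 0xf800) | ((g << 3) & 0x07e0) | ((b >> 3) & 0x001f);
}

int main()
{
    uint16_t p = Pack565(0xff, 0x80, 0x10);  // 0xfc02
    // Unpack by reversing the shifts; the low bits are lost.
    uint8_t r = (p >> 11) << 3;              // 0xf8
    uint8_t g = ((p >> 5) & 0x3f) << 2;      // 0x80
    uint8_t b = (p & 0x1f) << 3;             // 0x10
    printf("%02x %02x %02x\n", r, g, b);
    return 0;
}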
static void CopyDataSourceSurface(DataSourceSurface* aSource,
DataSourceSurface* aDest)
{
MOZ_ASSERT(aSource->GetSize() == aDest->GetSize());
MOZ_ASSERT(aSource->GetFormat() == SurfaceFormat::R8G8B8A8 ||
aSource->GetFormat() == SurfaceFormat::R8G8B8X8);
uint8_t *srcRow = aSource->GetData();
size_t srcRowBytes = aSource->GetSize().width * BytesPerPixel(aSource->GetFormat());
size_t srcRowHole = aSource->Stride() - srcRowBytes;
uint8_t *destRow = aDest->GetData();
size_t destRowBytes = aDest->GetSize().width * BytesPerPixel(aDest->GetFormat());
size_t destRowHole = aDest->Stride() - destRowBytes;
bool needsRBSwap = false;
if (aDest->GetFormat() == SurfaceFormat::B8G8R8A8 ||
aDest->GetFormat() == SurfaceFormat::B8G8R8X8 ||
aDest->GetFormat() == SurfaceFormat::R5G6B5) {
needsRBSwap = true;
}
bool needsConvertTo16Bits = false;
if (aDest->GetFormat() == SurfaceFormat::R5G6B5) {
needsConvertTo16Bits = true;
}
size_t rows = aSource->GetSize().height;
while (rows) {
const uint8_t *srcRowEnd = srcRow + srcRowBytes;
while (srcRow != srcRowEnd) {
uint8_t r = needsRBSwap ? srcRow[2] : srcRow[0];
uint8_t g = srcRow[1];
uint8_t b = needsRBSwap ? srcRow[0] : srcRow[2];
uint8_t a = srcRow[3];
if (needsConvertTo16Bits) {
*(uint16_t*)destRow = PackRGB565(r, g, b);
} else {
destRow[0] = r;
destRow[1] = g;
destRow[2] = b;
destRow[3] = a;
}
srcRow += BytesPerPixel(aSource->GetFormat());
destRow += BytesPerPixel(aDest->GetFormat());
}
srcRow += srcRowHole;
destRow += destRowHole;
--rows;
}
}
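(Aside: in use, CopyDataSourceSurface takes a surface read back as RGBA and converts it to the caller's format. A minimal sketch, assuming the Moz2D Factory API and a `size` in scope:)

// Hypothetical usage: convert an R8G8B8A8 readback into B8G8R8A8,
// letting the helper do the per-pixel R<->B swap.
RefPtr<DataSourceSurface> src =
    Factory::CreateDataSourceSurface(size, SurfaceFormat::R8G8B8A8);
RefPtr<DataSourceSurface> dst =
    Factory::CreateDataSourceSurface(size, SurfaceFormat::B8G8R8A8);
// ... fill src, e.g. from glReadPixels ...
CopyDataSourceSurface(src, dst);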
static int
CalcStride(int width, int pixelSize, int alignment)
{
@@ -437,6 +502,134 @@ ReadPixelsIntoImageSurface(GLContext* gl, gfxImageSurface* dest) {
#endif
}
void
ReadPixelsIntoDataSourceSurface(GLContext* gl, DataSourceSurface* dest) {
gl->MakeCurrent();
MOZ_ASSERT(dest->GetSize().width != 0);
MOZ_ASSERT(dest->GetSize().height != 0);
bool hasAlpha = dest->GetFormat() == SurfaceFormat::B8G8R8A8 ||
dest->GetFormat() == SurfaceFormat::R8G8B8A8;
int destPixelSize;
GLenum destFormat;
GLenum destType;
switch (dest->GetFormat()) {
case SurfaceFormat::B8G8R8A8:
case SurfaceFormat::B8G8R8X8:
// Needs host (little) endian ARGB.
destFormat = LOCAL_GL_BGRA;
destType = LOCAL_GL_UNSIGNED_INT_8_8_8_8_REV;
break;
case SurfaceFormat::R8G8B8A8:
case SurfaceFormat::R8G8B8X8:
// Needs host (little) endian ABGR.
destFormat = LOCAL_GL_RGBA;
destType = LOCAL_GL_UNSIGNED_BYTE;
break;
case SurfaceFormat::R5G6B5:
destFormat = LOCAL_GL_RGB;
destType = LOCAL_GL_UNSIGNED_SHORT_5_6_5_REV;
break;
default:
MOZ_CRASH("Bad format.");
}
destPixelSize = BytesPerPixel(dest->GetFormat());
MOZ_ASSERT(dest->GetSize().width * destPixelSize <= dest->Stride());
GLenum readFormat = destFormat;
GLenum readType = destType;
bool needsTempSurf = !GetActualReadFormats(gl,
destFormat, destType,
readFormat, readType);
RefPtr<DataSourceSurface> tempSurf;
DataSourceSurface* readSurf = nullptr;
int readAlignment = 0;
if (needsTempSurf) {
if (gl->DebugMode()) {
NS_WARNING("Needing intermediary surface for ReadPixels. This will be slow!");
}
SurfaceFormat readFormatGFX;
// If a temp surface is needed, readFormat is always LOCAL_GL_RGBA
// and readType is always LOCAL_GL_UNSIGNED_BYTE.
MOZ_ASSERT(readFormat == LOCAL_GL_RGBA);
MOZ_ASSERT(readType == LOCAL_GL_UNSIGNED_BYTE);
readFormatGFX = hasAlpha ? SurfaceFormat::R8G8B8A8
: SurfaceFormat::R8G8B8X8;
readAlignment = 1;
int32_t stride = dest->GetSize().width * BytesPerPixel(readFormatGFX);
tempSurf = Factory::CreateDataSourceSurfaceWithStride(dest->GetSize(),
readFormatGFX,
stride);
readSurf = tempSurf;
} else {
// Figure out alignment. We don't need to know why, we just need it
// to be valid.
readAlignment = GuessAlignment(dest->GetSize().width,
destPixelSize,
dest->Stride());
readSurf = dest;
}
MOZ_ASSERT(readAlignment);
MOZ_ASSERT(reinterpret_cast<uintptr_t>(readSurf->GetData()) % readAlignment == 0);
GLint currentPackAlignment = 0;
gl->fGetIntegerv(LOCAL_GL_PACK_ALIGNMENT, &currentPackAlignment);
if (currentPackAlignment != readAlignment)
gl->fPixelStorei(LOCAL_GL_PACK_ALIGNMENT, readAlignment);
GLsizei width = dest->GetSize().width;
GLsizei height = dest->GetSize().height;
gl->fReadPixels(0, 0,
width, height,
readFormat, readType,
readSurf->GetData());
if (currentPackAlignment != readAlignment)
gl->fPixelStorei(LOCAL_GL_PACK_ALIGNMENT, currentPackAlignment);
if (readSurf != dest) {
MOZ_ASSERT(readFormat == LOCAL_GL_RGBA);
MOZ_ASSERT(readType == LOCAL_GL_UNSIGNED_BYTE);
CopyDataSourceSurface(readSurf, dest);
}
// Check if GL is giving back 1.0 alpha for
// RGBA reads to RGBA images from no-alpha buffers.
#ifdef XP_MACOSX
if (gl->WorkAroundDriverBugs() &&
gl->Vendor() == gl::GLVendor::NVIDIA &&
(dest->GetFormat() == SurfaceFormat::R8G8B8A8 ||
dest->GetFormat() == SurfaceFormat::B8G8R8A8) &&
width && height)
{
GLint alphaBits = 0;
gl->fGetIntegerv(LOCAL_GL_ALPHA_BITS, &alphaBits);
if (!alphaBits) {
const uint32_t alphaMask = gfxPackedPixelNoPreMultiply(0xff,0,0,0);
MOZ_ASSERT(dest->GetSize().width * destPixelSize == dest->Stride());
uint32_t* itr = (uint32_t*)dest->GetData();
uint32_t testPixel = *itr;
if ((testPixel & alphaMask) != alphaMask) {
// We need to set the alpha channel to 1.0 manually.
uint32_t* itrEnd = itr + width*height; // Stride is guaranteed to be width*4.
for (; itr != itrEnd; itr++) {
*itr |= alphaMask;
}
}
}
}
#endif
}
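(Aside: a sketch of the intended calling pattern, assuming `gl`, `width`, and `height` are in scope; the helper handles any needed format conversion via the temporary surface above.)

RefPtr<DataSourceSurface> surf =
    Factory::CreateDataSourceSurface(gfx::IntSize(width, height),
                                     SurfaceFormat::B8G8R8A8);
if (surf) {
    ReadPixelsIntoDataSourceSurface(gl, surf);
}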
static TemporaryRef<DataSourceSurface> YInvertImageSurface(DataSourceSurface* aSurf)
{
RefPtr<DataSourceSurface> temp =
@@ -519,10 +712,10 @@ ReadScreenIntoImageSurface(GLContext* gl, gfxImageSurface* dest)
break; \
}
-already_AddRefed<gfxImageSurface>
+TemporaryRef<DataSourceSurface>
GLReadTexImageHelper::ReadTexImage(GLuint aTextureId,
GLenum aTextureTarget,
-const gfxIntSize& aSize,
+const gfx::IntSize& aSize,
/* ShaderConfigOGL.mFeature */ int aConfig,
bool aYInvert)
{
@@ -533,10 +726,11 @@ GLReadTexImageHelper::ReadTexImage(GLuint aTextureId,
mGL->MakeCurrent();
/* Allocate resulting image surface */
-nsRefPtr<gfxImageSurface> isurf = new gfxImageSurface(aSize, gfxImageFormat::ARGB32);
-if (!isurf || isurf->CairoStatus()) {
-return nullptr;
-}
+int32_t stride = aSize.width * BytesPerPixel(SurfaceFormat::R8G8B8A8);
+RefPtr<DataSourceSurface> isurf =
+Factory::CreateDataSourceSurfaceWithStride(aSize,
+SurfaceFormat::R8G8B8A8,
+stride);
GLint oldrb, oldfb, oldprog, oldTexUnit, oldTex;
GLuint rb, fb;
@@ -640,7 +834,7 @@ GLReadTexImageHelper::ReadTexImage(GLuint aTextureId,
mGL->fDisableVertexAttribArray(0);
/* Read-back draw results */
-ReadPixelsIntoImageSurface(mGL, isurf);
+ReadPixelsIntoDataSourceSurface(mGL, isurf);
CLEANUP_IF_GLERROR_OCCURRED("when reading pixels into surface");
} while (false);


@@ -60,11 +60,11 @@ public:
* NOTE: aShaderProgram is really mozilla::layers::ShaderProgramType. It is
* passed as int to eliminate including LayerManagerOGLProgram.h here.
*/
-already_AddRefed<gfxImageSurface> ReadTexImage(GLuint aTextureId,
-GLenum aTextureTarget,
-const gfxIntSize& aSize,
-/* ShaderProgramType */ int aShaderProgram,
-bool aYInvert = false);
+TemporaryRef<gfx::DataSourceSurface> ReadTexImage(GLuint aTextureId,
+GLenum aTextureTarget,
+const gfx::IntSize& aSize,
+/* ShaderProgramType */ int aShaderProgram,
+bool aYInvert = false);
};
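(Aside: callers migrate from the cairo-backed type to Moz2D accordingly; a rough sketch, with `helper`, `w`, `h`, `shaderConfig`, and `flipY` as hypothetical placeholders:)

// Before: nsRefPtr<gfxImageSurface> img = helper->ReadTexImage(...);
// After:
RefPtr<gfx::DataSourceSurface> img =
    helper->ReadTexImage(0, LOCAL_GL_TEXTURE_2D,
                         gfx::IntSize(w, h), shaderConfig, flipY);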


@@ -421,7 +421,7 @@ public:
class DebugGLTextureData : public DebugGLData {
public:
-DebugGLTextureData(GLContext* cx, void* layerRef, GLuint target, GLenum name, gfxImageSurface* img)
+DebugGLTextureData(GLContext* cx, void* layerRef, GLuint target, GLenum name, DataSourceSurface* img)
: DebugGLData(DebugGLData::TextureData, cx),
mLayerRef(layerRef),
mTarget(target),
@@ -431,7 +431,7 @@ public:
void *GetLayerRef() const { return mLayerRef; }
GLuint GetName() const { return mName; }
-gfxImageSurface* GetImage() const { return mImage; }
+DataSourceSurface* GetImage() const { return mImage; }
GLenum GetTextureTarget() const { return mTarget; }
virtual bool Write() {
@@ -449,13 +449,13 @@ public:
packet.dataFormat = LOCAL_GL_RGBA;
if (mImage) {
-packet.width = mImage->Width();
-packet.height = mImage->Height();
+packet.width = mImage->GetSize().width;
+packet.height = mImage->GetSize().height;
packet.stride = mImage->Stride();
-packet.dataSize = mImage->GetDataSize();
+packet.dataSize = mImage->GetSize().height * mImage->Stride();
-dataptr = (char*) mImage->Data();
-datasize = mImage->GetDataSize();
+dataptr = (char*) mImage->GetData();
+datasize = packet.dataSize;
compresseddata = std::auto_ptr<char>((char*) moz_malloc(LZ4::maxCompressedSize(datasize)));
if (compresseddata.get()) {
@@ -497,7 +497,7 @@ protected:
void* mLayerRef;
GLenum mTarget;
GLuint mName;
-nsRefPtr<gfxImageSurface> mImage;
+RefPtr<DataSourceSurface> mImage;
};
class DebugGLColorData : public DebugGLData {
@@ -722,9 +722,9 @@ SendTextureSource(GLContext* aGLContext,
// By sending 0 to ReadTextureImage rely upon aSource->BindTexture binding
// texture correctly. textureId is used for tracking in DebugGLTextureData.
-nsRefPtr<gfxImageSurface> img =
+RefPtr<DataSourceSurface> img =
aGLContext->ReadTexImageHelper()->ReadTexImage(0, textureTarget,
-gfxIntSize(size.width, size.height),
+size,
shaderConfig, aFlipY);
shaderConfig, aFlipY);
gLayerScopeWebSocketManager->AppendDebugData(