Bug 997593 - Add more color conversion code to support encoding QCOM Venus NV12 images. r=roc

John Lin, 2014-09-14 19:40:00 +02:00
parent 716b37bff9
commit f0b0c7c2ab
2 changed files with 51 additions and 26 deletions

@@ -199,14 +199,6 @@ VideoTrackEncoder::NotifyQueuedTrackChanges(MediaStreamGraph* aGraph,
     if (!chunk.IsNull()) {
       gfx::IntSize imgsize = chunk.mFrame.GetImage()->GetSize();
       gfxIntSize intrinsicSize = chunk.mFrame.GetIntrinsicSize();
-#ifdef MOZ_WIDGET_GONK
-      // Block the video frames come from video source.
-      if (chunk.mFrame.GetImage()->GetFormat() != ImageFormat::PLANAR_YCBCR) {
-        LOG("Can't encode this ImageFormat %x", chunk.mFrame.GetImage()->GetFormat());
-        NotifyCancel();
-        break;
-      }
-#endif
       nsresult rv = Init(imgsize.width, imgsize.height,
                          intrinsicSize.width, intrinsicSize.height,
                          aTrackRate);

@@ -299,9 +299,10 @@ ConvertPlanarYCbCrToNV12(const PlanarYCbCrData* aSource, uint8_t* aDestination)
 // Convert pixels in graphic buffer to NV12 format. aSource is the layer image
 // containing source graphic buffer, and aDestination is the destination of
-// conversion. Currently only 2 source format are supported:
+// conversion. Currently 3 source formats are supported:
 // - NV21/HAL_PIXEL_FORMAT_YCrCb_420_SP (from camera preview window).
 // - YV12/HAL_PIXEL_FORMAT_YV12 (from video decoder).
+// - QCOM proprietary/HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS (from Flame HW video decoder).
 static void
 ConvertGrallocImageToNV12(GrallocImage* aSource, uint8_t* aDestination)
 {
@@ -309,9 +310,6 @@ ConvertGrallocImageToNV12(GrallocImage* aSource, uint8_t* aDestination)
   sp<GraphicBuffer> graphicBuffer = aSource->GetGraphicBuffer();
   int pixelFormat = graphicBuffer->getPixelFormat();
-  // Only support NV21 (from camera) or YV12 (from HW decoder output) for now.
-  NS_ENSURE_TRUE_VOID(pixelFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP ||
-                      pixelFormat == HAL_PIXEL_FORMAT_YV12);
   void* imgPtr = nullptr;
   graphicBuffer->lock(GraphicBuffer::USAGE_SW_READ_MASK, &imgPtr);
@@ -353,6 +351,26 @@ ConvertGrallocImageToNV12(GrallocImage* aSource, uint8_t* aDestination)
       yuv.mCbSkip = 0;
       ConvertPlanarYCbCrToNV12(&yuv, aDestination);
       break;
+    // From QCOM video decoder on Flame. See bug 997593.
+    case GrallocImage::HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS:
+      // Venus formats are documented in kernel/include/media/msm_media_info.h:
+      yuv.mYChannel = static_cast<uint8_t*>(imgPtr);
+      yuv.mYSkip = 0;
+      yuv.mYSize.width = graphicBuffer->getWidth();
+      yuv.mYSize.height = graphicBuffer->getHeight();
+      // - Y & UV width aligned to 128.
+      yuv.mYStride = (yuv.mYSize.width + 127) & ~127;
+      yuv.mCbCrSize.width = yuv.mYSize.width / 2;
+      yuv.mCbCrSize.height = yuv.mYSize.height / 2;
+      // - Y height aligned to 32.
+      yuv.mCbChannel = yuv.mYChannel + (yuv.mYStride * ((yuv.mYSize.height + 31) & ~31));
+      // Interleaved UV plane.
+      yuv.mCbSkip = 1;
+      yuv.mCrChannel = yuv.mCbChannel + 1;
+      yuv.mCrSkip = 1;
+      yuv.mCbCrStride = yuv.mYStride;
+      ConvertPlanarYCbCrToNV12(&yuv, aDestination);
+      break;
     default:
       NS_ERROR("Unsupported input gralloc image type. Should never be here.");
   }
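
Note on the new HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS case: the plane layout follows two alignment rules from kernel/include/media/msm_media_info.h, namely that the Y (and interleaved CbCr) stride is the frame width rounded up to a multiple of 128, and the Y plane height is rounded up to a multiple of 32, so the CbCr plane begins yStride * alignedHeight bytes into the buffer. The sketch below restates that arithmetic in isolation; it is illustrative only, and the helper and struct names are not part of the patch.

// Illustrative sketch, not part of the patch: the Venus NV12 plane layout
// implied by the case above. Alignment rules per msm_media_info.h.
#include <cstddef>
#include <cstdint>

struct VenusNV12Layout {
  std::size_t yStride;     // bytes per row, shared by the Y and interleaved CbCr planes
  std::size_t yScanlines;  // padded row count of the Y plane
  std::size_t cbcrOffset;  // byte offset of the CbCr (UV) plane from the buffer start
};

static VenusNV12Layout
ComputeVenusNV12Layout(uint32_t aWidth, uint32_t aHeight)
{
  VenusNV12Layout layout;
  layout.yStride = (aWidth + 127) & ~127u;    // width rounded up to a multiple of 128
  layout.yScanlines = (aHeight + 31) & ~31u;  // Y height rounded up to a multiple of 32
  layout.cbcrOffset = layout.yStride * layout.yScanlines;
  return layout;
}

For example, a 1280x720 frame gives yStride = 1280 and yScanlines = 736, so the interleaved CbCr plane starts at byte offset 942080, which is exactly where yuv.mCbChannel points in the case above.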
@@ -369,6 +387,31 @@ OMXVideoEncoder::Encode(const Image* aImage, int aWidth, int aHeight,
   NS_ENSURE_TRUE(aWidth == mWidth && aHeight == mHeight && aTimestamp >= 0,
                  NS_ERROR_INVALID_ARG);
+  Image* img = const_cast<Image*>(aImage);
+  ImageFormat format = ImageFormat::PLANAR_YCBCR;
+  if (img) {
+    format = img->GetFormat();
+    gfx::IntSize size = img->GetSize();
+    // Validate input image.
+    NS_ENSURE_TRUE(aWidth == size.width, NS_ERROR_INVALID_ARG);
+    NS_ENSURE_TRUE(aHeight == size.height, NS_ERROR_INVALID_ARG);
+    if (format == ImageFormat::PLANAR_YCBCR) {
+      NS_ENSURE_TRUE(static_cast<PlanarYCbCrImage*>(img)->IsValid(),
+                     NS_ERROR_INVALID_ARG);
+    } else if (format == ImageFormat::GRALLOC_PLANAR_YCBCR) {
+      // Reject unsupported gralloc-ed buffers.
+      int halFormat = static_cast<GrallocImage*>(img)->GetGraphicBuffer()->getPixelFormat();
+      NS_ENSURE_TRUE(halFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP ||
+                     halFormat == HAL_PIXEL_FORMAT_YV12 ||
+                     halFormat == GrallocImage::HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS,
+                     NS_ERROR_INVALID_ARG);
+    } else {
+      // TODO: support RGB to YUV color conversion.
+      NS_ERROR("Unsupported input image type.");
+      return NS_ERROR_INVALID_ARG;
+    }
+  }
   status_t result;
   // Dequeue an input buffer.
@@ -385,30 +428,20 @@ OMXVideoEncoder::Encode(const Image* aImage, int aWidth, int aHeight,
   // Buffer should be large enough to hold input image data.
   MOZ_ASSERT(dstSize >= yLen + uvLen);
-  inBuf->setRange(0, yLen + uvLen);
+  dstSize = yLen + uvLen;
+  inBuf->setRange(0, dstSize);
-  if (!aImage) {
+  if (!img) {
     // Generate muted/black image directly in buffer.
-    dstSize = yLen + uvLen;
     // Fill Y plane.
     memset(dst, 0x10, yLen);
     // Fill UV plane.
     memset(dst + yLen, 0x80, uvLen);
   } else {
-    Image* img = const_cast<Image*>(aImage);
-    ImageFormat format = img->GetFormat();
-    MOZ_ASSERT(aWidth == img->GetSize().width &&
-               aHeight == img->GetSize().height);
     if (format == ImageFormat::GRALLOC_PLANAR_YCBCR) {
       ConvertGrallocImageToNV12(static_cast<GrallocImage*>(img), dst);
     } else if (format == ImageFormat::PLANAR_YCBCR) {
       ConvertPlanarYCbCrToNV12(static_cast<PlanarYCbCrImage*>(img)->GetData(),
                                dst);
-    } else {
-      // TODO: support RGB to YUV color conversion.
-      NS_ERROR("Unsupported input image type.");
     }
   }
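
A note on the muted-frame branch above: when no image is supplied, the input buffer is filled as NV12 directly, 0x10 for every Y sample (video-range black) and 0x80 for every interleaved Cb/Cr sample (neutral chroma). A minimal self-contained sketch of the same fill follows; it assumes yLen = width * height and uvLen = yLen / 2, the usual NV12 plane sizes, since the actual yLen/uvLen computation sits outside this hunk.

// Minimal sketch, not part of the patch: build a black NV12 frame the same way
// the muted branch above does. yLen = w * h and uvLen = yLen / 2 are assumptions;
// the patch computes them earlier, outside this hunk.
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

static std::vector<uint8_t>
MakeBlackNV12Frame(int aWidth, int aHeight)
{
  const std::size_t yLen = static_cast<std::size_t>(aWidth) * static_cast<std::size_t>(aHeight);
  const std::size_t uvLen = yLen / 2;
  std::vector<uint8_t> buf(yLen + uvLen);
  std::memset(buf.data(), 0x10, yLen);          // Y plane: video-range black
  std::memset(buf.data() + yLen, 0x80, uvLen);  // interleaved CbCr plane: neutral chroma
  return buf;
}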