From dbcb528c4089cca8580b05e82af14663465b6ae8 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Thu, 11 Sep 2014 16:48:43 +0100
Subject: vivante: fix composite using non-alpha pictures

The Vivante GPU hardware appears to be buggy with some format and mask
combinations; the problem is particularly noticeable with BGRA/RGBA
swizzles of non-alpha formats.  Fix this by switching to the equivalent
alpha formats and substituting a maximal global alpha value instead.

This allows rendercheck to pass for all formats with PE20 hardware.

Signed-off-by: Russell King
---
 src/vivante_accel.c | 108 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 108 insertions(+)

diff --git a/src/vivante_accel.c b/src/vivante_accel.c
index ae4fb3a..dc05b5f 100644
--- a/src/vivante_accel.c
+++ b/src/vivante_accel.c
@@ -1451,6 +1451,63 @@ int vivante_accel_Composite(CARD8 op, PicturePtr pSrc, PicturePtr pMask,
 
 	final_op = vivante_composite_op[op];
 
+	/*
+	 * There is a bug in the GPU hardware with destinations lacking
+	 * alpha and swizzles BGRA/RGBA.  Rather than the GPU treating
+	 * bits 7:0 as alpha, it continues to treat bits 31:24 as alpha.
+	 * This results in it replacing the B or R bits on input to the
+	 * blend operation with 1.0.  However, it continues to accept the
+	 * non-existent source alpha from bits 31:24.
+	 *
+	 * Work around this by switching to the equivalent alpha format,
+	 * and use global alpha to replace the alpha channel.
+	 */
+	if (!PICT_FORMAT_A(pDst->format)) {
+		final_op.dst_global_alpha = gcvSURF_GLOBAL_ALPHA_ON;
+		final_op.dst_alpha = 255;
+
+		switch (vDst->pict_format) {
+		case gcvSURF_X4R4G4B4:
+			vDst->pict_format = gcvSURF_A4R4G4B4;
+			break;
+		case gcvSURF_X4B4G4R4:
+			vDst->pict_format = gcvSURF_A4B4G4R4;
+			break;
+		case gcvSURF_R4G4B4X4:
+			vDst->pict_format = gcvSURF_R4G4B4A4;
+			break;
+		case gcvSURF_B4G4R4X4:
+			vDst->pict_format = gcvSURF_B4G4R4A4;
+			break;
+		case gcvSURF_X1R5G5B5:
+			vDst->pict_format = gcvSURF_A1R5G5B5;
+			break;
+		case gcvSURF_X1B5G5R5:
+			vDst->pict_format = gcvSURF_A1B5G5R5;
+			break;
+		case gcvSURF_R5G5B5X1:
+			vDst->pict_format = gcvSURF_R5G5B5A1;
+			break;
+		case gcvSURF_B5G5R5X1:
+			vDst->pict_format = gcvSURF_B5G5R5A1;
+			break;
+		case gcvSURF_X8R8G8B8:
+			vDst->pict_format = gcvSURF_A8R8G8B8;
+			break;
+		case gcvSURF_X8B8G8R8:
+			vDst->pict_format = gcvSURF_A8B8G8R8;
+			break;
+		case gcvSURF_R8G8B8X8:
+			vDst->pict_format = gcvSURF_R8G8B8A8;
+			break;
+		case gcvSURF_B8G8R8X8:
+			vDst->pict_format = gcvSURF_B8G8R8A8;
+			break;
+		default:
+			break;
+		}
+	}
+
 	if (pMask) {
 		uint32_t colour;
 
@@ -1613,6 +1670,57 @@ fprintf(stderr, "%s: i: op 0x%02x src=%p,%d,%d mask=%p,%d,%d dst=%p,%d,%d %ux%u\
 					   &xSrc, &ySrc);
 		if (!vSrc)
 			goto failed;
+
+		/*
+		 * Apply the same work-around for a non-alpha source as for
+		 * a non-alpha destination.
+		 */
+		if (!pMask && vSrc != vTemp && !PICT_FORMAT_A(pSrc->format) &&
+		    final_op.src_global_alpha == gcvSURF_GLOBAL_ALPHA_OFF) {
+			final_op.src_global_alpha = gcvSURF_GLOBAL_ALPHA_ON;
+			final_op.src_alpha = 255;
+
+			switch (vSrc->pict_format) {
+			case gcvSURF_X4R4G4B4:
+				vSrc->pict_format = gcvSURF_A4R4G4B4;
+				break;
+			case gcvSURF_X4B4G4R4:
+				vSrc->pict_format = gcvSURF_A4B4G4R4;
+				break;
+			case gcvSURF_R4G4B4X4:
+				vSrc->pict_format = gcvSURF_R4G4B4A4;
+				break;
+			case gcvSURF_B4G4R4X4:
+				vSrc->pict_format = gcvSURF_B4G4R4A4;
+				break;
+			case gcvSURF_X1R5G5B5:
+				vSrc->pict_format = gcvSURF_A1R5G5B5;
+				break;
+			case gcvSURF_X1B5G5R5:
+				vSrc->pict_format = gcvSURF_A1B5G5R5;
+				break;
+			case gcvSURF_R5G5B5X1:
+				vSrc->pict_format = gcvSURF_R5G5B5A1;
+				break;
+			case gcvSURF_B5G5R5X1:
+				vSrc->pict_format = gcvSURF_B5G5R5A1;
+				break;
+			case gcvSURF_X8R8G8B8:
+				vSrc->pict_format = gcvSURF_A8R8G8B8;
+				break;
+			case gcvSURF_X8B8G8R8:
+				vSrc->pict_format = gcvSURF_A8B8G8R8;
+				break;
+			case gcvSURF_R8G8B8X8:
+				vSrc->pict_format = gcvSURF_R8G8B8A8;
+				break;
+			case gcvSURF_B8G8R8X8:
+				vSrc->pict_format = gcvSURF_B8G8R8A8;
+				break;
+			default:
+				break;
+			}
+		}
 	}
 
 //vivante_batch_wait_commit(vivante, vSrc);
-- 
cgit
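
Both hunks apply the same X-format to A-format substitution.  As an
illustrative sketch only (not part of the patch above), the mapping could
be expressed once as a lookup table; this assumes the gceSURF_FORMAT type
and the gcvSURF_* values from the Vivante GAL headers that vivante_accel.c
already includes, and the table and helper names here are hypothetical.

/*
 * Hypothetical helper: map a non-alpha (X) surface format to its
 * equivalent alpha (A) format, or return the format unchanged if it has
 * no such equivalent.  The pairs mirror the two switch statements added
 * by the patch above.
 */
static const struct {
	gceSURF_FORMAT from;	/* non-alpha (X) format */
	gceSURF_FORMAT to;	/* equivalent alpha (A) format */
} vivante_x_to_a[] = {
	{ gcvSURF_X4R4G4B4, gcvSURF_A4R4G4B4 },
	{ gcvSURF_X4B4G4R4, gcvSURF_A4B4G4R4 },
	{ gcvSURF_R4G4B4X4, gcvSURF_R4G4B4A4 },
	{ gcvSURF_B4G4R4X4, gcvSURF_B4G4R4A4 },
	{ gcvSURF_X1R5G5B5, gcvSURF_A1R5G5B5 },
	{ gcvSURF_X1B5G5R5, gcvSURF_A1B5G5R5 },
	{ gcvSURF_R5G5B5X1, gcvSURF_R5G5B5A1 },
	{ gcvSURF_B5G5R5X1, gcvSURF_B5G5R5A1 },
	{ gcvSURF_X8R8G8B8, gcvSURF_A8R8G8B8 },
	{ gcvSURF_X8B8G8R8, gcvSURF_A8B8G8R8 },
	{ gcvSURF_R8G8B8X8, gcvSURF_R8G8B8A8 },
	{ gcvSURF_B8G8R8X8, gcvSURF_B8G8R8A8 },
};

static gceSURF_FORMAT vivante_equivalent_alpha_format(gceSURF_FORMAT fmt)
{
	unsigned int i;

	for (i = 0; i < sizeof(vivante_x_to_a) / sizeof(vivante_x_to_a[0]); i++)
		if (vivante_x_to_a[i].from == fmt)
			return vivante_x_to_a[i].to;

	return fmt;
}

With such a helper, each path would reduce to enabling the global alpha
(dst_global_alpha/dst_alpha or src_global_alpha/src_alpha, as in the patch)
and assigning, e.g., vDst->pict_format =
vivante_equivalent_alpha_format(vDst->pict_format).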