Backout bug 828789 (bb755890ec69) because of bug 829335

Backout pixman update.
CLOSED TREE

--HG--
extra : transplant_source : Q%C9%98%DD%A3%83%8CW%22%18k%7B%26VR%8C%5B%FF%91%3E
Jeff Muizelaar 2013-01-10 18:50:12 -05:00
parent 41d5ac5eff
commit 00dd720d3e
41 changed files with 594 additions and 6978 deletions

View File

@ -83,34 +83,29 @@ endif
CSRCS = \
pixman-access.c \
pixman-access-accessors.c \
pixman-arm.c \
pixman-bits-image.c \
pixman.c \
pixman-combine16.c \
pixman-combine32.c \
pixman-combine64.c \
pixman-conical-gradient.c \
pixman-cpu.c \
pixman-edge.c \
pixman-edge-accessors.c \
pixman-fast-path.c \
pixman-general.c \
pixman-gradient-walker.c \
pixman-glyph.c \
pixman-image.c \
pixman-implementation.c \
pixman-linear-gradient.c \
pixman-matrix.c \
pixman-mips.c \
pixman-noop.c \
pixman-ppc.c \
pixman-radial-gradient.c \
pixman-region16.c \
pixman-region32.c \
pixman-solid-fill.c \
pixman-srgb.c \
pixman-trap.c \
pixman-utils.c \
pixman-x86.c \
$(NULL)
ifdef USE_MMX

View File

@ -32,8 +32,8 @@
#include <string.h>
#include <assert.h>
#include "pixman-accessor.h"
#include "pixman-private.h"
#include "pixman-accessor.h"
#define CONVERT_RGB24_TO_Y15(s) \
(((((s) >> 16) & 0xff) * 153 + \
@ -210,7 +210,6 @@ get_shifts (pixman_format_code_t format,
break;
case PIXMAN_TYPE_ARGB:
case PIXMAN_TYPE_ARGB_SRGB:
*b = 0;
*g = *b + PIXMAN_FORMAT_B (format);
*r = *g + PIXMAN_FORMAT_G (format);
@ -1066,130 +1065,6 @@ fetch_pixel_generic_64 (bits_image_t *image,
return result;
}
/* The 32_sRGB paths should be deleted after narrow processing
* is no longer invoked for formats that are considered wide.
* (Also see fetch_pixel_generic_lossy_32) */
static void
fetch_scanline_a8r8g8b8_32_sRGB (pixman_image_t *image,
int x,
int y,
int width,
uint32_t *buffer,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint32_t *pixel = (uint32_t *)bits + x;
const uint32_t *end = pixel + width;
uint32_t tmp;
while (pixel < end)
{
tmp = READ (image, pixel++);
*buffer++ = (tmp >> 24) << 24
| (srgb_to_linear[(tmp >> 16) & 0xff] >> 8) << 16
| (srgb_to_linear[(tmp >> 8) & 0xff] >> 8) << 8
| (srgb_to_linear[(tmp >> 0) & 0xff] >> 8) << 0;
}
}
static void
fetch_scanline_a8r8g8b8_64_sRGB (pixman_image_t *image,
int x,
int y,
int width,
uint32_t *b,
const uint32_t *mask)
{
const uint32_t *bits = image->bits.bits + y * image->bits.rowstride;
const uint32_t *pixel = (uint32_t *)bits + x;
const uint32_t *end = pixel + width;
uint64_t *buffer = (uint64_t *)b;
uint32_t tmp;
while (pixel < end)
{
tmp = READ (image, pixel++);
*buffer++ = (uint64_t) ((tmp >> 24) * 257) << 48
| (uint64_t) srgb_to_linear[(tmp >> 16) & 0xff] << 32
| (uint64_t) srgb_to_linear[(tmp >> 8) & 0xff] << 16
| (uint64_t) srgb_to_linear[(tmp >> 0) & 0xff] << 0;
}
}
static uint32_t
fetch_pixel_a8r8g8b8_32_sRGB (bits_image_t *image,
int offset,
int line)
{
uint32_t *bits = image->bits + line * image->rowstride;
uint32_t tmp = READ (image, bits + offset);
return (tmp >> 24) << 24
| (srgb_to_linear[(tmp >> 16) & 0xff] >> 8) << 16
| (srgb_to_linear[(tmp >> 8) & 0xff] >> 8) << 8
| (srgb_to_linear[(tmp >> 0) & 0xff] >> 8) << 0;
}
static uint64_t
fetch_pixel_a8r8g8b8_64_sRGB (bits_image_t *image,
int offset,
int line)
{
uint32_t *bits = image->bits + line * image->rowstride;
uint32_t tmp = READ (image, bits + offset);
return (uint64_t) ((tmp >> 24) * 257) << 48
| (uint64_t) srgb_to_linear[(tmp >> 16) & 0xff] << 32
| (uint64_t) srgb_to_linear[(tmp >> 8) & 0xff] << 16
| (uint64_t) srgb_to_linear[(tmp >> 0) & 0xff] << 0;
}
static void
store_scanline_a8r8g8b8_32_sRGB (bits_image_t *image,
int x,
int y,
int width,
const uint32_t *v)
{
uint32_t *bits = image->bits + image->rowstride * y;
uint64_t *values = (uint64_t *)v;
uint32_t *pixel = bits + x;
uint64_t tmp;
int i;
for (i = 0; i < width; ++i)
{
tmp = values[i];
WRITE (image, pixel++,
((uint32_t) (tmp >> 24 ) << 24)
| (linear_to_srgb[(tmp >> 16 << 4) & 0xfff] << 16)
| (linear_to_srgb[(tmp >> 8 << 4) & 0xfff] << 8)
| (linear_to_srgb[(tmp >> 0 << 4) & 0xfff] << 0));
}
}
static void
store_scanline_a8r8g8b8_64_sRGB (bits_image_t *image,
int x,
int y,
int width,
const uint32_t *v)
{
uint32_t *bits = image->bits + image->rowstride * y;
uint64_t *values = (uint64_t *)v;
uint32_t *pixel = bits + x;
uint64_t tmp;
int i;
for (i = 0; i < width; ++i)
{
tmp = values[i];
WRITE (image, pixel++,
((uint32_t) (tmp >> 56) << 24)
| (linear_to_srgb[(tmp >> 36) & 0xfff] << 16)
| (linear_to_srgb[(tmp >> 20) & 0xfff] << 8)
| (linear_to_srgb[(tmp >> 4) & 0xfff] << 0));
}
}
/*
* XXX: The transformed fetch path only works at 32-bpp so far. When all
* paths have wide versions, this can be removed.
@ -1257,13 +1132,6 @@ static const format_info_t accessors[] =
FORMAT_INFO (r8g8b8x8),
FORMAT_INFO (x14r6g6b6),
/* sRGB formats */
{ PIXMAN_a8r8g8b8_sRGB,
fetch_scanline_a8r8g8b8_32_sRGB,
fetch_scanline_a8r8g8b8_64_sRGB,
fetch_pixel_a8r8g8b8_32_sRGB, fetch_pixel_a8r8g8b8_64_sRGB,
store_scanline_a8r8g8b8_32_sRGB, store_scanline_a8r8g8b8_64_sRGB },
/* 24bpp formats */
FORMAT_INFO (r8g8b8),
FORMAT_INFO (b8g8r8),
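The deleted fetchers depend on two lookup tables from the also-removed pixman-srgb.c: srgb_to_linear (8-bit sRGB component to 16-bit linear) and linear_to_srgb (12-bit linear index back to 8-bit sRGB), with the widths implied by the shifts above (the 32-bit path takes each 16-bit entry >> 8; the store path indexes with a 12-bit value). A sketch of how the forward table can be generated from the standard sRGB transfer function; the table name and sizes come from the code above, while the generator itself is an assumption, not the literal pixman source:

#include <math.h>
#include <stdint.h>

static uint16_t srgb_to_linear[256];

/* Fill the 8-bit-sRGB -> 16-bit-linear table from the standard
 * sRGB electro-optical transfer function. */
static void
build_srgb_to_linear (void)
{
    int i;

    for (i = 0; i < 256; ++i)
    {
        double c = i / 255.0;
        double linear = (c <= 0.04045) ? c / 12.92
                                       : pow ((c + 0.055) / 1.055, 2.4);

        srgb_to_linear[i] = (uint16_t) (linear * 65535.0 + 0.5);
    }
}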

View File

@ -64,7 +64,6 @@
.altmacro
.p2align 2
#include "pixman-private.h"
#include "pixman-arm-neon-asm.h"
/*
@ -489,12 +488,12 @@ fname:
vmull.u8 q1, d0, d28
vmlal.u8 q1, d1, d29
/* 5 cycles bubble */
vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q0, d2, #8
vmlsl.u16 q0, d2, d30
vmlal.u16 q0, d3, d30
/* 5 cycles bubble */
bilinear_duplicate_mask mask_fmt, 1, d4
vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d0, q0, #16
/* 3 cycles bubble */
vmovn.u16 d0, q0
/* 1 cycle bubble */
@ -515,16 +514,16 @@ fname:
q1, q11, d0, d1, d20, d21, d22, d23
bilinear_load_mask mask_fmt, 2, d4
bilinear_load_dst dst_fmt, op, 2, d18, d19, q9
vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q0, d2, #8
vmlsl.u16 q0, d2, d30
vmlal.u16 q0, d3, d30
vshll.u16 q10, d22, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q10, d22, #8
vmlsl.u16 q10, d22, d31
vmlal.u16 q10, d23, d31
vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d1, q10, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d0, q0, #16
vshrn.u32 d1, q10, #16
bilinear_duplicate_mask mask_fmt, 2, d4
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #8
vadd.u16 q12, q12, q13
vmovn.u16 d0, q0
bilinear_interleave_src_dst \
@ -545,29 +544,29 @@ fname:
q3, q9, d4, d5, d16, d17, d18, d19
pld [TMP1, PF_OFFS]
sub TMP1, TMP1, STRIDE
vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q0, d2, #8
vmlsl.u16 q0, d2, d30
vmlal.u16 q0, d3, d30
vshll.u16 q10, d22, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q10, d22, #8
vmlsl.u16 q10, d22, d31
vmlal.u16 q10, d23, d31
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshll.u16 q2, d6, #BILINEAR_INTERPOLATION_BITS
vshr.u16 q15, q12, #8
vshll.u16 q2, d6, #8
vmlsl.u16 q2, d6, d30
vmlal.u16 q2, d7, d30
vshll.u16 q8, d18, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q8, d18, #8
bilinear_load_mask mask_fmt, 4, d22
bilinear_load_dst dst_fmt, op, 4, d2, d3, q1
pld [TMP1, PF_OFFS]
vmlsl.u16 q8, d18, d31
vmlal.u16 q8, d19, d31
vadd.u16 q12, q12, q13
vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d1, q10, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d5, q8, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d0, q0, #16
vshrn.u32 d1, q10, #16
vshrn.u32 d4, q2, #16
vshrn.u32 d5, q8, #16
bilinear_duplicate_mask mask_fmt, 4, d22
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #8
vmovn.u16 d0, q0
vmovn.u16 d1, q2
vadd.u16 q12, q12, q13
@ -695,13 +694,13 @@ pixman_asm_function fname
blt 0f
tst OUT, #(1 << dst_bpp_shift)
beq 0f
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #8
vadd.u16 q12, q12, q13
bilinear_process_last_pixel
sub WIDTH, WIDTH, #1
0:
vadd.u16 q13, q13, q13
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #8
vadd.u16 q12, q12, q13
cmp WIDTH, #2
@ -922,7 +921,7 @@ pixman_asm_function fname
vmull.u8 q10, d22, d28
vmlal.u8 q10, d23, d29
vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q0, d16, #8
vmlsl.u16 q0, d16, d30
vmlal.u16 q0, d17, d30
@ -933,27 +932,27 @@ pixman_asm_function fname
vmull.u8 q11, d16, d28
vmlal.u8 q11, d17, d29
vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q1, d18, #8
vmlsl.u16 q1, d18, d31
vmlal.u16 q1, d19, d31
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #8
vadd.u16 q12, q12, q13
.endm
.macro bilinear_over_8888_8888_process_pixblock_tail
vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q2, d20, #8
vmlsl.u16 q2, d20, d30
vmlal.u16 q2, d21, d30
vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q3, d22, #8
vmlsl.u16 q3, d22, d31
vmlal.u16 q3, d23, d31
vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d0, q0, #16
vshrn.u32 d1, q1, #16
vld1.32 {d2, d3}, [OUT, :128]
pld [OUT, #(prefetch_offset * 4)]
vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d4, q2, #16
vshr.u16 q15, q12, #8
vshrn.u32 d5, q3, #16
vmovn.u16 d6, q0
vmovn.u16 d7, q2
vuzp.8 d6, d7
@ -976,7 +975,7 @@ pixman_asm_function fname
.endm
.macro bilinear_over_8888_8888_process_pixblock_tail_head
vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q2, d20, #8
mov TMP1, X, asr #16
add X, X, UX
add TMP1, TOP, TMP1, asl #2
@ -985,21 +984,21 @@ pixman_asm_function fname
add X, X, UX
add TMP2, TOP, TMP2, asl #2
vmlal.u16 q2, d21, d30
vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q3, d22, #8
vld1.32 {d20}, [TMP1], STRIDE
vmlsl.u16 q3, d22, d31
vmlal.u16 q3, d23, d31
vld1.32 {d21}, [TMP1]
vmull.u8 q8, d20, d28
vmlal.u8 q8, d21, d29
vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d0, q0, #16
vshrn.u32 d1, q1, #16
vld1.32 {d2, d3}, [OUT, :128]
pld [OUT, PF_OFFS]
vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d4, q2, #16
vshr.u16 q15, q12, #8
vld1.32 {d22}, [TMP2], STRIDE
vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d5, q3, #16
vmovn.u16 d6, q0
vld1.32 {d23}, [TMP2]
vmull.u8 q9, d22, d28
@ -1023,7 +1022,7 @@ pixman_asm_function fname
vmlal.u8 q10, d23, d29
vmull.u8 q11, d2, d4
vmull.u8 q2, d3, d4
vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q0, d16, #8
vmlsl.u16 q0, d16, d30
vrshr.u16 q1, q11, #8
vmlal.u16 q0, d17, d30
@ -1038,12 +1037,12 @@ pixman_asm_function fname
vmull.u8 q11, d16, d28
vmlal.u8 q11, d17, d29
vuzp.8 d6, d7
vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q1, d18, #8
vuzp.8 d6, d7
vmlsl.u16 q1, d18, d31
vadd.u16 q12, q12, q13
vmlal.u16 q1, d19, d31
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #8
vadd.u16 q12, q12, q13
vst1.32 {d6, d7}, [OUT, :128]!
.endm
@ -1082,14 +1081,14 @@ pixman_asm_function fname
vmull.u8 q3, d2, d28
vmlal.u8 q2, d1, d29
vmlal.u8 q3, d3, d29
vshll.u16 q0, d4, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q1, d6, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q0, d4, #8
vshll.u16 q1, d6, #8
vmlsl.u16 q0, d4, d30
vmlsl.u16 q1, d6, d31
vmlal.u16 q0, d5, d30
vmlal.u16 q1, d7, d31
vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d0, q0, #16
vshrn.u32 d1, q1, #16
vld1.32 {d2}, [TMP3], STRIDE
vld1.32 {d3}, [TMP3]
pld [TMP4, PF_OFFS]
@ -1100,7 +1099,7 @@ pixman_asm_function fname
vmlal.u8 q3, d3, d29
vmull.u8 q1, d4, d28
vmlal.u8 q1, d5, d29
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #8
vld1.32 {d22[0]}, [MASK]!
pld [MASK, #prefetch_offset]
vadd.u16 q12, q12, q13
@ -1108,17 +1107,17 @@ pixman_asm_function fname
.endm
.macro bilinear_over_8888_8_8888_process_pixblock_tail
vshll.u16 q9, d6, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q10, d2, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q9, d6, #8
vshll.u16 q10, d2, #8
vmlsl.u16 q9, d6, d30
vmlsl.u16 q10, d2, d31
vmlal.u16 q9, d7, d30
vmlal.u16 q10, d3, d31
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #8
vadd.u16 q12, q12, q13
vdup.32 d22, d22[0]
vshrn.u32 d18, q9, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d19, q10, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d18, q9, #16
vshrn.u32 d19, q10, #16
vmovn.u16 d17, q9
vld1.32 {d18, d19}, [OUT, :128]
pld [OUT, PF_OFFS]
@ -1147,11 +1146,11 @@ pixman_asm_function fname
.endm
.macro bilinear_over_8888_8_8888_process_pixblock_tail_head
vshll.u16 q9, d6, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q9, d6, #8
mov TMP1, X, asr #16
add X, X, UX
add TMP1, TOP, TMP1, asl #2
vshll.u16 q10, d2, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q10, d2, #8
vld1.32 {d0}, [TMP1], STRIDE
mov TMP2, X, asr #16
add X, X, UX
@ -1168,12 +1167,12 @@ pixman_asm_function fname
mov TMP4, X, asr #16
add X, X, UX
add TMP4, TOP, TMP4, asl #2
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #8
vadd.u16 q12, q12, q13
vld1.32 {d3}, [TMP2]
vdup.32 d22, d22[0]
vshrn.u32 d18, q9, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d19, q10, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d18, q9, #16
vshrn.u32 d19, q10, #16
vmull.u8 q2, d0, d28
vmull.u8 q3, d2, d28
vmovn.u16 d17, q9
@ -1183,8 +1182,8 @@ pixman_asm_function fname
vmlal.u8 q3, d3, d29
vuzp.8 d16, d17
vuzp.8 d18, d19
vshll.u16 q0, d4, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q1, d6, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q0, d4, #8
vshll.u16 q1, d6, #8
vuzp.8 d16, d17
vuzp.8 d18, d19
vmlsl.u16 q0, d4, d30
@ -1195,8 +1194,8 @@ pixman_asm_function fname
vmlal.u16 q1, d7, d31
vrsra.u16 q10, q10, #8
vrsra.u16 q11, q11, #8
vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d0, q0, #16
vshrn.u32 d1, q1, #16
vrshrn.u16 d16, q10, #8
vrshrn.u16 d17, q11, #8
vld1.32 {d2}, [TMP3], STRIDE
@ -1217,7 +1216,7 @@ pixman_asm_function fname
vraddhn.u16 d18, q9, q10
vraddhn.u16 d19, q15, q11
vmlal.u8 q1, d5, d29
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #8
vqadd.u8 q9, q8, q9
vld1.32 {d22[0]}, [MASK]!
vuzp.8 d18, d19
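Every change in this file is the same substitution: the updated pixman had parameterized the bilinear fixed-point precision as BILINEAR_INTERPOLATION_BITS, and the backout restores the literal constants corresponding to 8 bits of precision (#8 for the widening shift, #16 = 2 * 8 for the final narrowing, and #8 = 16 - 8 for pulling the next weights out of the 16.16 x accumulator in q12). A scalar model of the per-channel arithmetic that the vmull/vshll/vmlsl/vmlal/vshrn sequence implements, assuming 8-bit weights; the names are illustrative, and the real code runs this across eight 8-bit lanes at once:

#include <stdint.h>

static uint32_t
bilinear_channel (uint32_t tl, uint32_t tr, uint32_t bl, uint32_t br,
                  uint32_t distx, uint32_t disty)  /* weights in 0..255 */
{
    /* vertical pass: vmull.u8/vmlal.u8 with weights (256 - disty, disty) */
    uint32_t left  = tl * (256 - disty) + bl * disty;  /* fits in 16 bits */
    uint32_t right = tr * (256 - disty) + br * disty;

    /* horizontal pass: vshll.u16 #8, then vmlsl.u16/vmlal.u16 */
    uint32_t acc = left * 256 - left * distx + right * distx;

    /* vshrn.u32 #16, i.e. #(2 * BILINEAR_INTERPOLATION_BITS) */
    return acc >> 16;
}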

View File

@ -49,7 +49,6 @@
.altmacro
.p2align 2
#include "pixman-private.h"
#include "pixman-arm-neon-asm.h"
/* Global configuration options and preferences */
@ -2987,11 +2986,11 @@ fname:
vmull.u8 q1, d0, d28
vmlal.u8 q1, d1, d29
/* 5 cycles bubble */
vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q0, d2, #8
vmlsl.u16 q0, d2, d30
vmlal.u16 q0, d3, d30
/* 5 cycles bubble */
vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d0, q0, #16
/* 3 cycles bubble */
vmovn.u16 d0, q0
/* 1 cycle bubble */
@ -3001,15 +3000,15 @@ fname:
.macro bilinear_interpolate_two_pixels src_fmt, dst_fmt
bilinear_load_and_vertical_interpolate_two_&src_fmt \
q1, q11, d0, d1, d20, d21, d22, d23
vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q0, d2, #8
vmlsl.u16 q0, d2, d30
vmlal.u16 q0, d3, d30
vshll.u16 q10, d22, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q10, d22, #8
vmlsl.u16 q10, d22, d31
vmlal.u16 q10, d23, d31
vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d1, q10, #(2 * BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d0, q0, #16
vshrn.u32 d1, q10, #16
vshr.u16 q15, q12, #8
vadd.u16 q12, q12, q13
vmovn.u16 d0, q0
bilinear_store_&dst_fmt 2, q2, q3
@ -3021,26 +3020,26 @@ fname:
q3, q9, d4, d5, d16, d17, d18, d19
pld [TMP1, PF_OFFS]
sub TMP1, TMP1, STRIDE
vshll.u16 q0, d2, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q0, d2, #8
vmlsl.u16 q0, d2, d30
vmlal.u16 q0, d3, d30
vshll.u16 q10, d22, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q10, d22, #8
vmlsl.u16 q10, d22, d31
vmlal.u16 q10, d23, d31
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshll.u16 q2, d6, #BILINEAR_INTERPOLATION_BITS
vshr.u16 q15, q12, #8
vshll.u16 q2, d6, #8
vmlsl.u16 q2, d6, d30
vmlal.u16 q2, d7, d30
vshll.u16 q8, d18, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q8, d18, #8
pld [TMP2, PF_OFFS]
vmlsl.u16 q8, d18, d31
vmlal.u16 q8, d19, d31
vadd.u16 q12, q12, q13
vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d1, q10, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d5, q8, #(2 * BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d0, q0, #16
vshrn.u32 d1, q10, #16
vshrn.u32 d4, q2, #16
vshrn.u32 d5, q8, #16
vshr.u16 q15, q12, #8
vmovn.u16 d0, q0
vmovn.u16 d1, q2
vadd.u16 q12, q12, q13
@ -3159,13 +3158,13 @@ pixman_asm_function fname
blt 0f
tst OUT, #(1 << dst_bpp_shift)
beq 0f
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #8
vadd.u16 q12, q12, q13
bilinear_interpolate_last_pixel src_fmt, dst_fmt
sub WIDTH, WIDTH, #1
0:
vadd.u16 q13, q13, q13
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #8
vadd.u16 q12, q12, q13
cmp WIDTH, #2
@ -3283,7 +3282,7 @@ pixman_asm_function fname
vmull.u8 q10, d22, d28
vmlal.u8 q10, d23, d29
vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q0, d16, #8
vmlsl.u16 q0, d16, d30
vmlal.u16 q0, d17, d30
@ -3294,25 +3293,25 @@ pixman_asm_function fname
vmull.u8 q11, d16, d28
vmlal.u8 q11, d17, d29
vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q1, d18, #8
vmlsl.u16 q1, d18, d31
.endm
.macro bilinear_interpolate_four_pixels_8888_8888_tail
vmlal.u16 q1, d19, d31
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS
vshr.u16 q15, q12, #8
vshll.u16 q2, d20, #8
vmlsl.u16 q2, d20, d30
vmlal.u16 q2, d21, d30
vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q3, d22, #8
vmlsl.u16 q3, d22, d31
vmlal.u16 q3, d23, d31
vadd.u16 q12, q12, q13
vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d0, q0, #16
vshrn.u32 d1, q1, #16
vshrn.u32 d4, q2, #16
vshr.u16 q15, q12, #8
vshrn.u32 d5, q3, #16
vmovn.u16 d6, q0
vmovn.u16 d7, q2
vadd.u16 q12, q12, q13
@ -3327,22 +3326,22 @@ pixman_asm_function fname
add X, X, UX
add TMP2, TOP, TMP2, asl #2
vmlal.u16 q1, d19, d31
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS
vshr.u16 q15, q12, #8
vshll.u16 q2, d20, #8
vmlsl.u16 q2, d20, d30
vmlal.u16 q2, d21, d30
vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q3, d22, #8
vld1.32 {d20}, [TMP1], STRIDE
vmlsl.u16 q3, d22, d31
vmlal.u16 q3, d23, d31
vld1.32 {d21}, [TMP1]
vmull.u8 q8, d20, d28
vmlal.u8 q8, d21, d29
vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d0, q0, #16
vshrn.u32 d1, q1, #16
vshrn.u32 d4, q2, #16
vld1.32 {d22}, [TMP2], STRIDE
vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d5, q3, #16
vadd.u16 q12, q12, q13
vld1.32 {d23}, [TMP2]
vmull.u8 q9, d22, d28
@ -3354,12 +3353,12 @@ pixman_asm_function fname
add TMP4, TOP, TMP4, asl #2
vmlal.u8 q9, d23, d29
vld1.32 {d22}, [TMP3], STRIDE
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #8
vld1.32 {d23}, [TMP3]
vmull.u8 q10, d22, d28
vmlal.u8 q10, d23, d29
vmovn.u16 d6, q0
vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q0, d16, #8
vmovn.u16 d7, q2
vmlsl.u16 q0, d16, d30
vmlal.u16 q0, d17, d30
@ -3371,7 +3370,7 @@ pixman_asm_function fname
vmull.u8 q11, d16, d28
vmlal.u8 q11, d17, d29
vst1.32 {d6, d7}, [OUT, :128]!
vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q1, d18, #8
vmlsl.u16 q1, d18, d31
.endm
@ -3404,7 +3403,7 @@ pixman_asm_function fname
vld1.32 {d23}, [TMP3]
vmull.u8 q10, d22, d28
vmlal.u8 q10, d23, d29
vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q0, d16, #8
vmlsl.u16 q0, d16, d30
vmlal.u16 q0, d17, d30
pld [TMP4, PF_OFFS]
@ -3413,7 +3412,7 @@ pixman_asm_function fname
pld [TMP4, PF_OFFS]
vmull.u8 q11, d16, d28
vmlal.u8 q11, d17, d29
vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q1, d18, #8
vmlsl.u16 q1, d18, d31
mov TMP1, X, asr #16
@ -3423,22 +3422,22 @@ pixman_asm_function fname
add X, X, UX
add TMP2, TOP, TMP2, asl #2
vmlal.u16 q1, d19, d31
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS
vshr.u16 q15, q12, #8
vshll.u16 q2, d20, #8
vmlsl.u16 q2, d20, d30
vmlal.u16 q2, d21, d30
vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q3, d22, #8
vld1.32 {d20}, [TMP1], STRIDE
vmlsl.u16 q3, d22, d31
vmlal.u16 q3, d23, d31
vld1.32 {d21}, [TMP1]
vmull.u8 q8, d20, d28
vmlal.u8 q8, d21, d29
vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d0, q0, #16
vshrn.u32 d1, q1, #16
vshrn.u32 d4, q2, #16
vld1.32 {d22}, [TMP2], STRIDE
vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d5, q3, #16
vadd.u16 q12, q12, q13
vld1.32 {d23}, [TMP2]
vmull.u8 q9, d22, d28
@ -3450,12 +3449,12 @@ pixman_asm_function fname
add TMP4, TOP, TMP4, asl #2
vmlal.u8 q9, d23, d29
vld1.32 {d22}, [TMP3], STRIDE
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #8
vld1.32 {d23}, [TMP3]
vmull.u8 q10, d22, d28
vmlal.u8 q10, d23, d29
vmovn.u16 d8, q0
vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q0, d16, #8
vmovn.u16 d9, q2
vmlsl.u16 q0, d16, d30
vmlal.u16 q0, d17, d30
@ -3466,25 +3465,25 @@ pixman_asm_function fname
pld [TMP4, PF_OFFS]
vmull.u8 q11, d16, d28
vmlal.u8 q11, d17, d29
vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q1, d18, #8
vmlsl.u16 q1, d18, d31
.endm
.macro bilinear_interpolate_eight_pixels_8888_0565_tail
vmlal.u16 q1, d19, d31
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS
vshr.u16 q15, q12, #8
vshll.u16 q2, d20, #8
vmlsl.u16 q2, d20, d30
vmlal.u16 q2, d21, d30
vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q3, d22, #8
vmlsl.u16 q3, d22, d31
vmlal.u16 q3, d23, d31
vadd.u16 q12, q12, q13
vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d0, q0, #16
vshrn.u32 d1, q1, #16
vshrn.u32 d4, q2, #16
vshr.u16 q15, q12, #8
vshrn.u32 d5, q3, #16
vmovn.u16 d10, q0
vmovn.u16 d11, q2
vadd.u16 q12, q12, q13
@ -3509,23 +3508,23 @@ pixman_asm_function fname
add X, X, UX
add TMP2, TOP, TMP2, asl #2
vmlal.u16 q1, d19, d31
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #8
vuzp.u8 d8, d9
vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q2, d20, #8
vmlsl.u16 q2, d20, d30
vmlal.u16 q2, d21, d30
vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q3, d22, #8
vld1.32 {d20}, [TMP1], STRIDE
vmlsl.u16 q3, d22, d31
vmlal.u16 q3, d23, d31
vld1.32 {d21}, [TMP1]
vmull.u8 q8, d20, d28
vmlal.u8 q8, d21, d29
vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d0, q0, #16
vshrn.u32 d1, q1, #16
vshrn.u32 d4, q2, #16
vld1.32 {d22}, [TMP2], STRIDE
vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d5, q3, #16
vadd.u16 q12, q12, q13
vld1.32 {d23}, [TMP2]
vmull.u8 q9, d22, d28
@ -3537,12 +3536,12 @@ pixman_asm_function fname
add TMP4, TOP, TMP4, asl #2
vmlal.u8 q9, d23, d29
vld1.32 {d22}, [TMP3], STRIDE
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #8
vld1.32 {d23}, [TMP3]
vmull.u8 q10, d22, d28
vmlal.u8 q10, d23, d29
vmovn.u16 d10, q0
vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q0, d16, #8
vmovn.u16 d11, q2
vmlsl.u16 q0, d16, d30
vmlal.u16 q0, d17, d30
@ -3554,7 +3553,7 @@ pixman_asm_function fname
vmull.u8 q11, d16, d28
vmlal.u8 q11, d17, d29
vuzp.u8 d10, d11
vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q1, d18, #8
vmlsl.u16 q1, d18, d31
mov TMP1, X, asr #16
@ -3565,12 +3564,12 @@ pixman_asm_function fname
add TMP2, TOP, TMP2, asl #2
vmlal.u16 q1, d19, d31
vuzp.u8 d9, d11
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshll.u16 q2, d20, #BILINEAR_INTERPOLATION_BITS
vshr.u16 q15, q12, #8
vshll.u16 q2, d20, #8
vuzp.u8 d8, d10
vmlsl.u16 q2, d20, d30
vmlal.u16 q2, d21, d30
vshll.u16 q3, d22, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q3, d22, #8
vld1.32 {d20}, [TMP1], STRIDE
vmlsl.u16 q3, d22, d31
vmlal.u16 q3, d23, d31
@ -3580,13 +3579,13 @@ pixman_asm_function fname
vshll.u8 q6, d9, #8
vshll.u8 q5, d10, #8
vshll.u8 q7, d8, #8
vshrn.u32 d0, q0, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d0, q0, #16
vsri.u16 q5, q6, #5
vshrn.u32 d1, q1, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d1, q1, #16
vsri.u16 q5, q7, #11
vshrn.u32 d4, q2, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d4, q2, #16
vld1.32 {d22}, [TMP2], STRIDE
vshrn.u32 d5, q3, #(2 * BILINEAR_INTERPOLATION_BITS)
vshrn.u32 d5, q3, #16
vadd.u16 q12, q12, q13
vld1.32 {d23}, [TMP2]
vmull.u8 q9, d22, d28
@ -3598,12 +3597,12 @@ pixman_asm_function fname
add TMP4, TOP, TMP4, asl #2
vmlal.u8 q9, d23, d29
vld1.32 {d22}, [TMP3], STRIDE
vshr.u16 q15, q12, #(16 - BILINEAR_INTERPOLATION_BITS)
vshr.u16 q15, q12, #8
vld1.32 {d23}, [TMP3]
vmull.u8 q10, d22, d28
vmlal.u8 q10, d23, d29
vmovn.u16 d8, q0
vshll.u16 q0, d16, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q0, d16, #8
vmovn.u16 d9, q2
vmlsl.u16 q0, d16, d30
vmlal.u16 q0, d17, d30
@ -3614,7 +3613,7 @@ pixman_asm_function fname
pld [TMP4, PF_OFFS]
vmull.u8 q11, d16, d28
vmlal.u8 q11, d17, d29
vshll.u16 q1, d18, #BILINEAR_INTERPOLATION_BITS
vshll.u16 q1, d18, #8
vst1.32 {d10, d11}, [OUT, :128]!
vmlsl.u16 q1, d18, d31
.endm
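The vshll.u8 #8 / vsri.u16 #5 / vsri.u16 #11 sequence in the 8888_0565 tail above packs the interpolated 8-bit channels into r5g6b5: each channel is widened into the top of a 16-bit lane, then the next channel is shift-right-inserted beneath it. A scalar equivalent of that packing (the exact channel-to-register mapping is left to the asm):

#include <stdint.h>

/* Keep the top 5/6/5 bits of the r/g/b channels of an 8888 pixel. */
static uint16_t
pack_8888_to_0565 (uint32_t argb)
{
    uint16_t r = (argb >> 16) & 0xff;
    uint16_t g = (argb >> 8) & 0xff;
    uint16_t b = argb & 0xff;

    return (uint16_t) (((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3));
}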

View File

@ -1,229 +0,0 @@
/*
* Copyright © 2000 SuSE, Inc.
* Copyright © 2007 Red Hat, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of SuSE not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. SuSE makes no representations about the
* suitability of this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
* BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "pixman-private.h"
typedef enum
{
ARM_V7 = (1 << 0),
ARM_V6 = (1 << 1),
ARM_VFP = (1 << 2),
ARM_NEON = (1 << 3),
ARM_IWMMXT = (1 << 4)
} arm_cpu_features_t;
#if defined(USE_ARM_SIMD) || defined(USE_ARM_NEON) || defined(USE_ARM_IWMMXT)
#if defined(_MSC_VER)
/* Needed for EXCEPTION_ILLEGAL_INSTRUCTION */
#include <windows.h>
extern int pixman_msvc_try_arm_neon_op ();
extern int pixman_msvc_try_arm_simd_op ();
static arm_cpu_features_t
detect_cpu_features (void)
{
arm_cpu_features_t features = 0;
__try
{
pixman_msvc_try_arm_simd_op ();
features |= ARM_V6;
}
__except (GetExceptionCode () == EXCEPTION_ILLEGAL_INSTRUCTION)
{
}
__try
{
pixman_msvc_try_arm_neon_op ();
features |= ARM_NEON;
}
__except (GetExceptionCode () == EXCEPTION_ILLEGAL_INSTRUCTION)
{
}
return features;
}
#elif defined(__APPLE__) && defined(TARGET_OS_IPHONE) /* iOS */
#include "TargetConditionals.h"
static arm_cpu_features_t
detect_cpu_features (void)
{
arm_cpu_features_t features = 0;
features |= ARM_V6;
/* Detection of ARM NEON on iOS is fairly simple because iOS binaries
* contain separate executable images for each processor architecture.
* So all we have to do is detect the armv7 architecture build. The
* operating system automatically runs the armv7 binary for armv7 devices
* and the armv6 binary for armv6 devices.
*/
#if defined(__ARM_NEON__)
features |= ARM_NEON;
#endif
return features;
}
#elif defined(__ANDROID__) || defined(ANDROID) /* Android */
static arm_cpu_features_t
detect_cpu_features (void)
{
arm_cpu_features_t features = 0;
char buf[1024];
char* pos;
const char* ver_token = "CPU architecture: ";
FILE* f = fopen("/proc/cpuinfo", "r");
if (!f) {
return features;
}
fread(buf, sizeof(char), sizeof(buf), f);
fclose(f);
pos = strstr(buf, ver_token);
if (pos) {
char vchar = *(pos + strlen(ver_token));
if (vchar >= '0' && vchar <= '9') {
int ver = vchar - '0';
if (ver >= 7)
features |= ARM_V7;
}
}
if (strstr(buf, "neon") != NULL)
features |= ARM_NEON;
if (strstr(buf, "vfp") != NULL)
features |= ARM_VFP;
return features;
}
#elif defined (__linux__) /* linux ELF */
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <string.h>
#include <elf.h>
static arm_cpu_features_t
detect_cpu_features (void)
{
arm_cpu_features_t features = 0;
Elf32_auxv_t aux;
int fd;
fd = open ("/proc/self/auxv", O_RDONLY);
if (fd >= 0)
{
while (read (fd, &aux, sizeof(Elf32_auxv_t)) == sizeof(Elf32_auxv_t))
{
if (aux.a_type == AT_HWCAP)
{
uint32_t hwcap = aux.a_un.a_val;
/* hardcode these values to avoid depending on specific
* versions of the hwcap header, e.g. HWCAP_NEON
*/
if ((hwcap & 64) != 0)
features |= ARM_VFP;
if ((hwcap & 512) != 0)
features |= ARM_IWMMXT;
/* this flag is only present on kernel 2.6.29 */
if ((hwcap & 4096) != 0)
features |= ARM_NEON;
}
else if (aux.a_type == AT_PLATFORM)
{
const char *plat = (const char*) aux.a_un.a_val;
if (strncmp (plat, "v7l", 3) == 0)
features |= (ARM_V7 | ARM_V6);
else if (strncmp (plat, "v6l", 3) == 0)
features |= ARM_V6;
}
}
close (fd);
}
return features;
}
#else /* Unknown */
static arm_cpu_features_t
detect_cpu_features (void)
{
return 0;
}
#endif /* Linux elf */
static pixman_bool_t
have_feature (arm_cpu_features_t feature)
{
static pixman_bool_t initialized;
static arm_cpu_features_t features;
if (!initialized)
{
features = detect_cpu_features();
initialized = TRUE;
}
return (features & feature) == feature;
}
#endif /* USE_ARM_SIMD || USE_ARM_NEON || USE_ARM_IWMMXT */
pixman_implementation_t *
_pixman_arm_get_implementations (pixman_implementation_t *imp)
{
#ifdef USE_ARM_SIMD
if (!_pixman_disabled ("arm-simd") && have_feature (ARM_V6))
imp = _pixman_implementation_create_arm_simd (imp);
#endif
#ifdef USE_ARM_IWMMXT
if (!_pixman_disabled ("arm-iwmmxt") && have_feature (ARM_IWMMXT))
imp = _pixman_implementation_create_mmx (imp);
#endif
#ifdef USE_ARM_NEON
if (!_pixman_disabled ("arm-neon") && have_feature (ARM_NEON))
imp = _pixman_implementation_create_arm_neon (imp);
#endif
return imp;
}
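The Linux branch above walks /proc/self/auxv by hand and hard-codes the HWCAP bit values so it needs no kernel headers. On glibc 2.16 and later the same record is available through getauxval (); a sketch of the equivalent query, reusing the arm_cpu_features_t flags defined at the top of this file (the bit values mirror the hard-coded ones: 64 = VFP, 512 = IWMMXT, 4096 = NEON):

#include <sys/auxv.h>

static arm_cpu_features_t
detect_cpu_features_getauxval (void)
{
    arm_cpu_features_t features = 0;
    unsigned long hwcap = getauxval (AT_HWCAP);

    if (hwcap & 64)
        features |= ARM_VFP;
    if (hwcap & 512)
        features |= ARM_IWMMXT;
    if (hwcap & 4096)
        features |= ARM_NEON;

    return features;
}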

View File

@ -42,14 +42,14 @@
* we could produce smoother gradients by evaluating them at higher color
* depth, but that's a project for the future.
*/
static uint32_t *
_pixman_image_get_scanline_generic_64 (pixman_iter_t * iter,
const uint32_t *mask)
static void
_pixman_image_get_scanline_generic_64 (pixman_image_t * image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t * mask)
{
int width = iter->width;
uint32_t * buffer = iter->buffer;
pixman_iter_get_scanline_t fetch_32 = iter->data;
uint32_t *mask8 = NULL;
/* Contract the mask image, if one exists, so that the 32-bit fetch
@ -59,20 +59,18 @@ _pixman_image_get_scanline_generic_64 (pixman_iter_t * iter,
{
mask8 = pixman_malloc_ab (width, sizeof(uint32_t));
if (!mask8)
return buffer;
return;
pixman_contract (mask8, (uint64_t *)mask, width);
}
/* Fetch the source image into the first half of buffer. */
fetch_32 (iter, mask8);
image->bits.get_scanline_32 (image, x, y, width, (uint32_t*)buffer, mask8);
/* Expand from 32bpp to 64bpp in place. */
pixman_expand ((uint64_t *)buffer, buffer, PIXMAN_a8r8g8b8, width);
free (mask8);
return buffer;
}
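The 64bpp wrapper above fetches at 32bpp into the front of the scanline buffer and then widens to 64bpp in place with pixman_expand. In-place widening is safe because the expansion walks the buffer from the end toward the start, so each 64-bit write only covers 32-bit pixels that have already been read. A simplified sketch of that idea for a8r8g8b8, where dst may equal (uint64_t *) src:

#include <stdint.h>

static void
expand_8888_in_place (uint64_t *dst, const uint32_t *src, int width)
{
    int i;

    /* back to front, so dst[i] never clobbers an unread src[j], j < i */
    for (i = width - 1; i >= 0; --i)
    {
        uint32_t p = src[i];
        uint64_t a = (p >> 24) & 0xff;
        uint64_t r = (p >> 16) & 0xff;
        uint64_t g = (p >> 8) & 0xff;
        uint64_t b = p & 0xff;

        /* replicate each 8-bit channel to 16 bits (0xab -> 0xabab) */
        dst[i] = (a * 257) << 48 | (r * 257) << 32
               | (g * 257) << 16 | (b * 257);
    }
}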
/* Fetch functions */
@ -131,8 +129,8 @@ bits_image_fetch_pixel_bilinear (bits_image_t *image,
x1 = x - pixman_fixed_1 / 2;
y1 = y - pixman_fixed_1 / 2;
distx = pixman_fixed_to_bilinear_weight (x1);
disty = pixman_fixed_to_bilinear_weight (y1);
distx = interpolation_coord(x1);
disty = interpolation_coord(y1);
x1 = pixman_fixed_to_int (x1);
y1 = pixman_fixed_to_int (y1);
@ -162,17 +160,14 @@ bits_image_fetch_pixel_bilinear (bits_image_t *image,
return bilinear_interpolation (tl, tr, bl, br, distx, disty);
}
static uint32_t *
bits_image_fetch_bilinear_no_repeat_8888 (pixman_iter_t *iter,
const uint32_t *mask)
static void
bits_image_fetch_bilinear_no_repeat_8888 (pixman_image_t * ima,
int offset,
int line,
int width,
uint32_t * buffer,
const uint32_t * mask)
{
pixman_image_t * ima = iter->image;
int offset = iter->x;
int line = iter->y++;
int width = iter->width;
uint32_t * buffer = iter->buffer;
bits_image_t *bits = &ima->bits;
pixman_fixed_t x_top, x_bottom, x;
pixman_fixed_t ux_top, ux_bottom, ux;
@ -194,13 +189,13 @@ bits_image_fetch_bilinear_no_repeat_8888 (pixman_iter_t *iter,
v.vector[2] = pixman_fixed_1;
if (!pixman_transform_point_3d (bits->common.transform, &v))
return iter->buffer;
return;
ux = ux_top = ux_bottom = bits->common.transform->matrix[0][0];
x = x_top = x_bottom = v.vector[0] - pixman_fixed_1/2;
y = v.vector[1] - pixman_fixed_1/2;
disty = pixman_fixed_to_bilinear_weight (y);
disty = interpolation_coord(y);
/* Load the pointers to the first and second lines from the source
* image that bilinear code must read.
@ -261,7 +256,7 @@ bits_image_fetch_bilinear_no_repeat_8888 (pixman_iter_t *iter,
if (top_row == zero && bottom_row == zero)
{
memset (buffer, 0, width * sizeof (uint32_t));
return iter->buffer;
return;
}
else if (bits->format == PIXMAN_x8r8g8b8)
{
@ -309,7 +304,7 @@ bits_image_fetch_bilinear_no_repeat_8888 (pixman_iter_t *iter,
tr = top_row[pixman_fixed_to_int (x_top) + 1] | top_mask;
br = bottom_row[pixman_fixed_to_int (x_bottom) + 1] | bottom_mask;
distx = pixman_fixed_to_bilinear_weight (x);
distx = interpolation_coord(x);
*buffer++ = bilinear_interpolation (0, tr, 0, br, distx, disty);
@ -334,7 +329,7 @@ bits_image_fetch_bilinear_no_repeat_8888 (pixman_iter_t *iter,
bl = bottom_row [pixman_fixed_to_int (x_bottom)] | bottom_mask;
br = bottom_row [pixman_fixed_to_int (x_bottom) + 1] | bottom_mask;
distx = pixman_fixed_to_bilinear_weight (x);
distx = interpolation_coord(x);
*buffer = bilinear_interpolation (tl, tr, bl, br, distx, disty);
}
@ -358,7 +353,7 @@ bits_image_fetch_bilinear_no_repeat_8888 (pixman_iter_t *iter,
tl = top_row [pixman_fixed_to_int (x_top)] | top_mask;
bl = bottom_row [pixman_fixed_to_int (x_bottom)] | bottom_mask;
distx = pixman_fixed_to_bilinear_weight (x);
distx = interpolation_coord(x);
*buffer = bilinear_interpolation (tl, 0, bl, 0, distx, disty);
}
@ -373,8 +368,6 @@ bits_image_fetch_bilinear_no_repeat_8888 (pixman_iter_t *iter,
/* Zero fill to the left of the image */
while (buffer < end)
*buffer++ = 0;
return iter->buffer;
}
static force_inline uint32_t
@ -388,11 +381,11 @@ bits_image_fetch_pixel_convolution (bits_image_t *image,
int y_off = (params[1] - pixman_fixed_1) >> 1;
int32_t cwidth = pixman_fixed_to_int (params[0]);
int32_t cheight = pixman_fixed_to_int (params[1]);
int32_t srtot, sgtot, sbtot, satot;
int32_t i, j, x1, x2, y1, y2;
pixman_repeat_t repeat_mode = image->common.repeat;
int width = image->width;
int height = image->height;
int srtot, sgtot, sbtot, satot;
params += 2;
@ -428,10 +421,10 @@ bits_image_fetch_pixel_convolution (bits_image_t *image,
pixel = get_pixel (image, rx, ry, TRUE);
}
srtot += (int)RED_8 (pixel) * f;
sgtot += (int)GREEN_8 (pixel) * f;
sbtot += (int)BLUE_8 (pixel) * f;
satot += (int)ALPHA_8 (pixel) * f;
srtot += RED_8 (pixel) * f;
sgtot += GREEN_8 (pixel) * f;
sbtot += BLUE_8 (pixel) * f;
satot += ALPHA_8 (pixel) * f;
}
params++;
@ -481,16 +474,14 @@ bits_image_fetch_pixel_filtered (bits_image_t *image,
return 0;
}
static uint32_t *
bits_image_fetch_affine_no_alpha (pixman_iter_t * iter,
static void
bits_image_fetch_affine_no_alpha (pixman_image_t * image,
int offset,
int line,
int width,
uint32_t * buffer,
const uint32_t * mask)
{
pixman_image_t *image = iter->image;
int offset = iter->x;
int line = iter->y++;
int width = iter->width;
uint32_t * buffer = iter->buffer;
pixman_fixed_t x, y;
pixman_fixed_t ux, uy;
pixman_vector_t v;
@ -504,7 +495,7 @@ bits_image_fetch_affine_no_alpha (pixman_iter_t * iter,
if (image->common.transform)
{
if (!pixman_transform_point_3d (image->common.transform, &v))
return iter->buffer;
return;
ux = image->common.transform->matrix[0][0];
uy = image->common.transform->matrix[1][0];
@ -529,8 +520,6 @@ bits_image_fetch_affine_no_alpha (pixman_iter_t * iter,
x += ux;
y += uy;
}
return buffer;
}
/* General fetcher */
@ -574,16 +563,14 @@ fetch_pixel_general (bits_image_t *image, int x, int y, pixman_bool_t check_boun
return pixel;
}
static uint32_t *
bits_image_fetch_general (pixman_iter_t *iter,
const uint32_t *mask)
static void
bits_image_fetch_general (pixman_image_t * image,
int offset,
int line,
int width,
uint32_t * buffer,
const uint32_t * mask)
{
pixman_image_t *image = iter->image;
int offset = iter->x;
int line = iter->y++;
int width = iter->width;
uint32_t * buffer = iter->buffer;
pixman_fixed_t x, y, w;
pixman_fixed_t ux, uy, uw;
pixman_vector_t v;
@ -597,7 +584,7 @@ bits_image_fetch_general (pixman_iter_t *iter,
if (image->common.transform)
{
if (!pixman_transform_point_3d (image->common.transform, &v))
return buffer;
return;
ux = image->common.transform->matrix[0][0];
uy = image->common.transform->matrix[1][0];
@ -639,8 +626,6 @@ bits_image_fetch_general (pixman_iter_t *iter,
y += uy;
w += uw;
}
return buffer;
}
static const uint8_t zero[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
@ -695,8 +680,8 @@ bits_image_fetch_bilinear_affine (pixman_image_t * image,
x1 = x - pixman_fixed_1 / 2;
y1 = y - pixman_fixed_1 / 2;
distx = pixman_fixed_to_bilinear_weight (x1);
disty = pixman_fixed_to_bilinear_weight (y1);
distx = interpolation_coord(x1);
disty = interpolation_coord(y1);
y1 = pixman_fixed_to_int (y1);
y2 = y1 + 1;
@ -897,33 +882,35 @@ convert_r5g6b5 (const uint8_t *row, int x)
}
#define MAKE_BILINEAR_FETCHER(name, format, repeat_mode) \
static uint32_t * \
bits_image_fetch_bilinear_affine_ ## name (pixman_iter_t *iter, \
static void \
bits_image_fetch_bilinear_affine_ ## name (pixman_image_t *image, \
int offset, \
int line, \
int width, \
uint32_t * buffer, \
const uint32_t * mask) \
{ \
bits_image_fetch_bilinear_affine (iter->image, \
iter->x, iter->y++, \
iter->width, \
iter->buffer, mask, \
bits_image_fetch_bilinear_affine (image, offset, line, \
width, buffer, mask, \
convert_ ## format, \
PIXMAN_ ## format, \
repeat_mode); \
return iter->buffer; \
}
#define MAKE_NEAREST_FETCHER(name, format, repeat_mode) \
static uint32_t * \
bits_image_fetch_nearest_affine_ ## name (pixman_iter_t *iter, \
static void \
bits_image_fetch_nearest_affine_ ## name (pixman_image_t *image, \
int offset, \
int line, \
int width, \
uint32_t * buffer, \
const uint32_t * mask) \
{ \
bits_image_fetch_nearest_affine (iter->image, \
iter->x, iter->y++, \
iter->width, \
iter->buffer, mask, \
bits_image_fetch_nearest_affine (image, offset, line, \
width, buffer, mask, \
convert_ ## format, \
PIXMAN_ ## format, \
repeat_mode); \
return iter->buffer; \
}
#define MAKE_FETCHERS(name, format, repeat_mode) \
@ -1072,16 +1059,14 @@ bits_image_fetch_untransformed_repeat_normal (bits_image_t *image,
}
}
static uint32_t *
bits_image_fetch_untransformed_32 (pixman_iter_t * iter,
const uint32_t *mask)
static void
bits_image_fetch_untransformed_32 (pixman_image_t * image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t * mask)
{
pixman_image_t *image = iter->image;
int x = iter->x;
int y = iter->y;
int width = iter->width;
uint32_t * buffer = iter->buffer;
if (image->common.repeat == PIXMAN_REPEAT_NONE)
{
bits_image_fetch_untransformed_repeat_none (
@ -1092,22 +1077,16 @@ bits_image_fetch_untransformed_32 (pixman_iter_t * iter,
bits_image_fetch_untransformed_repeat_normal (
&image->bits, FALSE, x, y, width, buffer);
}
iter->y++;
return buffer;
}
static uint32_t *
bits_image_fetch_untransformed_64 (pixman_iter_t * iter,
const uint32_t *mask)
static void
bits_image_fetch_untransformed_64 (pixman_image_t * image,
int x,
int y,
int width,
uint32_t * buffer,
const uint32_t * unused)
{
pixman_image_t *image = iter->image;
int x = iter->x;
int y = iter->y;
int width = iter->width;
uint32_t * buffer = iter->buffer;
if (image->common.repeat == PIXMAN_REPEAT_NONE)
{
bits_image_fetch_untransformed_repeat_none (
@ -1118,21 +1097,14 @@ bits_image_fetch_untransformed_64 (pixman_iter_t * iter,
bits_image_fetch_untransformed_repeat_normal (
&image->bits, TRUE, x, y, width, buffer);
}
iter->y++;
return buffer;
}
static uint32_t *
_pixman_image_get_scanline_generic_64 (pixman_iter_t *iter,
const uint32_t * mask);
typedef struct
{
pixman_format_code_t format;
uint32_t flags;
pixman_iter_get_scanline_t get_scanline_32;
pixman_iter_get_scanline_t get_scanline_64;
fetch_scanline_t fetch_32;
fetch_scanline_t fetch_64;
} fetcher_info_t;
static const fetcher_info_t fetcher_info[] =
@ -1234,36 +1206,53 @@ static const fetcher_info_t fetcher_info[] =
static void
bits_image_property_changed (pixman_image_t *image)
{
uint32_t flags = image->common.flags;
pixman_format_code_t format = image->common.extended_format_code;
const fetcher_info_t *info;
_pixman_bits_image_setup_accessors (&image->bits);
info = fetcher_info;
while (info->format != PIXMAN_null)
{
if ((info->format == format || info->format == PIXMAN_any) &&
(info->flags & flags) == info->flags)
{
image->bits.get_scanline_32 = info->fetch_32;
image->bits.get_scanline_64 = info->fetch_64;
break;
}
info++;
}
}
static uint32_t *
src_get_scanline_narrow (pixman_iter_t *iter, const uint32_t *mask)
{
iter->image->bits.get_scanline_32 (
iter->image, iter->x, iter->y++, iter->width, iter->buffer, mask);
return iter->buffer;
}
static uint32_t *
src_get_scanline_wide (pixman_iter_t *iter, const uint32_t *mask)
{
iter->image->bits.get_scanline_64 (
iter->image, iter->x, iter->y++, iter->width, iter->buffer, mask);
return iter->buffer;
}
void
_pixman_bits_image_src_iter_init (pixman_image_t *image, pixman_iter_t *iter)
{
pixman_format_code_t format = image->common.extended_format_code;
uint32_t flags = image->common.flags;
const fetcher_info_t *info;
if (iter->flags & ITER_NARROW)
iter->get_scanline = src_get_scanline_narrow;
else
iter->get_scanline = src_get_scanline_wide;
for (info = fetcher_info; info->format != PIXMAN_null; ++info)
{
if ((info->format == format || info->format == PIXMAN_any) &&
(info->flags & flags) == info->flags)
{
if (iter->iter_flags & ITER_NARROW)
{
iter->get_scanline = info->get_scanline_32;
}
else
{
iter->data = info->get_scanline_32;
iter->get_scanline = info->get_scanline_64;
}
return;
}
}
/* Just in case we somehow didn't find a scanline function */
iter->get_scanline = _pixman_iter_get_scanline_noop;
}
static uint32_t *
@ -1420,9 +1409,9 @@ dest_write_back_wide (pixman_iter_t *iter)
void
_pixman_bits_image_dest_iter_init (pixman_image_t *image, pixman_iter_t *iter)
{
if (iter->iter_flags & ITER_16)
if (iter->flags & ITER_16)
{
if ((iter->iter_flags & (ITER_IGNORE_RGB | ITER_IGNORE_ALPHA)) ==
if ((iter->flags & (ITER_IGNORE_RGB | ITER_IGNORE_ALPHA)) ==
(ITER_IGNORE_RGB | ITER_IGNORE_ALPHA))
{
iter->get_scanline = _pixman_iter_get_scanline_noop;
@ -1433,9 +1422,9 @@ _pixman_bits_image_dest_iter_init (pixman_image_t *image, pixman_iter_t *iter)
}
iter->write_back = dest_write_back_16;
}
else if (iter->iter_flags & ITER_NARROW)
else if (iter->flags & ITER_NARROW)
{
if ((iter->iter_flags & (ITER_IGNORE_RGB | ITER_IGNORE_ALPHA)) ==
if ((iter->flags & (ITER_IGNORE_RGB | ITER_IGNORE_ALPHA)) ==
(ITER_IGNORE_RGB | ITER_IGNORE_ALPHA))
{
iter->get_scanline = _pixman_iter_get_scanline_noop;

View File

@ -441,7 +441,7 @@ combine_saturate_u (pixman_implementation_t *imp,
* PDF_NON_SEPARABLE_BLEND_MODE macros, which take the blend function as an
* argument. Note that this implementation operates on premultiplied colors,
* while the PDF specification does not. Therefore the code uses the formula
* Cra = (1 - as) . Dca + (1 - ad) . Sca + B(Dca, ad, Sca, as)
* ar.Cra = (1 - as) . Dca + (1 - ad) . Sca + B(Dca, ad, Sca, as)
*/
/*
@ -526,7 +526,7 @@ combine_multiply_ca (pixman_implementation_t *imp,
UN8x4_MUL_UN8_ADD_UN8x4_MUL_UN8 (result, isa, s, ida); \
\
*(dest + i) = result + \
(DIV_ONE_UN8 (sa * (uint32_t)da) << A_SHIFT) + \
(DIV_ONE_UN8 (sa * da) << A_SHIFT) + \
(blend_ ## name (RED_8 (d), da, RED_8 (s), sa) << R_SHIFT) + \
(blend_ ## name (GREEN_8 (d), da, GREEN_8 (s), sa) << G_SHIFT) + \
(blend_ ## name (BLUE_8 (d), da, BLUE_8 (s), sa)); \
@ -556,7 +556,7 @@ combine_multiply_ca (pixman_implementation_t *imp,
UN8x4_MUL_UN8x4_ADD_UN8x4_MUL_UN8 (result, ~m, s, ida); \
\
result += \
(DIV_ONE_UN8 (ALPHA_8 (m) * (uint32_t)da) << A_SHIFT) + \
(DIV_ONE_UN8 (ALPHA_8 (m) * da) << A_SHIFT) + \
(blend_ ## name (RED_8 (d), da, RED_8 (s), RED_8 (m)) << R_SHIFT) + \
(blend_ ## name (GREEN_8 (d), da, GREEN_8 (s), GREEN_8 (m)) << G_SHIFT) + \
(blend_ ## name (BLUE_8 (d), da, BLUE_8 (s), BLUE_8 (m))); \
@ -853,7 +853,7 @@ PDF_SEPARABLE_BLEND_MODE (exclusion)
*
* r * set_sat (C, s) = set_sat (x * C, r * s)
*
* The above holds for all non-zero x, because the x'es in the fraction for
* The above holds for all non-zero x, because they x'es in the fraction for
* C_mid cancel out. Specifically, it holds for x = r:
*
* r * set_sat (C, s) = set_sat (r_c, rs)
@ -889,7 +889,8 @@ PDF_SEPARABLE_BLEND_MODE (exclusion)
*
* a_s * a_d * B(s, d)
* = a_s * a_d * set_lum (set_sat (S/a_s, SAT (D/a_d)), LUM (D/a_d), 1)
* = set_lum (set_sat (a_d * S, a_s * SAT (D)), a_s * LUM (D), a_s * a_d)
* = a_s * a_d * set_lum (set_sat (a_d * S, a_s * SAT (D)),
* a_s * LUM (D), a_s * a_d)
*
*/
@ -930,7 +931,7 @@ PDF_SEPARABLE_BLEND_MODE (exclusion)
blend_ ## name (c, dc, da, sc, sa); \
\
*(dest + i) = result + \
(DIV_ONE_UN8 (sa * (uint32_t)da) << A_SHIFT) + \
(DIV_ONE_UN8 (sa * da) << A_SHIFT) + \
(DIV_ONE_UN8 (c[0]) << R_SHIFT) + \
(DIV_ONE_UN8 (c[1]) << G_SHIFT) + \
(DIV_ONE_UN8 (c[2])); \
@ -1147,7 +1148,9 @@ PDF_NON_SEPARABLE_BLEND_MODE (hsl_luminosity)
#undef CH_MIN
#undef PDF_NON_SEPARABLE_BLEND_MODE
/* All of the disjoint/conjoint composing functions
/* Overlay
*
* All of the disjoint composing functions
*
* The four entries in the first column indicate what source contributions
* come from each of the four areas of the picture -- areas covered by neither
@ -1168,9 +1171,6 @@ PDF_NON_SEPARABLE_BLEND_MODE (hsl_luminosity)
* (0,0,B,A) max(1-(1-b)/a,0) min(1,(1-a)/b) min(1,b/a) max(1-a/b,0)
* (0,A,0,B) min(1,(1-b)/a) max(1-(1-a)/b,0) max(1-b/a,0) min(1,a/b)
* (0,A,B,0) min(1,(1-b)/a) min(1,(1-a)/b) max(1-b/a,0) max(1-a/b,0)
*
* See http://marc.info/?l=xfree-render&m=99792000027857&w=2 for more
* information about these operators.
*/
#define COMBINE_A_OUT 1
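For context, the blend_ ## name functions that the PDF_SEPARABLE_BLEND_MODE hunks above expand around implement the B term of the Cra formula quoted earlier in this file. The simplest case, multiply, is just the product of the two premultiplied channels scaled back into [0, 255]; a sketch from memory of the upstream shape, where DIV_ONE_UN8 is pixman's rounded divide-by-255 (the unused da/sa parameters are kept because other modes need them):

static inline uint32_t
blend_multiply (uint32_t dca, uint32_t da, uint32_t sca, uint32_t sa)
{
    return DIV_ONE_UN8 (sca * dca);
}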

View File

@ -29,10 +29,10 @@
*/
#define MUL_UN8(a, b, t) \
((t) = (a) * (uint16_t)(b) + ONE_HALF, ((((t) >> G_SHIFT ) + (t) ) >> G_SHIFT ))
((t) = (a) * (b) + ONE_HALF, ((((t) >> G_SHIFT ) + (t) ) >> G_SHIFT ))
#define DIV_UN8(a, b) \
(((uint16_t) (a) * MASK + ((b) / 2)) / (b))
(((uint16_t) (a) * MASK) / (b))
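Besides dropping the widening cast from MUL_UN8, the backout removes the + (b) / 2 bias from DIV_UN8, turning a round-to-nearest division into a truncating one. A minimal demonstration of the one-step difference, assuming MASK = 0xff as in this header:

#include <assert.h>
#include <stdint.h>

#define MASK 0xff

int
main (void)
{
    uint8_t a = 1, b = 2;   /* exact result: 1 * 255 / 2 = 127.5 */

    assert (((uint16_t) a * MASK) / b == 127);           /* truncating form */
    assert (((uint16_t) a * MASK + b / 2) / b == 128);   /* rounding form */
    return 0;
}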
#define ADD_UN8(x, y, t) \
((t) = (x) + (y), \

View File

@ -441,7 +441,7 @@ combine_saturate_u (pixman_implementation_t *imp,
* PDF_NON_SEPARABLE_BLEND_MODE macros, which take the blend function as an
* argument. Note that this implementation operates on premultiplied colors,
* while the PDF specification does not. Therefore the code uses the formula
* Cra = (1 - as) . Dca + (1 - ad) . Sca + B(Dca, ad, Sca, as)
* ar.Cra = (1 - as) . Dca + (1 - ad) . Sca + B(Dca, ad, Sca, as)
*/
/*
@ -526,7 +526,7 @@ combine_multiply_ca (pixman_implementation_t *imp,
UN16x4_MUL_UN16_ADD_UN16x4_MUL_UN16 (result, isa, s, ida); \
\
*(dest + i) = result + \
(DIV_ONE_UN16 (sa * (uint64_t)da) << A_SHIFT) + \
(DIV_ONE_UN16 (sa * da) << A_SHIFT) + \
(blend_ ## name (RED_16 (d), da, RED_16 (s), sa) << R_SHIFT) + \
(blend_ ## name (GREEN_16 (d), da, GREEN_16 (s), sa) << G_SHIFT) + \
(blend_ ## name (BLUE_16 (d), da, BLUE_16 (s), sa)); \
@ -556,7 +556,7 @@ combine_multiply_ca (pixman_implementation_t *imp,
UN16x4_MUL_UN16x4_ADD_UN16x4_MUL_UN16 (result, ~m, s, ida); \
\
result += \
(DIV_ONE_UN16 (ALPHA_16 (m) * (uint64_t)da) << A_SHIFT) + \
(DIV_ONE_UN16 (ALPHA_16 (m) * da) << A_SHIFT) + \
(blend_ ## name (RED_16 (d), da, RED_16 (s), RED_16 (m)) << R_SHIFT) + \
(blend_ ## name (GREEN_16 (d), da, GREEN_16 (s), GREEN_16 (m)) << G_SHIFT) + \
(blend_ ## name (BLUE_16 (d), da, BLUE_16 (s), BLUE_16 (m))); \
@ -853,7 +853,7 @@ PDF_SEPARABLE_BLEND_MODE (exclusion)
*
* r * set_sat (C, s) = set_sat (x * C, r * s)
*
* The above holds for all non-zero x, because the x'es in the fraction for
* The above holds for all non-zero x, because they x'es in the fraction for
* C_mid cancel out. Specifically, it holds for x = r:
*
* r * set_sat (C, s) = set_sat (r_c, rs)
@ -889,7 +889,8 @@ PDF_SEPARABLE_BLEND_MODE (exclusion)
*
* a_s * a_d * B(s, d)
* = a_s * a_d * set_lum (set_sat (S/a_s, SAT (D/a_d)), LUM (D/a_d), 1)
* = set_lum (set_sat (a_d * S, a_s * SAT (D)), a_s * LUM (D), a_s * a_d)
* = a_s * a_d * set_lum (set_sat (a_d * S, a_s * SAT (D)),
* a_s * LUM (D), a_s * a_d)
*
*/
@ -930,7 +931,7 @@ PDF_SEPARABLE_BLEND_MODE (exclusion)
blend_ ## name (c, dc, da, sc, sa); \
\
*(dest + i) = result + \
(DIV_ONE_UN16 (sa * (uint64_t)da) << A_SHIFT) + \
(DIV_ONE_UN16 (sa * da) << A_SHIFT) + \
(DIV_ONE_UN16 (c[0]) << R_SHIFT) + \
(DIV_ONE_UN16 (c[1]) << G_SHIFT) + \
(DIV_ONE_UN16 (c[2])); \
@ -1147,7 +1148,9 @@ PDF_NON_SEPARABLE_BLEND_MODE (hsl_luminosity)
#undef CH_MIN
#undef PDF_NON_SEPARABLE_BLEND_MODE
/* All of the disjoint/conjoint composing functions
/* Overlay
*
* All of the disjoint composing functions
*
* The four entries in the first column indicate what source contributions
* come from each of the four areas of the picture -- areas covered by neither
@ -1168,9 +1171,6 @@ PDF_NON_SEPARABLE_BLEND_MODE (hsl_luminosity)
* (0,0,B,A) max(1-(1-b)/a,0) min(1,(1-a)/b) min(1,b/a) max(1-a/b,0)
* (0,A,0,B) min(1,(1-b)/a) max(1-(1-a)/b,0) max(1-b/a,0) min(1,a/b)
* (0,A,B,0) min(1,(1-b)/a) min(1,(1-a)/b) max(1-b/a,0) max(1-a/b,0)
*
* See http://marc.info/?l=xfree-render&m=99792000027857&w=2 for more
* information about these operators.
*/
#define COMBINE_A_OUT 1

View File

@ -29,10 +29,10 @@
*/
#define MUL_UN16(a, b, t) \
((t) = (a) * (uint32_t)(b) + ONE_HALF, ((((t) >> G_SHIFT ) + (t) ) >> G_SHIFT ))
((t) = (a) * (b) + ONE_HALF, ((((t) >> G_SHIFT ) + (t) ) >> G_SHIFT ))
#define DIV_UN16(a, b) \
(((uint32_t) (a) * MASK + ((b) / 2)) / (b))
(((uint32_t) (a) * MASK) / (b))
#define ADD_UN16(x, y, t) \
((t) = (x) + (y), \

View File

@ -96,10 +96,6 @@
#endif
/* member offsets */
#define CONTAINER_OF(type, member, data) \
((type *)(((uint8_t *)data) - offsetof (type, member)))
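CONTAINER_OF, removed here, is the same idiom as the Linux kernel's container_of: given a pointer to a member, subtract the member's offset to recover the enclosing struct. A self-contained usage sketch with illustrative types (pixman uses it, for example, to get from an embedded list link back to its owner):

#include <stddef.h>
#include <stdint.h>

#define CONTAINER_OF(type, member, data) \
    ((type *)(((uint8_t *)data) - offsetof (type, member)))

typedef struct link_t { struct link_t *next, *prev; } link_t;
typedef struct { int value; link_t link; } node_t;

static node_t *
node_from_link (link_t *l)
{
    return CONTAINER_OF (node_t, link, l);
}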
/* TLS */
#if defined(PIXMAN_NO_TLS)

View File

@ -173,7 +173,7 @@ conical_get_scanline_wide (pixman_iter_t *iter, const uint32_t *mask)
void
_pixman_conical_gradient_iter_init (pixman_image_t *image, pixman_iter_t *iter)
{
if (iter->iter_flags & ITER_NARROW)
if (iter->flags & ITER_NARROW)
iter->get_scanline = conical_get_scanline_narrow;
else
iter->get_scanline = conical_get_scanline_wide;

View File

@ -809,48 +809,6 @@ fast_composite_add_8_8 (pixman_implementation_t *imp,
}
}
static void
fast_composite_add_0565_0565 (pixman_implementation_t *imp,
pixman_composite_info_t *info)
{
PIXMAN_COMPOSITE_ARGS (info);
uint16_t *dst_line, *dst;
uint32_t d;
uint16_t *src_line, *src;
uint32_t s;
int dst_stride, src_stride;
int32_t w;
PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint16_t, src_stride, src_line, 1);
PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
while (height--)
{
dst = dst_line;
dst_line += dst_stride;
src = src_line;
src_line += src_stride;
w = width;
while (w--)
{
s = *src++;
if (s)
{
d = *dst;
s = CONVERT_0565_TO_8888 (s);
if (d)
{
d = CONVERT_0565_TO_8888 (d);
UN8x4_ADD_UN8x4 (s, d);
}
*dst = CONVERT_8888_TO_0565 (s);
}
dst++;
}
}
}
static void
fast_composite_add_8888_8888 (pixman_implementation_t *imp,
pixman_composite_info_t *info)
@ -1257,8 +1215,8 @@ scaled_bilinear_scanline_8888_565_OVER (uint16_t * dst,
d = *dst;
src = bilinear_interpolation (tl, tr,
bl, br,
pixman_fixed_to_bilinear_weight(vx),
wb);
interpolation_coord(vx),
wb >> (8 - INTERPOLATION_PRECISION_BITS));
vx += unit_x;
result = over (src, CONVERT_0565_TO_0888 (d));
*dst++ = CONVERT_8888_TO_0565(result);
@ -1290,8 +1248,8 @@ scaled_bilinear_scanline_8888_8888_OVER (uint32_t * dst,
d = *dst;
src = bilinear_interpolation (tl, tr,
bl, br,
pixman_fixed_to_bilinear_weight(vx),
wb);
interpolation_coord(vx),
wb >> (8 - INTERPOLATION_PRECISION_BITS));
vx += unit_x;
*dst++ = over (src, d);
}
@ -1323,8 +1281,8 @@ scaled_bilinear_scanline_565_565_SRC (uint16_t * dst,
CONVERT_0565_TO_8888(tr),
CONVERT_0565_TO_8888(bl),
CONVERT_0565_TO_8888(br),
pixman_fixed_to_bilinear_weight(vx),
wb);
interpolation_coord(vx),
wb >> (8 - INTERPOLATION_PRECISION_BITS));
vx += unit_x;
*dst++ = CONVERT_8888_TO_0565(d);
}
@ -1394,9 +1352,7 @@ scaled_bilinear_scanline_565_565_SRC (uint16_t * dst,
uint16_t bl = src_bottom [pixman_fixed_to_int (vx)];
uint16_t br = src_bottom [pixman_fixed_to_int (vx) + 1];
uint16_t d = bilinear_interpolation_565 (tl, tr, bl, br,
pixman_fixed_to_bilinear_weight(vx),
wb);
uint16_t d = bilinear_interpolation_565 (tl, tr, bl, br, (vx >> 12) & 0xf, wb >> 4);
vx += unit_x;
*dst++ = d;
}
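Both helpers being swapped in these hunks reduce a 16.16 fixed-point coordinate to a bilinear weight; they differ only in how many fraction bits survive. The restored interpolation_coord keeps INTERPOLATION_PRECISION_BITS of them (at most 8, judging by the wb >> (8 - INTERPOLATION_PRECISION_BITS) rescaling), while the backed-out pixman_fixed_to_bilinear_weight keeps BILINEAR_INTERPOLATION_BITS. The common shape, parameterized; a sketch rather than either tree's literal definition:

#include <stdint.h>

/* Keep the top n_bits of the 16-bit fraction of a 16.16 coordinate. */
static inline int
fixed_to_weight (int32_t x, int n_bits)
{
    return (x >> (16 - n_bits)) & ((1 << n_bits) - 1);
}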
@ -1502,9 +1458,8 @@ fast_composite_tiled_repeat (pixman_implementation_t *imp,
src_bpp = PIXMAN_FORMAT_BPP (src_image->bits.format);
if (src_image->bits.width < REPEAT_MIN_WIDTH &&
(src_bpp == 32 || src_bpp == 16 || src_bpp == 8) &&
!src_image->bits.indexed)
if (src_image->bits.width < REPEAT_MIN_WIDTH &&
(src_bpp == 32 || src_bpp == 16 || src_bpp == 8))
{
sx = src_x;
sx = MOD (sx, src_image->bits.width);
@ -2101,8 +2056,6 @@ static const pixman_fast_path_t c_fast_paths[] =
PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, fast_composite_over_8888_8888),
PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, x8b8g8r8, fast_composite_over_8888_8888),
PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, b5g6r5, fast_composite_over_8888_0565),
PIXMAN_STD_FAST_PATH (ADD, r5g6b5, null, r5g6b5, fast_composite_add_0565_0565),
PIXMAN_STD_FAST_PATH (ADD, b5g6r5, null, b5g6r5, fast_composite_add_0565_0565),
PIXMAN_STD_FAST_PATH (ADD, a8r8g8b8, null, a8r8g8b8, fast_composite_add_8888_8888),
PIXMAN_STD_FAST_PATH (ADD, a8b8g8r8, null, a8b8g8r8, fast_composite_add_8888_8888),
PIXMAN_STD_FAST_PATH (ADD, a8, null, a8, fast_composite_add_8_8),

View File

@ -110,7 +110,7 @@ general_composite_rect (pixman_implementation_t *imp,
pixman_iter_t src_iter, mask_iter, dest_iter;
pixman_combine_32_func_t compose;
pixman_bool_t component_alpha;
iter_flags_t narrow, src_iter_flags;
iter_flags_t narrow, src_flags;
iter_flags_t rgb16;
int Bpp;
int i;
@ -155,14 +155,14 @@ general_composite_rect (pixman_implementation_t *imp,
dest_buffer = mask_buffer + width * Bpp;
/* src iter */
src_iter_flags = narrow | op_flags[op].src | rgb16;
src_flags = narrow | op_flags[op].src | rgb16;
_pixman_implementation_src_iter_init (imp->toplevel, &src_iter, src_image,
src_x, src_y, width, height,
src_buffer, src_iter_flags, info->src_flags);
src_buffer, src_flags);
/* mask iter */
if ((src_iter_flags & (ITER_IGNORE_ALPHA | ITER_IGNORE_RGB)) ==
if ((src_flags & (ITER_IGNORE_ALPHA | ITER_IGNORE_RGB)) ==
(ITER_IGNORE_ALPHA | ITER_IGNORE_RGB))
{
/* If it doesn't matter what the source is, then it doesn't matter
@ -179,12 +179,12 @@ general_composite_rect (pixman_implementation_t *imp,
_pixman_implementation_src_iter_init (
imp->toplevel, &mask_iter, mask_image, mask_x, mask_y, width, height,
mask_buffer, narrow | (component_alpha? 0 : ITER_IGNORE_RGB), info->mask_flags);
mask_buffer, narrow | (component_alpha? 0 : ITER_IGNORE_RGB));
/* dest iter */
_pixman_implementation_dest_iter_init (
imp->toplevel, &dest_iter, dest_image, dest_x, dest_y, width, height,
dest_buffer, narrow | op_flags[op].dst | rgb16, info->dest_flags);
dest_buffer, narrow | op_flags[op].dst | rgb16);
compose = _pixman_implementation_lookup_combiner (
imp->toplevel, op, component_alpha, narrow, !!rgb16);

View File

@ -1,676 +0,0 @@
/*
* Copyright 2010, 2012, Soren Sandmann <sandmann@cs.au.dk>
* Copyright 2010, 2011, 2012, Red Hat, Inc
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Author: Soren Sandmann <sandmann@cs.au.dk>
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "pixman-private.h"
#include <stdlib.h>
typedef struct glyph_metrics_t glyph_metrics_t;
typedef struct glyph_t glyph_t;
#define TOMBSTONE ((glyph_t *)0x1)
/* XXX: These numbers are arbitrary---we've never done any measurements.
*/
#define N_GLYPHS_HIGH_WATER (16384)
#define N_GLYPHS_LOW_WATER (8192)
#define HASH_SIZE (2 * N_GLYPHS_HIGH_WATER)
#define HASH_MASK (HASH_SIZE - 1)
struct glyph_t
{
void * font_key;
void * glyph_key;
int origin_x;
int origin_y;
pixman_image_t * image;
pixman_link_t mru_link;
};
struct pixman_glyph_cache_t
{
int n_glyphs;
int n_tombstones;
int freeze_count;
pixman_list_t mru;
glyph_t * glyphs[HASH_SIZE];
};
static void
free_glyph (glyph_t *glyph)
{
pixman_list_unlink (&glyph->mru_link);
pixman_image_unref (glyph->image);
free (glyph);
}
static unsigned int
hash (const void *font_key, const void *glyph_key)
{
size_t key = (size_t)font_key + (size_t)glyph_key;
/* This hash function is based on one found on Thomas Wang's
* web page at
*
* http://www.concentric.net/~Ttwang/tech/inthash.htm
*
*/
key = (key << 15) - key - 1;
key = key ^ (key >> 12);
key = key + (key << 2);
key = key ^ (key >> 4);
key = key + (key << 3) + (key << 11);
key = key ^ (key >> 16);
return key;
}
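/* Illustrative note (not in the original file): lookup, insertion and removal
 * below all probe linearly from hash (font_key, glyph_key), wrapping with
 * HASH_MASK; removed entries are replaced by TOMBSTONE rather than NULL so
 * that existing probe chains are not broken.
 */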
static glyph_t *
lookup_glyph (pixman_glyph_cache_t *cache,
void *font_key,
void *glyph_key)
{
unsigned idx;
glyph_t *g;
idx = hash (font_key, glyph_key);
while ((g = cache->glyphs[idx++ & HASH_MASK]))
{
if (g != TOMBSTONE &&
g->font_key == font_key &&
g->glyph_key == glyph_key)
{
return g;
}
}
return NULL;
}
static void
insert_glyph (pixman_glyph_cache_t *cache,
glyph_t *glyph)
{
unsigned idx;
glyph_t **loc;
idx = hash (glyph->font_key, glyph->glyph_key);
/* Note: we assume that there is room in the table. If there isn't,
* this will be an infinite loop.
*/
do
{
loc = &cache->glyphs[idx++ & HASH_MASK];
} while (*loc && *loc != TOMBSTONE);
if (*loc == TOMBSTONE)
cache->n_tombstones--;
cache->n_glyphs++;
*loc = glyph;
}
static void
remove_glyph (pixman_glyph_cache_t *cache,
glyph_t *glyph)
{
unsigned idx;
idx = hash (glyph->font_key, glyph->glyph_key);
while (cache->glyphs[idx & HASH_MASK] != glyph)
idx++;
cache->glyphs[idx & HASH_MASK] = TOMBSTONE;
cache->n_tombstones++;
cache->n_glyphs--;
/* Eliminate tombstones if possible */
if (cache->glyphs[(idx + 1) & HASH_MASK] == NULL)
{
while (cache->glyphs[idx & HASH_MASK] == TOMBSTONE)
{
cache->glyphs[idx & HASH_MASK] = NULL;
cache->n_tombstones--;
idx--;
}
}
}
static void
clear_table (pixman_glyph_cache_t *cache)
{
int i;
for (i = 0; i < HASH_SIZE; ++i)
{
glyph_t *glyph = cache->glyphs[i];
if (glyph && glyph != TOMBSTONE)
free_glyph (glyph);
cache->glyphs[i] = NULL;
}
cache->n_glyphs = 0;
cache->n_tombstones = 0;
}
PIXMAN_EXPORT pixman_glyph_cache_t *
pixman_glyph_cache_create (void)
{
pixman_glyph_cache_t *cache;
if (!(cache = malloc (sizeof *cache)))
return NULL;
memset (cache->glyphs, 0, sizeof (cache->glyphs));
cache->n_glyphs = 0;
cache->n_tombstones = 0;
cache->freeze_count = 0;
pixman_list_init (&cache->mru);
return cache;
}
PIXMAN_EXPORT void
pixman_glyph_cache_destroy (pixman_glyph_cache_t *cache)
{
return_if_fail (cache->freeze_count == 0);
clear_table (cache);
free (cache);
}
PIXMAN_EXPORT void
pixman_glyph_cache_freeze (pixman_glyph_cache_t *cache)
{
cache->freeze_count++;
}
PIXMAN_EXPORT void
pixman_glyph_cache_thaw (pixman_glyph_cache_t *cache)
{
if (--cache->freeze_count == 0 &&
cache->n_glyphs + cache->n_tombstones > N_GLYPHS_HIGH_WATER)
{
if (cache->n_tombstones > N_GLYPHS_HIGH_WATER)
{
/* More than half the entries are
* tombstones. Just dump the whole table.
*/
clear_table (cache);
}
while (cache->n_glyphs > N_GLYPHS_LOW_WATER)
{
glyph_t *glyph = CONTAINER_OF (glyph_t, mru_link, cache->mru.tail);
remove_glyph (cache, glyph);
free_glyph (glyph);
}
}
}
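/* Illustrative note (not in the original file): eviction is driven by the MRU
 * list; once thawed above N_GLYPHS_HIGH_WATER, glyphs are discarded from the
 * tail of cache->mru (least recently used first) until N_GLYPHS_LOW_WATER is
 * reached.
 */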
PIXMAN_EXPORT const void *
pixman_glyph_cache_lookup (pixman_glyph_cache_t *cache,
void *font_key,
void *glyph_key)
{
return lookup_glyph (cache, font_key, glyph_key);
}
PIXMAN_EXPORT const void *
pixman_glyph_cache_insert (pixman_glyph_cache_t *cache,
void *font_key,
void *glyph_key,
int origin_x,
int origin_y,
pixman_image_t *image)
{
glyph_t *glyph;
int32_t width, height;
return_val_if_fail (cache->freeze_count > 0, NULL);
return_val_if_fail (image->type == BITS, NULL);
width = image->bits.width;
height = image->bits.height;
if (cache->n_glyphs >= HASH_SIZE)
return NULL;
if (!(glyph = malloc (sizeof *glyph)))
return NULL;
glyph->font_key = font_key;
glyph->glyph_key = glyph_key;
glyph->origin_x = origin_x;
glyph->origin_y = origin_y;
if (!(glyph->image = pixman_image_create_bits (
image->bits.format, width, height, NULL, -1)))
{
free (glyph);
return NULL;
}
pixman_image_composite32 (PIXMAN_OP_SRC,
image, NULL, glyph->image, 0, 0, 0, 0, 0, 0,
width, height);
if (PIXMAN_FORMAT_A (glyph->image->bits.format) != 0 &&
PIXMAN_FORMAT_RGB (glyph->image->bits.format) != 0)
{
pixman_image_set_component_alpha (glyph->image, TRUE);
}
pixman_list_prepend (&cache->mru, &glyph->mru_link);
_pixman_image_validate (glyph->image);
insert_glyph (cache, glyph);
return glyph;
}
PIXMAN_EXPORT void
pixman_glyph_cache_remove (pixman_glyph_cache_t *cache,
void *font_key,
void *glyph_key)
{
glyph_t *glyph;
if ((glyph = lookup_glyph (cache, font_key, glyph_key)))
{
remove_glyph (cache, glyph);
free_glyph (glyph);
}
}
PIXMAN_EXPORT void
pixman_glyph_get_extents (pixman_glyph_cache_t *cache,
int n_glyphs,
pixman_glyph_t *glyphs,
pixman_box32_t *extents)
{
int i;
extents->x1 = extents->y1 = INT32_MAX;
extents->x2 = extents->y2 = INT32_MIN;
for (i = 0; i < n_glyphs; ++i)
{
glyph_t *glyph = (glyph_t *)glyphs[i].glyph;
int x1, y1, x2, y2;
x1 = glyphs[i].x - glyph->origin_x;
y1 = glyphs[i].y - glyph->origin_y;
x2 = glyphs[i].x - glyph->origin_x + glyph->image->bits.width;
y2 = glyphs[i].y - glyph->origin_y + glyph->image->bits.height;
if (x1 < extents->x1)
extents->x1 = x1;
if (y1 < extents->y1)
extents->y1 = y1;
if (x2 > extents->x2)
extents->x2 = x2;
if (y2 > extents->y2)
extents->y2 = y2;
}
}
/* This function returns a format that is suitable for use as a mask for the
* set of glyphs in question.
*/
PIXMAN_EXPORT pixman_format_code_t
pixman_glyph_get_mask_format (pixman_glyph_cache_t *cache,
int n_glyphs,
pixman_glyph_t * glyphs)
{
pixman_format_code_t format = PIXMAN_a1;
int i;
for (i = 0; i < n_glyphs; ++i)
{
const glyph_t *glyph = glyphs[i].glyph;
pixman_format_code_t glyph_format = glyph->image->bits.format;
if (PIXMAN_FORMAT_TYPE (glyph_format) == PIXMAN_TYPE_A)
{
if (PIXMAN_FORMAT_A (glyph_format) > PIXMAN_FORMAT_A (format))
format = glyph_format;
}
else
{
return PIXMAN_a8r8g8b8;
}
}
return format;
}
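/* Illustrative examples (not in the original file): a mix of a1 and a8 glyphs
 * yields PIXMAN_a8 (the widest alpha-only format seen), while any color glyph
 * forces PIXMAN_a8r8g8b8 so the mask can carry per-component alpha.
 */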
static pixman_bool_t
box32_intersect (pixman_box32_t *dest,
const pixman_box32_t *box1,
const pixman_box32_t *box2)
{
dest->x1 = MAX (box1->x1, box2->x1);
dest->y1 = MAX (box1->y1, box2->y1);
dest->x2 = MIN (box1->x2, box2->x2);
dest->y2 = MIN (box1->y2, box2->y2);
return dest->x2 > dest->x1 && dest->y2 > dest->y1;
}
PIXMAN_EXPORT void
pixman_composite_glyphs_no_mask (pixman_op_t op,
pixman_image_t *src,
pixman_image_t *dest,
int32_t src_x,
int32_t src_y,
int32_t dest_x,
int32_t dest_y,
pixman_glyph_cache_t *cache,
int n_glyphs,
pixman_glyph_t *glyphs)
{
pixman_region32_t region;
pixman_format_code_t glyph_format = PIXMAN_null;
uint32_t glyph_flags = 0;
pixman_format_code_t dest_format;
uint32_t dest_flags;
pixman_composite_func_t func = NULL;
pixman_implementation_t *implementation = NULL;
pixman_composite_info_t info;
int i;
_pixman_image_validate (src);
_pixman_image_validate (dest);
dest_format = dest->common.extended_format_code;
dest_flags = dest->common.flags;
pixman_region32_init (&region);
if (!_pixman_compute_composite_region32 (
&region,
src, NULL, dest,
src_x - dest_x, src_y - dest_y, 0, 0, 0, 0,
dest->bits.width, dest->bits.height))
{
goto out;
}
info.op = op;
info.src_image = src;
info.dest_image = dest;
info.src_flags = src->common.flags;
info.dest_flags = dest->common.flags;
for (i = 0; i < n_glyphs; ++i)
{
glyph_t *glyph = (glyph_t *)glyphs[i].glyph;
pixman_image_t *glyph_img = glyph->image;
pixman_box32_t glyph_box;
pixman_box32_t *pbox;
uint32_t extra = FAST_PATH_SAMPLES_COVER_CLIP_NEAREST;
pixman_box32_t composite_box;
int n;
glyph_box.x1 = dest_x + glyphs[i].x - glyph->origin_x;
glyph_box.y1 = dest_y + glyphs[i].y - glyph->origin_y;
glyph_box.x2 = glyph_box.x1 + glyph->image->bits.width;
glyph_box.y2 = glyph_box.y1 + glyph->image->bits.height;
pbox = pixman_region32_rectangles (&region, &n);
info.mask_image = glyph_img;
while (n--)
{
if (box32_intersect (&composite_box, pbox, &glyph_box))
{
if (glyph_img->common.extended_format_code != glyph_format ||
glyph_img->common.flags != glyph_flags)
{
glyph_format = glyph_img->common.extended_format_code;
glyph_flags = glyph_img->common.flags;
_pixman_lookup_composite_function (
get_implementation(), op,
src->common.extended_format_code, src->common.flags,
glyph_format, glyph_flags | extra,
dest_format, dest_flags,
&implementation, &func);
if (!func)
goto out;
}
info.src_x = src_x + composite_box.x1 - dest_x;
info.src_y = src_y + composite_box.y1 - dest_y;
info.mask_x = composite_box.x1 - (dest_x + glyphs[i].x - glyph->origin_x);
info.mask_y = composite_box.y1 - (dest_y + glyphs[i].y - glyph->origin_y);
info.dest_x = composite_box.x1;
info.dest_y = composite_box.y1;
info.width = composite_box.x2 - composite_box.x1;
info.height = composite_box.y2 - composite_box.y1;
info.mask_flags = glyph_flags;
func (implementation, &info);
}
pbox++;
}
pixman_list_move_to_front (&cache->mru, &glyph->mru_link);
}
out:
pixman_region32_fini (&region);
}
static void
add_glyphs (pixman_glyph_cache_t *cache,
pixman_image_t *dest,
int off_x, int off_y,
int n_glyphs, pixman_glyph_t *glyphs)
{
pixman_format_code_t glyph_format = PIXMAN_null;
uint32_t glyph_flags = 0;
pixman_composite_func_t func = NULL;
pixman_implementation_t *implementation = NULL;
uint32_t dest_format;
uint32_t dest_flags;
pixman_box32_t dest_box;
pixman_composite_info_t info;
pixman_image_t *white_img = NULL;
pixman_bool_t white_src = FALSE;
int i;
_pixman_image_validate (dest);
dest_format = dest->common.extended_format_code;
dest_flags = dest->common.flags;
info.op = PIXMAN_OP_ADD;
info.dest_image = dest;
info.src_x = 0;
info.src_y = 0;
info.dest_flags = dest_flags;
dest_box.x1 = 0;
dest_box.y1 = 0;
dest_box.x2 = dest->bits.width;
dest_box.y2 = dest->bits.height;
for (i = 0; i < n_glyphs; ++i)
{
glyph_t *glyph = (glyph_t *)glyphs[i].glyph;
pixman_image_t *glyph_img = glyph->image;
pixman_box32_t glyph_box;
pixman_box32_t composite_box;
if (glyph_img->common.extended_format_code != glyph_format ||
glyph_img->common.flags != glyph_flags)
{
pixman_format_code_t src_format, mask_format;
glyph_format = glyph_img->common.extended_format_code;
glyph_flags = glyph_img->common.flags;
if (glyph_format == dest->bits.format)
{
src_format = glyph_format;
mask_format = PIXMAN_null;
info.src_flags = glyph_flags | FAST_PATH_SAMPLES_COVER_CLIP_NEAREST;
info.mask_flags = FAST_PATH_IS_OPAQUE;
info.mask_image = NULL;
white_src = FALSE;
}
else
{
if (!white_img)
{
pixman_color_t white = { 0xffff, 0xffff, 0xffff, 0xffff };
if (!(white_img = pixman_image_create_solid_fill (&white)))
goto out;
_pixman_image_validate (white_img);
}
src_format = PIXMAN_solid;
mask_format = glyph_format;
info.src_flags = white_img->common.flags;
info.mask_flags = glyph_flags | FAST_PATH_SAMPLES_COVER_CLIP_NEAREST;
info.src_image = white_img;
white_src = TRUE;
}
_pixman_lookup_composite_function (
get_implementation(), PIXMAN_OP_ADD,
src_format, info.src_flags,
mask_format, info.mask_flags,
dest_format, dest_flags,
&implementation, &func);
if (!func)
goto out;
}
glyph_box.x1 = glyphs[i].x - glyph->origin_x + off_x;
glyph_box.y1 = glyphs[i].y - glyph->origin_y + off_y;
glyph_box.x2 = glyph_box.x1 + glyph->image->bits.width;
glyph_box.y2 = glyph_box.y1 + glyph->image->bits.height;
if (box32_intersect (&composite_box, &glyph_box, &dest_box))
{
int src_x = composite_box.x1 - glyph_box.x1;
int src_y = composite_box.y1 - glyph_box.y1;
if (white_src)
info.mask_image = glyph_img;
else
info.src_image = glyph_img;
info.mask_x = info.src_x = src_x;
info.mask_y = info.src_y = src_y;
info.dest_x = composite_box.x1;
info.dest_y = composite_box.y1;
info.width = composite_box.x2 - composite_box.x1;
info.height = composite_box.y2 - composite_box.y1;
func (implementation, &info);
pixman_list_move_to_front (&cache->mru, &glyph->mru_link);
}
}
out:
if (white_img)
pixman_image_unref (white_img);
}
/* Conceptually, for each glyph, (white IN glyph) is PIXMAN_OP_ADDed to an
* infinitely big mask image, positioned so that the glyph origin point
* lands at (glyphs[i].x, glyphs[i].y).
*
* Then (mask_x, mask_y) in the infinite mask and (src_x, src_y) in the source
* image are both aligned with (dest_x, dest_y) in the destination image. Then
* these three images are composited within the
*
* (dest_x, dest_y, dest_x + width, dest_y + height)
*
* rectangle.
*
* TODO:
* - Trim the mask to the destination clip/image?
* - Trim composite region based on sources, when the op ignores 0s.
*/
PIXMAN_EXPORT void
pixman_composite_glyphs (pixman_op_t op,
pixman_image_t *src,
pixman_image_t *dest,
pixman_format_code_t mask_format,
int32_t src_x,
int32_t src_y,
int32_t mask_x,
int32_t mask_y,
int32_t dest_x,
int32_t dest_y,
int32_t width,
int32_t height,
pixman_glyph_cache_t *cache,
int n_glyphs,
pixman_glyph_t *glyphs)
{
pixman_image_t *mask;
if (!(mask = pixman_image_create_bits (mask_format, width, height, NULL, -1)))
return;
if (PIXMAN_FORMAT_A (mask_format) != 0 &&
PIXMAN_FORMAT_RGB (mask_format) != 0)
{
pixman_image_set_component_alpha (mask, TRUE);
}
add_glyphs (cache, mask, - mask_x, - mask_y, n_glyphs, glyphs);
pixman_image_composite32 (op, src, mask, dest,
src_x, src_y,
0, 0,
dest_x, dest_y,
width, height);
pixman_image_unref (mask);
}
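/* Minimal usage sketch (illustrative, not part of the original file; the
 * font_key/glyph_key/glyph_image parameters are hypothetical and error
 * handling is omitted). The cache must stay frozen while glyph pointers
 * obtained from lookup/insert are in use, so they cannot be evicted.
 */
#if 0
static void
composite_glyphs_example (pixman_image_t *src, pixman_image_t *dest,
                          pixman_glyph_cache_t *cache,
                          void *font_key, void *glyph_key,
                          pixman_image_t *glyph_image)
{
    pixman_glyph_t g;

    pixman_glyph_cache_freeze (cache);

    if (!(g.glyph = pixman_glyph_cache_lookup (cache, font_key, glyph_key)))
    {
        g.glyph = pixman_glyph_cache_insert (cache, font_key, glyph_key,
                                             0, 0, glyph_image);
    }
    g.x = 10;
    g.y = 20;

    pixman_composite_glyphs (PIXMAN_OP_OVER, src, dest,
                             pixman_glyph_get_mask_format (cache, 1, &g),
                             0, 0,  /* src_x, src_y */
                             0, 0,  /* mask_x, mask_y */
                             0, 0,  /* dest_x, dest_y */
                             pixman_image_get_width (dest),
                             pixman_image_get_height (dest),
                             cache, 1, &g);

    pixman_glyph_cache_thaw (cache);
}
#endif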

View File

@ -883,38 +883,16 @@ _pixman_image_get_solid (pixman_implementation_t *imp,
pixman_format_code_t format)
{
uint32_t result;
pixman_iter_t iter;
if (image->type == SOLID)
{
result = image->solid.color_32;
}
else if (image->type == BITS)
{
if (image->bits.format == PIXMAN_a8r8g8b8)
result = image->bits.bits[0];
else if (image->bits.format == PIXMAN_x8r8g8b8)
result = image->bits.bits[0] | 0xff000000;
else if (image->bits.format == PIXMAN_a8)
result = (*(uint8_t *)image->bits.bits) << 24;
else
goto otherwise;
}
else
{
pixman_iter_t iter;
_pixman_implementation_src_iter_init (
imp, &iter, image, 0, 0, 1, 1,
(uint8_t *)&result, ITER_NARROW);
otherwise:
_pixman_implementation_src_iter_init (
imp, &iter, image, 0, 0, 1, 1,
(uint8_t *)&result,
ITER_NARROW, image->common.flags);
result = *iter.get_scanline (&iter, NULL);
}
result = *iter.get_scanline (&iter, NULL);
/* If necessary, convert RGB <--> BGR. */
if (PIXMAN_FORMAT_TYPE (format) != PIXMAN_TYPE_ARGB
&& PIXMAN_FORMAT_TYPE (format) != PIXMAN_TYPE_ARGB_SRGB)
if (PIXMAN_FORMAT_TYPE (format) != PIXMAN_TYPE_ARGB)
{
result = (((result & 0xff000000) >> 0) |
((result & 0x00ff0000) >> 16) |

View File

@ -191,8 +191,7 @@ _pixman_implementation_src_iter_init (pixman_implementation_t *imp,
int width,
int height,
uint8_t *buffer,
iter_flags_t iter_flags,
uint32_t image_flags)
iter_flags_t flags)
{
iter->image = image;
iter->buffer = (uint32_t *)buffer;
@ -200,8 +199,7 @@ _pixman_implementation_src_iter_init (pixman_implementation_t *imp,
iter->y = y;
iter->width = width;
iter->height = height;
iter->iter_flags = iter_flags;
iter->image_flags = image_flags;
iter->flags = flags;
(*imp->src_iter_init) (imp, iter);
}
@ -215,8 +213,7 @@ _pixman_implementation_dest_iter_init (pixman_implementation_t *imp,
int width,
int height,
uint8_t *buffer,
iter_flags_t iter_flags,
uint32_t image_flags)
iter_flags_t flags)
{
iter->image = image;
iter->buffer = (uint32_t *)buffer;
@ -224,59 +221,7 @@ _pixman_implementation_dest_iter_init (pixman_implementation_t *imp,
iter->y = y;
iter->width = width;
iter->height = height;
iter->iter_flags = iter_flags;
iter->image_flags = image_flags;
iter->flags = flags;
(*imp->dest_iter_init) (imp, iter);
}
pixman_bool_t
_pixman_disabled (const char *name)
{
const char *env;
if ((env = getenv ("PIXMAN_DISABLE")))
{
do
{
const char *end;
int len;
if ((end = strchr (env, ' ')))
len = end - env;
else
len = strlen (env);
if (strlen (name) == len && strncmp (name, env, len) == 0)
{
printf ("pixman: Disabled %s implementation\n", name);
return TRUE;
}
env += len;
}
while (*env++);
}
return FALSE;
}
pixman_implementation_t *
_pixman_choose_implementation (void)
{
pixman_implementation_t *imp;
imp = _pixman_implementation_create_general();
if (!_pixman_disabled ("fast"))
imp = _pixman_implementation_create_fast_path (imp);
imp = _pixman_x86_get_implementations (imp);
imp = _pixman_arm_get_implementations (imp);
imp = _pixman_ppc_get_implementations (imp);
imp = _pixman_mips_get_implementations (imp);
imp = _pixman_implementation_create_noop (imp);
return imp;
}
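/* Note (illustrative): _pixman_disabled () scans the space-separated
 * PIXMAN_DISABLE environment variable, so running with PIXMAN_DISABLE="fast"
 * makes the check above skip the fast-path implementation and print a notice.
 */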

View File

@ -81,13 +81,27 @@ repeat (pixman_repeat_t repeat, int *c, int size)
return TRUE;
}
static force_inline int
pixman_fixed_to_bilinear_weight (pixman_fixed_t x)
#ifdef MOZ_GFX_OPTIMIZE_MOBILE
#define LOW_QUALITY_INTERPOLATION
#define LOWER_QUALITY_INTERPOLATION
#endif
#ifdef LOW_QUALITY_INTERPOLATION
#define INTERPOLATION_PRECISION_BITS 4
#else
#define INTERPOLATION_PRECISION_BITS 8
#endif
static force_inline int32_t
interpolation_coord(pixman_fixed_t t)
{
return (x >> (16 - BILINEAR_INTERPOLATION_BITS)) &
((1 << BILINEAR_INTERPOLATION_BITS) - 1);
#ifdef LOW_QUALITY_INTERPOLATION
return (t >> 12) & 0xf;
#else
return (t >> 8) & 0xff;
#endif
}
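/* Worked example (illustrative): for t = 0x18000 (1.5 in 16.16 fixed point)
 * the fractional part is 0x8000, so the low-quality path returns
 * (t >> 12) & 0xf == 8 (0.5 at 4-bit precision) while the full-quality path
 * returns (t >> 8) & 0xff == 128 (0.5 at 8-bit precision).
 */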
#if SIZEOF_LONG > 4
static force_inline uint32_t
@ -99,9 +113,6 @@ bilinear_interpolation (uint32_t tl, uint32_t tr,
uint64_t tl64, tr64, bl64, br64;
uint64_t f, r;
distx <<= (8 - BILINEAR_INTERPOLATION_BITS);
disty <<= (8 - BILINEAR_INTERPOLATION_BITS);
distxy = distx * disty;
distxiy = distx * (256 - disty);
distixy = (256 - distx) * disty;
@ -173,9 +184,6 @@ bilinear_interpolation (uint32_t tl, uint32_t tr,
int distxy, distxiy, distixy, distixiy;
uint32_t f, r;
distx <<= (8 - BILINEAR_INTERPOLATION_BITS);
disty <<= (8 - BILINEAR_INTERPOLATION_BITS);
distxy = distx * disty;
distxiy = (distx << 8) - distxy; /* distx * (256 - disty) */
distixy = (disty << 8) - distxy; /* disty * (256 - distx) */
@ -806,14 +814,12 @@ bilinear_pad_repeat_get_scanline_bounds (int32_t source_image_width,
* all source pixels are fetched from zero padding
* zone for NONE repeat
*
* Note: normally the sum of 'weight_top' and 'weight_bottom' is equal to
* BILINEAR_INTERPOLATION_RANGE, but sometimes it may be less than that
* for NONE repeat when handling fuzzy antialiased top or bottom image
* edges. Also both top and bottom weight variables are guaranteed to
* have value, which is less than BILINEAR_INTERPOLATION_RANGE.
* For example, the weights can fit into unsigned byte or be used
* with 8-bit SIMD multiplication instructions for 8-bit interpolation
* precision.
* Note: normally the sum of 'weight_top' and 'weight_bottom' is equal to 256,
* but sometimes it may be less than that for NONE repeat when handling
* fuzzy antialiased top or bottom image edges. Also both top and
* bottom weight variables are guaranteed to have value in 0-255
* range and can fit into unsigned byte or be used with 8-bit SIMD
* multiplication instructions.
*/
/* Replace a single "scanline_func" with "fetch_func" & "op_func" to allow optional
@ -968,18 +974,18 @@ fast_composite_scaled_bilinear ## scale_func_name (pixman_implementation_t *imp,
} \
\
y1 = pixman_fixed_to_int (vy); \
weight2 = pixman_fixed_to_bilinear_weight (vy); \
weight2 = (vy >> 8) & 0xff; \
if (weight2) \
{ \
/* both weight1 and weight2 are smaller than BILINEAR_INTERPOLATION_RANGE */ \
/* normal case, both row weights are in 0-255 range and fit unsigned byte */ \
y2 = y1 + 1; \
weight1 = BILINEAR_INTERPOLATION_RANGE - weight2; \
weight1 = 256 - weight2; \
} \
else \
{ \
/* set both top and bottom row to the same scanline and tweak weights */ \
/* set both top and bottom row to the same scanline, and weights to 128+128 */ \
y2 = y1; \
weight1 = weight2 = BILINEAR_INTERPOLATION_RANGE / 2; \
weight1 = weight2 = 128; \
} \
vy += unit_y; \
if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_PAD) \

View File

@ -398,9 +398,9 @@ _pixman_linear_gradient_iter_init (pixman_image_t *image, pixman_iter_t *iter)
if (0 && linear_gradient_is_horizontal (
iter->image, iter->x, iter->y, iter->width, iter->height))
{
if (iter->iter_flags & ITER_16)
if (iter->flags & ITER_16)
linear_get_scanline_16 (iter, NULL);
else if (iter->iter_flags & ITER_NARROW)
else if (iter->flags & ITER_NARROW)
linear_get_scanline_narrow (iter, NULL);
else
linear_get_scanline_wide (iter, NULL);
@ -409,9 +409,9 @@ _pixman_linear_gradient_iter_init (pixman_image_t *image, pixman_iter_t *iter)
}
else
{
if (iter->iter_flags & ITER_16)
if (iter->flags & ITER_16)
iter->get_scanline = linear_get_scanline_16;
else if (iter->iter_flags & ITER_NARROW)
else if (iter->flags & ITER_NARROW)
iter->get_scanline = linear_get_scanline_narrow;
else
iter->get_scanline = linear_get_scanline_wide;

View File

@ -25,7 +25,7 @@
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#include "config.h"
#endif
#include <math.h>
@ -471,8 +471,8 @@ pixman_f_transform_invert (struct pixman_f_transform * dst,
{
double det;
int i, j;
static const int a[3] = { 2, 2, 1 };
static const int b[3] = { 1, 0, 0 };
static int a[3] = { 2, 2, 1 };
static int b[3] = { 1, 0, 0 };
det = 0;
for (i = 0; i < 3; i++)

File diff suppressed because it is too large

View File

@ -1,659 +0,0 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Author: Nemanja Lukic (nlukic@mips.com)
*/
#ifndef PIXMAN_MIPS_DSPR2_ASM_H
#define PIXMAN_MIPS_DSPR2_ASM_H
#define zero $0
#define AT $1
#define v0 $2
#define v1 $3
#define a0 $4
#define a1 $5
#define a2 $6
#define a3 $7
#define t0 $8
#define t1 $9
#define t2 $10
#define t3 $11
#define t4 $12
#define t5 $13
#define t6 $14
#define t7 $15
#define s0 $16
#define s1 $17
#define s2 $18
#define s3 $19
#define s4 $20
#define s5 $21
#define s6 $22
#define s7 $23
#define t8 $24
#define t9 $25
#define k0 $26
#define k1 $27
#define gp $28
#define sp $29
#define fp $30
#define s8 $30
#define ra $31
/*
* LEAF_MIPS32R2 - declare leaf routine for MIPS32r2
*/
#define LEAF_MIPS32R2(symbol) \
.globl symbol; \
.align 2; \
.type symbol, @function; \
.ent symbol, 0; \
symbol: .frame sp, 0, ra; \
.set push; \
.set arch=mips32r2; \
.set noreorder; \
.set noat;
/*
* LEAF_MIPS_DSPR2 - declare leaf routine for MIPS DSPr2
*/
#define LEAF_MIPS_DSPR2(symbol) \
LEAF_MIPS32R2(symbol) \
.set dspr2;
/*
* END - mark end of function
*/
#define END(function) \
.set pop; \
.end function; \
.size function,.-function
/*
* Checks if the stack offset is big enough for storing/restoring regs_num
* registers to/from the stack. The stack offset must be greater than
* or equal to the number of bytes needed to store the registers (regs_num * 4).
* Since the MIPS ABI allows use of the first 16 bytes of the stack frame
* (this space is reserved for the input arguments of the function, already
* stored in a0-a3), the stack size can be further optimized by utilizing it.
*/
.macro CHECK_STACK_OFFSET regs_num, stack_offset
.if \stack_offset < \regs_num * 4 - 16
.error "Stack offset too small."
.endif
.endm
/*
* Saves a set of registers on the stack. The maximum number of registers that
* can be saved on the stack is limited to 14 (a0-a3, v0-v1 and s0-s7).
* The stack offset is the number of bytes that are added to the stack pointer
* (sp) before the registers are pushed, in order to provide enough space on
* the stack (the offset must be a multiple of 4, and must be big enough, as
* described by the CHECK_STACK_OFFSET macro). This macro is intended to be
* used in combination with the RESTORE_REGS_FROM_STACK macro. Example:
* SAVE_REGS_ON_STACK 4, v0, v1, s0, s1
* RESTORE_REGS_FROM_STACK 4, v0, v1, s0, s1
*/
.macro SAVE_REGS_ON_STACK stack_offset = 0, r1, \
r2 = 0, r3 = 0, r4 = 0, \
r5 = 0, r6 = 0, r7 = 0, \
r8 = 0, r9 = 0, r10 = 0, \
r11 = 0, r12 = 0, r13 = 0, \
r14 = 0
.if (\stack_offset < 0) || (\stack_offset - (\stack_offset / 4) * 4)
.error "Stack offset must be pozitive and multiple of 4."
.endif
.if \stack_offset != 0
addiu sp, sp, -\stack_offset
.endif
sw \r1, 0(sp)
.if \r2 != 0
sw \r2, 4(sp)
.endif
.if \r3 != 0
sw \r3, 8(sp)
.endif
.if \r4 != 0
sw \r4, 12(sp)
.endif
.if \r5 != 0
CHECK_STACK_OFFSET 5, \stack_offset
sw \r5, 16(sp)
.endif
.if \r6 != 0
CHECK_STACK_OFFSET 6, \stack_offset
sw \r6, 20(sp)
.endif
.if \r7 != 0
CHECK_STACK_OFFSET 7, \stack_offset
sw \r7, 24(sp)
.endif
.if \r8 != 0
CHECK_STACK_OFFSET 8, \stack_offset
sw \r8, 28(sp)
.endif
.if \r9 != 0
CHECK_STACK_OFFSET 9, \stack_offset
sw \r9, 32(sp)
.endif
.if \r10 != 0
CHECK_STACK_OFFSET 10, \stack_offset
sw \r10, 36(sp)
.endif
.if \r11 != 0
CHECK_STACK_OFFSET 11, \stack_offset
sw \r11, 40(sp)
.endif
.if \r12 != 0
CHECK_STACK_OFFSET 12, \stack_offset
sw \r12, 44(sp)
.endif
.if \r13 != 0
CHECK_STACK_OFFSET 13, \stack_offset
sw \r13, 48(sp)
.endif
.if \r14 != 0
CHECK_STACK_OFFSET 14, \stack_offset
sw \r14, 52(sp)
.endif
.endm
/*
* Restores a set of registers from the stack. The maximum number of registers
* that can be restored from the stack is limited to 14 (a0-a3, v0-v1 and
* s0-s7). The stack offset is the number of bytes that are added to the stack
* pointer (sp) after the registers are restored (the offset must be a multiple
* of 4, and must be big enough, as described by the CHECK_STACK_OFFSET macro).
* This macro is intended to be used in combination with the SAVE_REGS_ON_STACK
* macro.
* Example:
* SAVE_REGS_ON_STACK 4, v0, v1, s0, s1
* RESTORE_REGS_FROM_STACK 4, v0, v1, s0, s1
*/
.macro RESTORE_REGS_FROM_STACK stack_offset = 0, r1, \
r2 = 0, r3 = 0, r4 = 0, \
r5 = 0, r6 = 0, r7 = 0, \
r8 = 0, r9 = 0, r10 = 0, \
r11 = 0, r12 = 0, r13 = 0, \
r14 = 0
.if (\stack_offset < 0) || (\stack_offset - (\stack_offset/4)*4)
.error "Stack offset must be pozitive and multiple of 4."
.endif
lw \r1, 0(sp)
.if \r2 != 0
lw \r2, 4(sp)
.endif
.if \r3 != 0
lw \r3, 8(sp)
.endif
.if \r4 != 0
lw \r4, 12(sp)
.endif
.if \r5 != 0
CHECK_STACK_OFFSET 5, \stack_offset
lw \r5, 16(sp)
.endif
.if \r6 != 0
CHECK_STACK_OFFSET 6, \stack_offset
lw \r6, 20(sp)
.endif
.if \r7 != 0
CHECK_STACK_OFFSET 7, \stack_offset
lw \r7, 24(sp)
.endif
.if \r8 != 0
CHECK_STACK_OFFSET 8, \stack_offset
lw \r8, 28(sp)
.endif
.if \r9 != 0
CHECK_STACK_OFFSET 9, \stack_offset
lw \r9, 32(sp)
.endif
.if \r10 != 0
CHECK_STACK_OFFSET 10, \stack_offset
lw \r10, 36(sp)
.endif
.if \r11 != 0
CHECK_STACK_OFFSET 11, \stack_offset
lw \r11, 40(sp)
.endif
.if \r12 != 0
CHECK_STACK_OFFSET 12, \stack_offset
lw \r12, 44(sp)
.endif
.if \r13 != 0
CHECK_STACK_OFFSET 13, \stack_offset
lw \r13, 48(sp)
.endif
.if \r14 != 0
CHECK_STACK_OFFSET 14, \stack_offset
lw \r14, 52(sp)
.endif
.if \stack_offset != 0
addiu sp, sp, \stack_offset
.endif
.endm
/*
* Conversion of a single r5g6b5 pixel (in_565) to a single a8r8g8b8 pixel
* returned in the (out_8888) register. Requires two temporary registers
* (scratch1 and scratch2).
*/
.macro CONVERT_1x0565_TO_1x8888 in_565, \
out_8888, \
scratch1, scratch2
lui \out_8888, 0xff00
sll \scratch1, \in_565, 0x3
andi \scratch2, \scratch1, 0xff
ext \scratch1, \in_565, 0x2, 0x3
or \scratch1, \scratch2, \scratch1
or \out_8888, \out_8888, \scratch1
sll \scratch1, \in_565, 0x5
andi \scratch1, \scratch1, 0xfc00
srl \scratch2, \in_565, 0x1
andi \scratch2, \scratch2, 0x300
or \scratch2, \scratch1, \scratch2
or \out_8888, \out_8888, \scratch2
andi \scratch1, \in_565, 0xf800
srl \scratch2, \scratch1, 0x5
andi \scratch2, \scratch2, 0xff00
or \scratch1, \scratch1, \scratch2
sll \scratch1, \scratch1, 0x8
or \out_8888, \out_8888, \scratch1
.endm
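/* Worked example (illustrative): in_565 = 0xf800 (pure red, r5 = 0x1f) is
 * expanded by bit replication (r8 = (r5 << 3) | (r5 >> 2)), giving
 * out_8888 = 0xffff0000, i.e. opaque red.
 */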
/*
* Conversion of two r5g6b5 pixels (in1_565 and in2_565) to two a8r8g8b8 pixels
* returned in the (out1_8888 and out2_8888) registers. Requires four scratch
* registers (scratch1 ... scratch4). It also requires maskG and maskB for
* color component extraction. These masks must have the following values:
* li maskG, 0x07e007e0
* li maskB, 0x001F001F
*/
.macro CONVERT_2x0565_TO_2x8888 in1_565, in2_565, \
out1_8888, out2_8888, \
maskG, maskB, \
scratch1, scratch2, scratch3, scratch4
sll \scratch1, \in1_565, 16
or \scratch1, \scratch1, \in2_565
lui \out2_8888, 0xff00
ori \out2_8888, \out2_8888, 0xff00
shrl.ph \scratch2, \scratch1, 11
and \scratch3, \scratch1, \maskG
shra.ph \scratch4, \scratch2, 2
shll.ph \scratch2, \scratch2, 3
shll.ph \scratch3, \scratch3, 5
or \scratch2, \scratch2, \scratch4
shrl.qb \scratch4, \scratch3, 6
or \out2_8888, \out2_8888, \scratch2
or \scratch3, \scratch3, \scratch4
and \scratch1, \scratch1, \maskB
shll.ph \scratch2, \scratch1, 3
shra.ph \scratch4, \scratch1, 2
or \scratch2, \scratch2, \scratch4
or \scratch3, \scratch2, \scratch3
precrq.ph.w \out1_8888, \out2_8888, \scratch3
precr_sra.ph.w \out2_8888, \scratch3, 0
.endm
/*
* Conversion of a single a8r8g8b8 pixel (in_8888) to a single r5g6b5 pixel
* returned in the (out_565) register. Requires two temporary registers
* (scratch1 and scratch2).
*/
.macro CONVERT_1x8888_TO_1x0565 in_8888, \
out_565, \
scratch1, scratch2
ext \out_565, \in_8888, 0x3, 0x5
srl \scratch1, \in_8888, 0x5
andi \scratch1, \scratch1, 0x07e0
srl \scratch2, \in_8888, 0x8
andi \scratch2, \scratch2, 0xf800
or \out_565, \out_565, \scratch1
or \out_565, \out_565, \scratch2
.endm
/*
* Conversion of two a8r8g8b8 pixels (in1_8888 and in2_8888) to two r5g6b5
* pixels returned in the (out1_565 and out2_565) registers. Requires two
* temporary registers (scratch1 and scratch2). It also requires maskR, maskG
* and maskB for color component extraction. These masks must have the
* following values:
* li maskR, 0xf800f800
* li maskG, 0x07e007e0
* li maskB, 0x001F001F
* The value of the input register in2_8888 is lost.
*/
.macro CONVERT_2x8888_TO_2x0565 in1_8888, in2_8888, \
out1_565, out2_565, \
maskR, maskG, maskB, \
scratch1, scratch2
precrq.ph.w \scratch1, \in2_8888, \in1_8888
precr_sra.ph.w \in2_8888, \in1_8888, 0
shll.ph \scratch1, \scratch1, 8
srl \in2_8888, \in2_8888, 3
and \scratch2, \in2_8888, \maskB
and \scratch1, \scratch1, \maskR
srl \in2_8888, \in2_8888, 2
and \out2_565, \in2_8888, \maskG
or \out2_565, \out2_565, \scratch2
or \out1_565, \out2_565, \scratch1
srl \out2_565, \out1_565, 16
.endm
/*
* Multiply a pixel (a8) with a single pixel (a8r8g8b8). It requires maskLSR,
* needed for the rounding process. maskLSR must have the following value:
* li maskLSR, 0x00ff00ff
*/
.macro MIPS_UN8x4_MUL_UN8 s_8888, \
m_8, \
d_8888, \
maskLSR, \
scratch1, scratch2, scratch3
replv.ph \m_8, \m_8 /* 0 | M | 0 | M */
muleu_s.ph.qbl \scratch1, \s_8888, \m_8 /* A*M | R*M */
muleu_s.ph.qbr \scratch2, \s_8888, \m_8 /* G*M | B*M */
shra_r.ph \scratch3, \scratch1, 8
shra_r.ph \d_8888, \scratch2, 8
and \scratch3, \scratch3, \maskLSR /* 0 |A*M| 0 |R*M */
and \d_8888, \d_8888, \maskLSR /* 0 |G*M| 0 |B*M */
addq.ph \scratch1, \scratch1, \scratch3 /* A*M+A*M | R*M+R*M */
addq.ph \scratch2, \scratch2, \d_8888 /* G*M+G*M | B*M+B*M */
shra_r.ph \scratch1, \scratch1, 8
shra_r.ph \scratch2, \scratch2, 8
precr.qb.ph \d_8888, \scratch1, \scratch2
.endm
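/* Note (illustrative): per 16-bit lane, the shra_r.ph/add/shra_r.ph sequence
 * above computes (t + 128 + ((t + 128) >> 8)) >> 8 for each 8x8-bit product
 * t, i.e. a correctly rounded division by 255, the standard UN8 multiply.
 */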
/*
* Multiply two pixels (a8) with two pixels (a8r8g8b8). It requires maskLSR
* needed for the rounding process. maskLSR must have the following value:
* li maskLSR, 0x00ff00ff
*/
.macro MIPS_2xUN8x4_MUL_2xUN8 s1_8888, \
s2_8888, \
m1_8, \
m2_8, \
d1_8888, \
d2_8888, \
maskLSR, \
scratch1, scratch2, scratch3, \
scratch4, scratch5, scratch6
replv.ph \m1_8, \m1_8 /* 0 | M1 | 0 | M1 */
replv.ph \m2_8, \m2_8 /* 0 | M2 | 0 | M2 */
muleu_s.ph.qbl \scratch1, \s1_8888, \m1_8 /* A1*M1 | R1*M1 */
muleu_s.ph.qbr \scratch2, \s1_8888, \m1_8 /* G1*M1 | B1*M1 */
muleu_s.ph.qbl \scratch3, \s2_8888, \m2_8 /* A2*M2 | R2*M2 */
muleu_s.ph.qbr \scratch4, \s2_8888, \m2_8 /* G2*M2 | B2*M2 */
shra_r.ph \scratch5, \scratch1, 8
shra_r.ph \d1_8888, \scratch2, 8
shra_r.ph \scratch6, \scratch3, 8
shra_r.ph \d2_8888, \scratch4, 8
and \scratch5, \scratch5, \maskLSR /* 0 |A1*M1| 0 |R1*M1 */
and \d1_8888, \d1_8888, \maskLSR /* 0 |G1*M1| 0 |B1*M1 */
and \scratch6, \scratch6, \maskLSR /* 0 |A2*M2| 0 |R2*M2 */
and \d2_8888, \d2_8888, \maskLSR /* 0 |G2*M2| 0 |B2*M2 */
addq.ph \scratch1, \scratch1, \scratch5
addq.ph \scratch2, \scratch2, \d1_8888
addq.ph \scratch3, \scratch3, \scratch6
addq.ph \scratch4, \scratch4, \d2_8888
shra_r.ph \scratch1, \scratch1, 8
shra_r.ph \scratch2, \scratch2, 8
shra_r.ph \scratch3, \scratch3, 8
shra_r.ph \scratch4, \scratch4, 8
precr.qb.ph \d1_8888, \scratch1, \scratch2
precr.qb.ph \d2_8888, \scratch3, \scratch4
.endm
/*
* Multiply a pixel (a8r8g8b8) with a single pixel (a8r8g8b8). It requires
* maskLSR, needed for the rounding process. maskLSR must have the following value:
* li maskLSR, 0x00ff00ff
*/
.macro MIPS_UN8x4_MUL_UN8x4 s_8888, \
m_8888, \
d_8888, \
maskLSR, \
scratch1, scratch2, scratch3, scratch4
preceu.ph.qbl \scratch1, \m_8888 /* 0 | A | 0 | R */
preceu.ph.qbr \scratch2, \m_8888 /* 0 | G | 0 | B */
muleu_s.ph.qbl \scratch3, \s_8888, \scratch1 /* A*A | R*R */
muleu_s.ph.qbr \scratch4, \s_8888, \scratch2 /* G*G | B*B */
shra_r.ph \scratch1, \scratch3, 8
shra_r.ph \scratch2, \scratch4, 8
and \scratch1, \scratch1, \maskLSR /* 0 |A*A| 0 |R*R */
and \scratch2, \scratch2, \maskLSR /* 0 |G*G| 0 |B*B */
addq.ph \scratch1, \scratch1, \scratch3
addq.ph \scratch2, \scratch2, \scratch4
shra_r.ph \scratch1, \scratch1, 8
shra_r.ph \scratch2, \scratch2, 8
precr.qb.ph \d_8888, \scratch1, \scratch2
.endm
/*
* Multiply two pixels (a8r8g8b8) with two pixels (a8r8g8b8). It requires
* maskLSR, needed for the rounding process. maskLSR must have the following value:
* li maskLSR, 0x00ff00ff
*/
.macro MIPS_2xUN8x4_MUL_2xUN8x4 s1_8888, \
s2_8888, \
m1_8888, \
m2_8888, \
d1_8888, \
d2_8888, \
maskLSR, \
scratch1, scratch2, scratch3, \
scratch4, scratch5, scratch6
preceu.ph.qbl \scratch1, \m1_8888 /* 0 | A | 0 | R */
preceu.ph.qbr \scratch2, \m1_8888 /* 0 | G | 0 | B */
preceu.ph.qbl \scratch3, \m2_8888 /* 0 | A | 0 | R */
preceu.ph.qbr \scratch4, \m2_8888 /* 0 | G | 0 | B */
muleu_s.ph.qbl \scratch5, \s1_8888, \scratch1 /* A*A | R*R */
muleu_s.ph.qbr \scratch6, \s1_8888, \scratch2 /* G*G | B*B */
muleu_s.ph.qbl \scratch1, \s2_8888, \scratch3 /* A*A | R*R */
muleu_s.ph.qbr \scratch2, \s2_8888, \scratch4 /* G*G | B*B */
shra_r.ph \scratch3, \scratch5, 8
shra_r.ph \scratch4, \scratch6, 8
shra_r.ph \d1_8888, \scratch1, 8
shra_r.ph \d2_8888, \scratch2, 8
and \scratch3, \scratch3, \maskLSR /* 0 |A*A| 0 |R*R */
and \scratch4, \scratch4, \maskLSR /* 0 |G*G| 0 |B*B */
and \d1_8888, \d1_8888, \maskLSR /* 0 |A*A| 0 |R*R */
and \d2_8888, \d2_8888, \maskLSR /* 0 |G*G| 0 |B*B */
addq.ph \scratch3, \scratch3, \scratch5
addq.ph \scratch4, \scratch4, \scratch6
addq.ph \d1_8888, \d1_8888, \scratch1
addq.ph \d2_8888, \d2_8888, \scratch2
shra_r.ph \scratch3, \scratch3, 8
shra_r.ph \scratch4, \scratch4, 8
shra_r.ph \scratch5, \d1_8888, 8
shra_r.ph \scratch6, \d2_8888, 8
precr.qb.ph \d1_8888, \scratch3, \scratch4
precr.qb.ph \d2_8888, \scratch5, \scratch6
.endm
/*
* OVER operation on a single a8r8g8b8 source pixel (s_8888) and a single
* a8r8g8b8 destination pixel (d_8888) using an a8 mask (m_8). It also requires
* maskLSR, needed for the rounding process. maskLSR must have the following value:
* li maskLSR, 0x00ff00ff
*/
.macro OVER_8888_8_8888 s_8888, \
m_8, \
d_8888, \
out_8888, \
maskLSR, \
scratch1, scratch2, scratch3, scratch4
MIPS_UN8x4_MUL_UN8 \s_8888, \m_8, \
\scratch1, \maskLSR, \
\scratch2, \scratch3, \scratch4
not \scratch2, \scratch1
srl \scratch2, \scratch2, 24
MIPS_UN8x4_MUL_UN8 \d_8888, \scratch2, \
\d_8888, \maskLSR, \
\scratch3, \scratch4, \out_8888
addu_s.qb \out_8888, \d_8888, \scratch1
.endm
/*
* OVER operation on two a8r8g8b8 source pixels (s1_8888 and s2_8888) and two
* a8r8g8b8 destination pixels (d1_8888 and d2_8888) using a8 masks (m1_8 and
* m2_8). It also requires maskLSR, needed for the rounding process. maskLSR
* must have the following value:
* li maskLSR, 0x00ff00ff
*/
.macro OVER_2x8888_2x8_2x8888 s1_8888, \
s2_8888, \
m1_8, \
m2_8, \
d1_8888, \
d2_8888, \
out1_8888, \
out2_8888, \
maskLSR, \
scratch1, scratch2, scratch3, \
scratch4, scratch5, scratch6
MIPS_2xUN8x4_MUL_2xUN8 \s1_8888, \s2_8888, \
\m1_8, \m2_8, \
\scratch1, \scratch2, \
\maskLSR, \
\scratch3, \scratch4, \out1_8888, \
\out2_8888, \scratch5, \scratch6
not \scratch3, \scratch1
srl \scratch3, \scratch3, 24
not \scratch4, \scratch2
srl \scratch4, \scratch4, 24
MIPS_2xUN8x4_MUL_2xUN8 \d1_8888, \d2_8888, \
\scratch3, \scratch4, \
\d1_8888, \d2_8888, \
\maskLSR, \
\scratch5, \scratch6, \out1_8888, \
\out2_8888, \scratch3, \scratch4
addu_s.qb \out1_8888, \d1_8888, \scratch1
addu_s.qb \out2_8888, \d2_8888, \scratch2
.endm
/*
* OVER operation on a single a8r8g8b8 source pixel (s_8888) and a single
* a8r8g8b8 destination pixel (d_8888). It also requires maskLSR, needed for
* the rounding process. maskLSR must have the following value:
* li maskLSR, 0x00ff00ff
*/
.macro OVER_8888_8888 s_8888, \
d_8888, \
out_8888, \
maskLSR, \
scratch1, scratch2, scratch3, scratch4
not \scratch1, \s_8888
srl \scratch1, \scratch1, 24
MIPS_UN8x4_MUL_UN8 \d_8888, \scratch1, \
\out_8888, \maskLSR, \
\scratch2, \scratch3, \scratch4
addu_s.qb \out_8888, \out_8888, \s_8888
.endm
.macro MIPS_UN8x4_MUL_UN8_ADD_UN8x4 s_8888, \
m_8, \
d_8888, \
out_8888, \
maskLSR, \
scratch1, scratch2, scratch3
MIPS_UN8x4_MUL_UN8 \s_8888, \m_8, \
\out_8888, \maskLSR, \
\scratch1, \scratch2, \scratch3
addu_s.qb \out_8888, \out_8888, \d_8888
.endm
.macro BILINEAR_INTERPOLATE_SINGLE_PIXEL tl, tr, bl, br, \
scratch1, scratch2, \
alpha, red, green, blue, \
wt1, wt2, wb1, wb2
andi \scratch1, \tl, 0xff
andi \scratch2, \tr, 0xff
andi \alpha, \bl, 0xff
andi \red, \br, 0xff
multu $ac0, \wt1, \scratch1
maddu $ac0, \wt2, \scratch2
maddu $ac0, \wb1, \alpha
maddu $ac0, \wb2, \red
ext \scratch1, \tl, 8, 8
ext \scratch2, \tr, 8, 8
ext \alpha, \bl, 8, 8
ext \red, \br, 8, 8
multu $ac1, \wt1, \scratch1
maddu $ac1, \wt2, \scratch2
maddu $ac1, \wb1, \alpha
maddu $ac1, \wb2, \red
ext \scratch1, \tl, 16, 8
ext \scratch2, \tr, 16, 8
ext \alpha, \bl, 16, 8
ext \red, \br, 16, 8
mflo \blue, $ac0
multu $ac2, \wt1, \scratch1
maddu $ac2, \wt2, \scratch2
maddu $ac2, \wb1, \alpha
maddu $ac2, \wb2, \red
ext \scratch1, \tl, 24, 8
ext \scratch2, \tr, 24, 8
ext \alpha, \bl, 24, 8
ext \red, \br, 24, 8
mflo \green, $ac1
multu $ac3, \wt1, \scratch1
maddu $ac3, \wt2, \scratch2
maddu $ac3, \wb1, \alpha
maddu $ac3, \wb2, \red
mflo \red, $ac2
mflo \alpha, $ac3
precr.qb.ph \alpha, \alpha, \red
precr.qb.ph \scratch1, \green, \blue
precrq.qb.ph \tl, \alpha, \scratch1
.endm
#endif //PIXMAN_MIPS_DSPR2_ASM_H

View File

@ -1,326 +0,0 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Author: Nemanja Lukic (nlukic@mips.com)
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "pixman-private.h"
#include "pixman-mips-dspr2.h"
PIXMAN_MIPS_BIND_FAST_PATH_SRC_DST (0, src_x888_8888,
uint32_t, 1, uint32_t, 1)
PIXMAN_MIPS_BIND_FAST_PATH_SRC_DST (0, src_8888_0565,
uint32_t, 1, uint16_t, 1)
PIXMAN_MIPS_BIND_FAST_PATH_SRC_DST (0, src_0565_8888,
uint16_t, 1, uint32_t, 1)
PIXMAN_MIPS_BIND_FAST_PATH_SRC_DST (DO_FAST_MEMCPY, src_0565_0565,
uint16_t, 1, uint16_t, 1)
PIXMAN_MIPS_BIND_FAST_PATH_SRC_DST (DO_FAST_MEMCPY, src_8888_8888,
uint32_t, 1, uint32_t, 1)
PIXMAN_MIPS_BIND_FAST_PATH_SRC_DST (DO_FAST_MEMCPY, src_0888_0888,
uint8_t, 3, uint8_t, 3)
PIXMAN_MIPS_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, over_n_8888_8888_ca,
uint32_t, 1, uint32_t, 1)
PIXMAN_MIPS_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, over_n_8888_0565_ca,
uint32_t, 1, uint16_t, 1)
PIXMAN_MIPS_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, over_n_8_8888,
uint8_t, 1, uint32_t, 1)
PIXMAN_MIPS_BIND_FAST_PATH_N_MASK_DST (SKIP_ZERO_SRC, over_n_8_0565,
uint8_t, 1, uint16_t, 1)
PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_DST (0, 8888_8888, SRC,
uint32_t, uint32_t)
PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_DST (0, 8888_0565, SRC,
uint32_t, uint16_t)
PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_DST (0, 0565_8888, SRC,
uint16_t, uint32_t)
PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_DST (0, 0565_0565, SRC,
uint16_t, uint16_t)
PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_DST (SKIP_ZERO_SRC, 8888_8888, OVER,
uint32_t, uint32_t)
PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_DST (SKIP_ZERO_SRC, 8888_8888, ADD,
uint32_t, uint32_t)
PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_A8_DST (0, 8888_8_8888, SRC,
uint32_t, uint32_t)
PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_A8_DST (0, 8888_8_0565, SRC,
uint32_t, uint16_t)
PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_A8_DST (0, 0565_8_x888, SRC,
uint16_t, uint32_t)
PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_A8_DST (0, 0565_8_0565, SRC,
uint16_t, uint16_t)
PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_A8_DST (SKIP_ZERO_SRC, 8888_8_8888, OVER,
uint32_t, uint32_t)
PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_A8_DST (SKIP_ZERO_SRC, 8888_8_8888, ADD,
uint32_t, uint32_t)
static pixman_bool_t
pixman_fill_mips (uint32_t *bits,
int stride,
int bpp,
int x,
int y,
int width,
int height,
uint32_t _xor)
{
uint8_t *byte_line;
uint32_t byte_width;
switch (bpp)
{
case 16:
stride = stride * (int) sizeof (uint32_t) / 2;
byte_line = (uint8_t *)(((uint16_t *)bits) + stride * y + x);
byte_width = width * 2;
stride *= 2;
while (height--)
{
uint8_t *dst = byte_line;
byte_line += stride;
pixman_fill_buff16_mips (dst, byte_width, _xor & 0xffff);
}
return TRUE;
case 32:
stride = stride * (int) sizeof (uint32_t) / 4;
byte_line = (uint8_t *)(((uint32_t *)bits) + stride * y + x);
byte_width = width * 4;
stride *= 4;
while (height--)
{
uint8_t *dst = byte_line;
byte_line += stride;
pixman_fill_buff32_mips (dst, byte_width, _xor);
}
return TRUE;
default:
return FALSE;
}
}
static pixman_bool_t
pixman_blt_mips (uint32_t *src_bits,
uint32_t *dst_bits,
int src_stride,
int dst_stride,
int src_bpp,
int dst_bpp,
int src_x,
int src_y,
int dest_x,
int dest_y,
int width,
int height)
{
if (src_bpp != dst_bpp)
return FALSE;
uint8_t *src_bytes;
uint8_t *dst_bytes;
uint32_t byte_width;
switch (src_bpp)
{
case 16:
src_stride = src_stride * (int) sizeof (uint32_t) / 2;
dst_stride = dst_stride * (int) sizeof (uint32_t) / 2;
src_bytes =(uint8_t *)(((uint16_t *)src_bits)
+ src_stride * (src_y) + (src_x));
dst_bytes = (uint8_t *)(((uint16_t *)dst_bits)
+ dst_stride * (dest_y) + (dest_x));
byte_width = width * 2;
src_stride *= 2;
dst_stride *= 2;
while (height--)
{
uint8_t *src = src_bytes;
uint8_t *dst = dst_bytes;
src_bytes += src_stride;
dst_bytes += dst_stride;
pixman_mips_fast_memcpy (dst, src, byte_width);
}
return TRUE;
case 32:
src_stride = src_stride * (int) sizeof (uint32_t) / 4;
dst_stride = dst_stride * (int) sizeof (uint32_t) / 4;
src_bytes = (uint8_t *)(((uint32_t *)src_bits)
+ src_stride * (src_y) + (src_x));
dst_bytes = (uint8_t *)(((uint32_t *)dst_bits)
+ dst_stride * (dest_y) + (dest_x));
byte_width = width * 4;
src_stride *= 4;
dst_stride *= 4;
while (height--)
{
uint8_t *src = src_bytes;
uint8_t *dst = dst_bytes;
src_bytes += src_stride;
dst_bytes += dst_stride;
pixman_mips_fast_memcpy (dst, src, byte_width);
}
return TRUE;
default:
return FALSE;
}
}
static const pixman_fast_path_t mips_dspr2_fast_paths[] =
{
PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, r5g6b5, mips_composite_src_0565_0565),
PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, b5g6r5, mips_composite_src_0565_0565),
PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, r5g6b5, mips_composite_src_8888_0565),
PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, r5g6b5, mips_composite_src_8888_0565),
PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, b5g6r5, mips_composite_src_8888_0565),
PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, b5g6r5, mips_composite_src_8888_0565),
PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, a8r8g8b8, mips_composite_src_0565_8888),
PIXMAN_STD_FAST_PATH (SRC, r5g6b5, null, x8r8g8b8, mips_composite_src_0565_8888),
PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, a8b8g8r8, mips_composite_src_0565_8888),
PIXMAN_STD_FAST_PATH (SRC, b5g6r5, null, x8b8g8r8, mips_composite_src_0565_8888),
PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, x8r8g8b8, mips_composite_src_8888_8888),
PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, x8r8g8b8, mips_composite_src_8888_8888),
PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, x8b8g8r8, mips_composite_src_8888_8888),
PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, x8b8g8r8, mips_composite_src_8888_8888),
PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, a8r8g8b8, mips_composite_src_8888_8888),
PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, a8b8g8r8, mips_composite_src_8888_8888),
PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, a8r8g8b8, mips_composite_src_x888_8888),
PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, a8b8g8r8, mips_composite_src_x888_8888),
PIXMAN_STD_FAST_PATH (SRC, r8g8b8, null, r8g8b8, mips_composite_src_0888_0888),
PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, a8r8g8b8, mips_composite_over_n_8888_8888_ca),
PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, x8r8g8b8, mips_composite_over_n_8888_8888_ca),
PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, a8b8g8r8, mips_composite_over_n_8888_8888_ca),
PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, x8b8g8r8, mips_composite_over_n_8888_8888_ca),
PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8r8g8b8, r5g6b5, mips_composite_over_n_8888_0565_ca),
PIXMAN_STD_FAST_PATH_CA (OVER, solid, a8b8g8r8, b5g6r5, mips_composite_over_n_8888_0565_ca),
PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8r8g8b8, mips_composite_over_n_8_8888),
PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8r8g8b8, mips_composite_over_n_8_8888),
PIXMAN_STD_FAST_PATH (OVER, solid, a8, a8b8g8r8, mips_composite_over_n_8_8888),
PIXMAN_STD_FAST_PATH (OVER, solid, a8, x8b8g8r8, mips_composite_over_n_8_8888),
PIXMAN_STD_FAST_PATH (OVER, solid, a8, r5g6b5, mips_composite_over_n_8_0565),
PIXMAN_STD_FAST_PATH (OVER, solid, a8, b5g6r5, mips_composite_over_n_8_0565),
SIMPLE_BILINEAR_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, mips_8888_8888),
SIMPLE_BILINEAR_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8, mips_8888_8888),
SIMPLE_BILINEAR_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8, mips_8888_8888),
SIMPLE_BILINEAR_FAST_PATH (SRC, a8r8g8b8, r5g6b5, mips_8888_0565),
SIMPLE_BILINEAR_FAST_PATH (SRC, x8r8g8b8, r5g6b5, mips_8888_0565),
SIMPLE_BILINEAR_FAST_PATH (SRC, r5g6b5, x8r8g8b8, mips_0565_8888),
SIMPLE_BILINEAR_FAST_PATH (SRC, r5g6b5, r5g6b5, mips_0565_0565),
SIMPLE_BILINEAR_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, mips_8888_8888),
SIMPLE_BILINEAR_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, mips_8888_8888),
SIMPLE_BILINEAR_FAST_PATH (ADD, a8r8g8b8, a8r8g8b8, mips_8888_8888),
SIMPLE_BILINEAR_FAST_PATH (ADD, a8r8g8b8, x8r8g8b8, mips_8888_8888),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, mips_8888_8_8888),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8, mips_8888_8_8888),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8, mips_8888_8_8888),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, a8r8g8b8, r5g6b5, mips_8888_8_0565),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, x8r8g8b8, r5g6b5, mips_8888_8_0565),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, r5g6b5, x8r8g8b8, mips_0565_8_x888),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, r5g6b5, r5g6b5, mips_0565_8_0565),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, mips_8888_8_8888),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, mips_8888_8_8888),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (ADD, a8r8g8b8, a8r8g8b8, mips_8888_8_8888),
SIMPLE_BILINEAR_A8_MASK_FAST_PATH (ADD, a8r8g8b8, x8r8g8b8, mips_8888_8_8888),
{ PIXMAN_OP_NONE },
};
static pixman_bool_t
mips_dspr2_blt (pixman_implementation_t *imp,
uint32_t * src_bits,
uint32_t * dst_bits,
int src_stride,
int dst_stride,
int src_bpp,
int dst_bpp,
int src_x,
int src_y,
int dest_x,
int dest_y,
int width,
int height)
{
if (!pixman_blt_mips (
src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
src_x, src_y, dest_x, dest_y, width, height))
{
return _pixman_implementation_blt (
imp->delegate,
src_bits, dst_bits, src_stride, dst_stride, src_bpp, dst_bpp,
src_x, src_y, dest_x, dest_y, width, height);
}
return TRUE;
}
static pixman_bool_t
mips_dspr2_fill (pixman_implementation_t *imp,
uint32_t * bits,
int stride,
int bpp,
int x,
int y,
int width,
int height,
uint32_t xor)
{
if (pixman_fill_mips (bits, stride, bpp, x, y, width, height, xor))
return TRUE;
return _pixman_implementation_fill (
imp->delegate, bits, stride, bpp, x, y, width, height, xor);
}
pixman_implementation_t *
_pixman_implementation_create_mips_dspr2 (pixman_implementation_t *fallback)
{
pixman_implementation_t *imp =
_pixman_implementation_create (fallback, mips_dspr2_fast_paths);
imp->blt = mips_dspr2_blt;
imp->fill = mips_dspr2_fill;
return imp;
}

View File

@ -1,231 +0,0 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Author: Nemanja Lukic (nlukic@mips.com)
*/
#ifndef PIXMAN_MIPS_DSPR2_H
#define PIXMAN_MIPS_DSPR2_H
#include "pixman-private.h"
#include "pixman-inlines.h"
#define SKIP_ZERO_SRC 1
#define SKIP_ZERO_MASK 2
#define DO_FAST_MEMCPY 3
void
pixman_mips_fast_memcpy (void *dst, void *src, uint32_t n_bytes);
void
pixman_fill_buff16_mips (void *dst, uint32_t n_bytes, uint16_t value);
void
pixman_fill_buff32_mips (void *dst, uint32_t n_bytes, uint32_t value);
/****************************************************************/
#define PIXMAN_MIPS_BIND_FAST_PATH_SRC_DST(flags, name, \
src_type, src_cnt, \
dst_type, dst_cnt) \
void \
pixman_composite_##name##_asm_mips (dst_type *dst, \
src_type *src, \
int32_t w); \
\
static void \
mips_composite_##name (pixman_implementation_t *imp, \
pixman_composite_info_t *info) \
{ \
PIXMAN_COMPOSITE_ARGS (info); \
dst_type *dst_line, *dst; \
src_type *src_line, *src; \
int32_t dst_stride, src_stride; \
int bpp = PIXMAN_FORMAT_BPP (dest_image->bits.format) / 8; \
\
PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, src_type, \
src_stride, src_line, src_cnt); \
PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, dst_type, \
dst_stride, dst_line, dst_cnt); \
\
while (height--) \
{ \
dst = dst_line; \
dst_line += dst_stride; \
src = src_line; \
src_line += src_stride; \
\
if (flags == DO_FAST_MEMCPY) \
pixman_mips_fast_memcpy (dst, src, width * bpp); \
else \
pixman_composite_##name##_asm_mips (dst, src, width); \
} \
}
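/* Note (illustrative): an instantiation such as
 * PIXMAN_MIPS_BIND_FAST_PATH_SRC_DST (0, src_8888_0565, uint32_t, 1,
 * uint16_t, 1) in pixman-mips-dspr2.c expands to a wrapper,
 * mips_composite_src_8888_0565 (), that walks the destination scanline by
 * scanline and calls pixman_composite_src_8888_0565_asm_mips () on each row
 * (or pixman_mips_fast_memcpy () when flags == DO_FAST_MEMCPY).
 */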
/*******************************************************************/
#define PIXMAN_MIPS_BIND_FAST_PATH_N_MASK_DST(flags, name, \
mask_type, mask_cnt, \
dst_type, dst_cnt) \
void \
pixman_composite_##name##_asm_mips (dst_type *dst, \
uint32_t src, \
mask_type *mask, \
int32_t w); \
\
static void \
mips_composite_##name (pixman_implementation_t *imp, \
pixman_composite_info_t *info) \
{ \
PIXMAN_COMPOSITE_ARGS (info); \
dst_type *dst_line, *dst; \
mask_type *mask_line, *mask; \
int32_t dst_stride, mask_stride; \
uint32_t src; \
\
src = _pixman_image_get_solid ( \
imp, src_image, dest_image->bits.format); \
\
if ((flags & SKIP_ZERO_SRC) && src == 0) \
return; \
\
PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, dst_type, \
dst_stride, dst_line, dst_cnt); \
PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, mask_type, \
mask_stride, mask_line, mask_cnt); \
\
while (height--) \
{ \
dst = dst_line; \
dst_line += dst_stride; \
mask = mask_line; \
mask_line += mask_stride; \
pixman_composite_##name##_asm_mips (dst, src, mask, width); \
} \
}
/****************************************************************************/
#define PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_DST(flags, name, op, \
src_type, dst_type) \
void \
pixman_scaled_bilinear_scanline_##name##_##op##_asm_mips( \
dst_type * dst, \
const src_type * src_top, \
const src_type * src_bottom, \
int32_t w, \
int wt, \
int wb, \
pixman_fixed_t vx, \
pixman_fixed_t unit_x); \
static force_inline void \
scaled_bilinear_scanline_mips_##name##_##op (dst_type * dst, \
const uint32_t * mask, \
const src_type * src_top, \
const src_type * src_bottom, \
int32_t w, \
int wt, \
int wb, \
pixman_fixed_t vx, \
pixman_fixed_t unit_x, \
pixman_fixed_t max_vx, \
pixman_bool_t zero_src) \
{ \
if ((flags & SKIP_ZERO_SRC) && zero_src) \
return; \
pixman_scaled_bilinear_scanline_##name##_##op##_asm_mips (dst, src_top, \
src_bottom, w, \
wt, wb, \
vx, unit_x); \
} \
\
FAST_BILINEAR_MAINLOOP_COMMON (mips_##name##_cover_##op, \
scaled_bilinear_scanline_mips_##name##_##op, \
src_type, uint32_t, dst_type, COVER, FLAG_NONE) \
FAST_BILINEAR_MAINLOOP_COMMON (mips_##name##_none_##op, \
scaled_bilinear_scanline_mips_##name##_##op, \
src_type, uint32_t, dst_type, NONE, FLAG_NONE) \
FAST_BILINEAR_MAINLOOP_COMMON (mips_##name##_pad_##op, \
scaled_bilinear_scanline_mips_##name##_##op, \
src_type, uint32_t, dst_type, PAD, FLAG_NONE) \
FAST_BILINEAR_MAINLOOP_COMMON (mips_##name##_normal_##op, \
scaled_bilinear_scanline_mips_##name##_##op, \
src_type, uint32_t, dst_type, NORMAL, \
FLAG_NONE)
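Likewise, a sketch of how the bilinear binder might be instantiated (names illustrative); the flags argument is 0 for SRC, since even a zero source still has to be written to the destination:
PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_DST (0, 8888_8888, SRC,
                                          uint32_t, uint32_t)
Each such invocation expands into four mainloop entry points (cover/none/pad/normal), one per repeat mode.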
/*****************************************************************************/
#define PIXMAN_MIPS_BIND_SCALED_BILINEAR_SRC_A8_DST(flags, name, op, \
src_type, dst_type) \
void \
pixman_scaled_bilinear_scanline_##name##_##op##_asm_mips ( \
dst_type * dst, \
const uint8_t * mask, \
const src_type * top, \
const src_type * bottom, \
int wt, \
int wb, \
pixman_fixed_t x, \
pixman_fixed_t ux, \
int width); \
\
static force_inline void \
scaled_bilinear_scanline_mips_##name##_##op (dst_type * dst, \
const uint8_t * mask, \
const src_type * src_top, \
const src_type * src_bottom, \
int32_t w, \
int wt, \
int wb, \
pixman_fixed_t vx, \
pixman_fixed_t unit_x, \
pixman_fixed_t max_vx, \
pixman_bool_t zero_src) \
{ \
if ((flags & SKIP_ZERO_SRC) && zero_src) \
return; \
pixman_scaled_bilinear_scanline_##name##_##op##_asm_mips ( \
dst, mask, src_top, src_bottom, wt, wb, vx, unit_x, w); \
} \
\
FAST_BILINEAR_MAINLOOP_COMMON (mips_##name##_cover_##op, \
scaled_bilinear_scanline_mips_##name##_##op, \
src_type, uint8_t, dst_type, COVER, \
FLAG_HAVE_NON_SOLID_MASK) \
FAST_BILINEAR_MAINLOOP_COMMON (mips_##name##_none_##op, \
scaled_bilinear_scanline_mips_##name##_##op, \
src_type, uint8_t, dst_type, NONE, \
FLAG_HAVE_NON_SOLID_MASK) \
FAST_BILINEAR_MAINLOOP_COMMON (mips_##name##_pad_##op, \
scaled_bilinear_scanline_mips_##name##_##op, \
src_type, uint8_t, dst_type, PAD, \
FLAG_HAVE_NON_SOLID_MASK) \
FAST_BILINEAR_MAINLOOP_COMMON (mips_##name##_normal_##op, \
scaled_bilinear_scanline_mips_##name##_##op, \
src_type, uint8_t, dst_type, NORMAL, \
FLAG_HAVE_NON_SOLID_MASK)
#endif //PIXMAN_MIPS_DSPR2_H


@ -1,382 +0,0 @@
/*
* Copyright (c) 2012
* MIPS Technologies, Inc., California.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "pixman-mips-dspr2-asm.h"
/*
* This routine could be optimized for MIPS64. The current code only
* uses MIPS32 instructions.
*/
#ifdef EB
# define LWHI lwl /* high part is left in big-endian */
# define SWHI swl /* high part is left in big-endian */
# define LWLO lwr /* low part is right in big-endian */
# define SWLO swr /* low part is right in big-endian */
#else
# define LWHI lwr /* high part is right in little-endian */
# define SWHI swr /* high part is right in little-endian */
# define LWLO lwl /* low part is left in little-endian */
# define SWLO swl /* low part is left in little-endian */
#endif
LEAF_MIPS32R2(pixman_mips_fast_memcpy)
slti AT, a2, 8
bne AT, zero, $last8
move v0, a0 /* memcpy returns the dst pointer */
/* Test if the src and dst are word-aligned, or can be made word-aligned */
xor t8, a1, a0
andi t8, t8, 0x3 /* t8 is a0/a1 word-displacement */
bne t8, zero, $unaligned
negu a3, a0
andi a3, a3, 0x3 /* we need to copy a3 bytes to make a0/a1 aligned */
beq a3, zero, $chk16w /* when a3=0 then the dst (a0) is word-aligned */
subu a2, a2, a3 /* now a2 is the remaining bytes count */
LWHI t8, 0(a1)
addu a1, a1, a3
SWHI t8, 0(a0)
addu a0, a0, a3
/* Now the dst/src are mutually word-aligned with word-aligned addresses */
$chk16w: andi t8, a2, 0x3f /* any whole 64-byte chunks? */
/* t8 is the byte count after 64-byte chunks */
beq a2, t8, $chk8w /* if a2==t8, no 64-byte chunks */
/* There will be at most 1 32-byte chunk after it */
subu a3, a2, t8 /* subtract from a2 the remainder */
/* Here a3 counts bytes in 16w chunks */
addu a3, a0, a3 /* Now a3 is the final dst after 64-byte chunks */
addu t0, a0, a2 /* t0 is the "past the end" address */
/*
 * When the loop issues "pref 30, x(a0)" (prepare-for-store), the address
 * a0+x must not be past the "t0-32" address, because hint 30 allocates a
 * whole 32-byte cache line that must still lie inside the destination.
 * This means: for x=128 the last "safe" a0 address is "t0-160";
 * alternatively, for x=64 the last "safe" a0 address is "t0-96".
 * The current version uses "pref 30, 128(a0)", so "t0-160" is the limit.
 */
subu t9, t0, 160 /* t9 is the "last safe pref 30, 128(a0)" address */
pref 0, 0(a1) /* bring the first line of src, addr 0 */
pref 0, 32(a1) /* bring the second line of src, addr 32 */
pref 0, 64(a1) /* bring the third line of src, addr 64 */
pref 30, 32(a0) /* safe, as we have at least 64 bytes ahead */
/* If a0 > t9, don't use "pref 30" at all */
sgtu v1, a0, t9
bgtz v1, $loop16w /* skip "pref 30, 64(a0)" for too short arrays */
nop
/* otherwise, start with using pref30 */
pref 30, 64(a0)
$loop16w:
pref 0, 96(a1)
lw t0, 0(a1)
bgtz v1, $skip_pref30_96 /* skip "pref 30, 96(a0)" */
lw t1, 4(a1)
pref 30, 96(a0) /* continue setting up the dest, addr 96 */
$skip_pref30_96:
lw t2, 8(a1)
lw t3, 12(a1)
lw t4, 16(a1)
lw t5, 20(a1)
lw t6, 24(a1)
lw t7, 28(a1)
pref 0, 128(a1) /* bring the next lines of src, addr 128 */
sw t0, 0(a0)
sw t1, 4(a0)
sw t2, 8(a0)
sw t3, 12(a0)
sw t4, 16(a0)
sw t5, 20(a0)
sw t6, 24(a0)
sw t7, 28(a0)
lw t0, 32(a1)
bgtz v1, $skip_pref30_128 /* skip "pref 30, 128(a0)" */
lw t1, 36(a1)
pref 30, 128(a0) /* continue setting up the dest, addr 128 */
$skip_pref30_128:
lw t2, 40(a1)
lw t3, 44(a1)
lw t4, 48(a1)
lw t5, 52(a1)
lw t6, 56(a1)
lw t7, 60(a1)
pref 0, 160(a1) /* bring the next lines of src, addr 160 */
sw t0, 32(a0)
sw t1, 36(a0)
sw t2, 40(a0)
sw t3, 44(a0)
sw t4, 48(a0)
sw t5, 52(a0)
sw t6, 56(a0)
sw t7, 60(a0)
addiu a0, a0, 64 /* adding 64 to dest */
sgtu v1, a0, t9
bne a0, a3, $loop16w
addiu a1, a1, 64 /* adding 64 to src */
move a2, t8
/* Here we have src and dest word-aligned but less than 64 bytes to go */
$chk8w:
pref 0, 0x0(a1)
andi t8, a2, 0x1f /* is there a 32-byte chunk? */
/* t8 is the remainder count past 32-byte chunks */
beq a2, t8, $chk1w /* when a2=t8, no 32-byte chunk */
nop
lw t0, 0(a1)
lw t1, 4(a1)
lw t2, 8(a1)
lw t3, 12(a1)
lw t4, 16(a1)
lw t5, 20(a1)
lw t6, 24(a1)
lw t7, 28(a1)
addiu a1, a1, 32
sw t0, 0(a0)
sw t1, 4(a0)
sw t2, 8(a0)
sw t3, 12(a0)
sw t4, 16(a0)
sw t5, 20(a0)
sw t6, 24(a0)
sw t7, 28(a0)
addiu a0, a0, 32
$chk1w:
andi a2, t8, 0x3 /* now a2 is the remainder past 1w chunks */
beq a2, t8, $last8
subu a3, t8, a2 /* a3 is count of bytes in 1w chunks */
addu a3, a0, a3 /* now a3 is the dst address past the 1w chunks */
/* copying in words (4-byte chunks) */
$wordCopy_loop:
lw t3, 0(a1) /* the first t3 may equal t0 ... optimize? */
addiu a1, a1, 4
addiu a0, a0, 4
bne a0, a3, $wordCopy_loop
sw t3, -4(a0)
/* For the last (<8) bytes */
$last8:
blez a2, leave
addu a3, a0, a2 /* a3 is the last dst address */
$last8loop:
lb v1, 0(a1)
addiu a1, a1, 1
addiu a0, a0, 1
bne a0, a3, $last8loop
sb v1, -1(a0)
leave: j ra
nop
/*
* UNALIGNED case
*/
$unaligned:
/* got here with a3="negu a0" */
andi a3, a3, 0x3 /* test if the a0 is word aligned */
beqz a3, $ua_chk16w
subu a2, a2, a3 /* bytes left after initial a3 bytes */
LWHI v1, 0(a1)
LWLO v1, 3(a1)
addu a1, a1, a3 /* a3 may be here 1, 2 or 3 */
SWHI v1, 0(a0)
addu a0, a0, a3 /* below the dst will be word aligned (NOTE1) */
$ua_chk16w: andi t8, a2, 0x3f /* any whole 64-byte chunks? */
/* t8 is the byte count after 64-byte chunks */
beq a2, t8, $ua_chk8w /* if a2==t8, no 64-byte chunks */
/* There will be at most 1 32-byte chunk after it */
subu a3, a2, t8 /* subtract from a2 the remainder */
/* Here a3 counts bytes in 16w chunks */
addu a3, a0, a3 /* Now a3 is the final dst after 64-byte chunks */
addu t0, a0, a2 /* t0 is the "past the end" address */
subu t9, t0, 160 /* t9 is the "last safe pref 30, 128(a0)" address */
pref 0, 0(a1) /* bring the first line of src, addr 0 */
pref 0, 32(a1) /* bring the second line of src, addr 32 */
pref 0, 64(a1) /* bring the third line of src, addr 64 */
pref 30, 32(a0) /* safe, as we have at least 64 bytes ahead */
/* If a0 > t9, don't use "pref 30" at all */
sgtu v1, a0, t9
bgtz v1, $ua_loop16w /* skip "pref 30, 64(a0)" for too short arrays */
nop
/* otherwise, start with using pref30 */
pref 30, 64(a0)
$ua_loop16w:
pref 0, 96(a1)
LWHI t0, 0(a1)
LWLO t0, 3(a1)
LWHI t1, 4(a1)
bgtz v1, $ua_skip_pref30_96
LWLO t1, 7(a1)
pref 30, 96(a0) /* continue setting up the dest, addr 96 */
$ua_skip_pref30_96:
LWHI t2, 8(a1)
LWLO t2, 11(a1)
LWHI t3, 12(a1)
LWLO t3, 15(a1)
LWHI t4, 16(a1)
LWLO t4, 19(a1)
LWHI t5, 20(a1)
LWLO t5, 23(a1)
LWHI t6, 24(a1)
LWLO t6, 27(a1)
LWHI t7, 28(a1)
LWLO t7, 31(a1)
pref 0, 128(a1) /* bring the next lines of src, addr 128 */
sw t0, 0(a0)
sw t1, 4(a0)
sw t2, 8(a0)
sw t3, 12(a0)
sw t4, 16(a0)
sw t5, 20(a0)
sw t6, 24(a0)
sw t7, 28(a0)
LWHI t0, 32(a1)
LWLO t0, 35(a1)
LWHI t1, 36(a1)
bgtz v1, $ua_skip_pref30_128
LWLO t1, 39(a1)
pref 30, 128(a0) /* continue setting up the dest, addr 128 */
$ua_skip_pref30_128:
LWHI t2, 40(a1)
LWLO t2, 43(a1)
LWHI t3, 44(a1)
LWLO t3, 47(a1)
LWHI t4, 48(a1)
LWLO t4, 51(a1)
LWHI t5, 52(a1)
LWLO t5, 55(a1)
LWHI t6, 56(a1)
LWLO t6, 59(a1)
LWHI t7, 60(a1)
LWLO t7, 63(a1)
pref 0, 160(a1) /* bring the next lines of src, addr 160 */
sw t0, 32(a0)
sw t1, 36(a0)
sw t2, 40(a0)
sw t3, 44(a0)
sw t4, 48(a0)
sw t5, 52(a0)
sw t6, 56(a0)
sw t7, 60(a0)
addiu a0, a0, 64 /* adding 64 to dest */
sgtu v1, a0, t9
bne a0, a3, $ua_loop16w
addiu a1, a1, 64 /* adding 64 to src */
move a2, t8
/* Here the dst is word-aligned (the src is still unaligned) and less than 64 bytes remain */
$ua_chk8w:
pref 0, 0x0(a1)
andi t8, a2, 0x1f /* is there a 32-byte chunk? */
/* t8 is the remainder count */
beq a2, t8, $ua_chk1w /* when a2=t8, no 32-byte chunk */
LWHI t0, 0(a1)
LWLO t0, 3(a1)
LWHI t1, 4(a1)
LWLO t1, 7(a1)
LWHI t2, 8(a1)
LWLO t2, 11(a1)
LWHI t3, 12(a1)
LWLO t3, 15(a1)
LWHI t4, 16(a1)
LWLO t4, 19(a1)
LWHI t5, 20(a1)
LWLO t5, 23(a1)
LWHI t6, 24(a1)
LWLO t6, 27(a1)
LWHI t7, 28(a1)
LWLO t7, 31(a1)
addiu a1, a1, 32
sw t0, 0(a0)
sw t1, 4(a0)
sw t2, 8(a0)
sw t3, 12(a0)
sw t4, 16(a0)
sw t5, 20(a0)
sw t6, 24(a0)
sw t7, 28(a0)
addiu a0, a0, 32
$ua_chk1w:
andi a2, t8, 0x3 /* now a2 is the remainder past 1w chunks */
beq a2, t8, $ua_smallCopy
subu a3, t8, a2 /* a3 is count of bytes in 1w chunks */
addu a3, a0, a3 /* now a3 is the dst address past the 1w chunks */
/* copying in words (4-byte chunks) */
$ua_wordCopy_loop:
LWHI v1, 0(a1)
LWLO v1, 3(a1)
addiu a1, a1, 4
addiu a0, a0, 4 /* note: dst=a0 is word aligned here, see NOTE1 */
bne a0, a3, $ua_wordCopy_loop
sw v1, -4(a0)
/* Now less than 4 bytes (value in a2) left to copy */
$ua_smallCopy:
beqz a2, leave
addu a3, a0, a2 /* a3 is the last dst address */
$ua_smallCopy_loop:
lb v1, 0(a1)
addiu a1, a1, 1
addiu a0, a0, 1
bne a0, a3, $ua_smallCopy_loop
sb v1, -1(a0)
j ra
nop
END(pixman_mips_fast_memcpy)
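Stripped of prefetching and branch-delay-slot scheduling, the routine above tiers the copy by chunk size. A rough C sketch of that shape, for orientation only (the real code also keeps a separate unaligned path built on LWHI/LWLO pairs):
#include <stddef.h>
#include <stdint.h>
#include <string.h>
/* Illustrative only: 64-byte chunks, then at most one 32-byte chunk,
 * then words, then a byte tail -- mirroring $chk16w/$chk8w/$chk1w/$last8. */
static void
fast_memcpy_shape (uint8_t *dst, const uint8_t *src, size_t n)
{
    while (n >= 64) { memcpy (dst, src, 64); dst += 64; src += 64; n -= 64; }
    if    (n >= 32) { memcpy (dst, src, 32); dst += 32; src += 32; n -= 32; }
    while (n >= 4)  { memcpy (dst, src, 4);  dst += 4;  src += 4;  n -= 4;  }
    while (n--)
        *dst++ = *src++;
}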


@ -1,84 +0,0 @@
/*
* Copyright © 2000 SuSE, Inc.
* Copyright © 2007 Red Hat, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of SuSE not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. SuSE makes no representations about the
* suitability of this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
* BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "pixman-private.h"
#if defined(USE_MIPS_DSPR2) || defined(USE_LOONGSON_MMI)
#include <string.h>
#include <stdlib.h>
static pixman_bool_t
have_feature (const char *search_string)
{
#if defined (__linux__) /* linux ELF */
/* Simple detection of MIPS features at runtime for Linux.
* It is based on /proc/cpuinfo, which reveals hardware configuration
* to user-space applications. According to MIPS (early 2010), no similar
* facility is universally available on the MIPS architectures, so it is up
* to individual OSes to provide one.
*/
const char *file_name = "/proc/cpuinfo";
char cpuinfo_line[256];
FILE *f = NULL;
if ((f = fopen (file_name, "r")) == NULL)
return FALSE;
while (fgets (cpuinfo_line, sizeof (cpuinfo_line), f) != NULL)
{
if (strstr (cpuinfo_line, search_string) != NULL)
{
fclose (f);
return TRUE;
}
}
fclose (f);
#endif
/* Did not find string in the proc file, or not Linux ELF. */
return FALSE;
}
#endif
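As an illustration, on a 74K-family part the matched /proc/cpuinfo line might look like the following (the exact text varies by kernel version, which is why the substring match above is deliberately loose):
cpu model               : MIPS 74Kc V4.12  FPU V0.0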
pixman_implementation_t *
_pixman_mips_get_implementations (pixman_implementation_t *imp)
{
#ifdef USE_LOONGSON_MMI
/* It is unclear whether all Loongson CPUs actually have MMI. */
if (!_pixman_disabled ("loongson-mmi") && have_feature ("Loongson"))
imp = _pixman_implementation_create_mmx (imp);
#endif
#ifdef USE_MIPS_DSPR2
/* The only currently available MIPS core that supports DSPr2 is the 74K. */
if (!_pixman_disabled ("mips-dspr2") && have_feature ("MIPS 74K"))
imp = _pixman_implementation_create_mips_dspr2 (imp);
#endif
return imp;
}

File diff suppressed because it is too large


@ -71,18 +71,18 @@ noop_src_iter_init (pixman_implementation_t *imp, pixman_iter_t *iter)
{
iter->get_scanline = get_scanline_null;
}
else if ((iter->iter_flags & (ITER_IGNORE_ALPHA | ITER_IGNORE_RGB)) ==
else if ((iter->flags & (ITER_IGNORE_ALPHA | ITER_IGNORE_RGB)) ==
(ITER_IGNORE_ALPHA | ITER_IGNORE_RGB))
{
iter->get_scanline = _pixman_iter_get_scanline_noop;
}
else if (image->common.extended_format_code == PIXMAN_solid &&
((iter->image_flags & (FAST_PATH_BITS_IMAGE | FAST_PATH_NO_ALPHA_MAP)) ==
((image->common.flags & (FAST_PATH_BITS_IMAGE | FAST_PATH_NO_ALPHA_MAP)) ==
(FAST_PATH_BITS_IMAGE | FAST_PATH_NO_ALPHA_MAP)))
{
bits_image_t *bits = &image->bits;
if (iter->iter_flags & ITER_NARROW)
if (iter->flags & ITER_NARROW)
{
uint32_t color = bits->fetch_pixel_32 (bits, 0, 0);
uint32_t *buffer = iter->buffer;
@ -104,8 +104,8 @@ noop_src_iter_init (pixman_implementation_t *imp, pixman_iter_t *iter)
iter->get_scanline = _pixman_iter_get_scanline_noop;
}
else if (image->common.extended_format_code == PIXMAN_a8r8g8b8 &&
(iter->iter_flags & ITER_NARROW) &&
(iter->image_flags & FLAGS) == FLAGS &&
(iter->flags & ITER_NARROW) &&
(image->common.flags & FLAGS) == FLAGS &&
iter->x >= 0 && iter->y >= 0 &&
iter->x + iter->width <= image->bits.width &&
iter->y + iter->height <= image->bits.height)
@ -125,8 +125,8 @@ static void
noop_dest_iter_init (pixman_implementation_t *imp, pixman_iter_t *iter)
{
pixman_image_t *image = iter->image;
uint32_t image_flags = iter->image_flags;
uint32_t iter_flags = iter->iter_flags;
uint32_t image_flags = image->common.flags;
uint32_t iter_flags = iter->flags;
if ((image_flags & FAST_PATH_STD_DEST_FLAGS) == FAST_PATH_STD_DEST_FLAGS &&
(iter_flags & ITER_NARROW) == ITER_NARROW &&


@ -1,155 +0,0 @@
/*
* Copyright © 2000 SuSE, Inc.
* Copyright © 2007 Red Hat, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of SuSE not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. SuSE makes no representations about the
* suitability of this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
* BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "pixman-private.h"
#ifdef USE_VMX
/* The CPU detection code needs to be in a file not compiled with
* "-maltivec -mabi=altivec", as gcc would try to save vector register
* across function calls causing SIGILL on cpus without Altivec/vmx.
*/
#ifdef __APPLE__
#include <sys/sysctl.h>
static pixman_bool_t
pixman_have_vmx (void)
{
int error, have_vmx;
size_t length = sizeof(have_vmx);
error = sysctlbyname ("hw.optional.altivec", &have_vmx, &length, NULL, 0);
if (error)
return FALSE;
return have_vmx;
}
#elif defined (__OpenBSD__)
#include <sys/param.h>
#include <sys/sysctl.h>
#include <machine/cpu.h>
static pixman_bool_t
pixman_have_vmx (void)
{
int mib[2] = { CTL_MACHDEP, CPU_ALTIVEC };
int error, have_vmx;
size_t length = sizeof(have_vmx);
error = sysctl (mib, 2, &have_vmx, &length, NULL, 0);
if (error != 0)
return FALSE;
return have_vmx;
}
#elif defined (__linux__)
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>
#include <linux/auxvec.h>
#include <asm/cputable.h>
static pixman_bool_t
pixman_have_vmx (void)
{
int have_vmx = FALSE;
int fd;
struct
{
unsigned long type;
unsigned long value;
} aux;
fd = open ("/proc/self/auxv", O_RDONLY);
if (fd >= 0)
{
while (read (fd, &aux, sizeof (aux)) == sizeof (aux))
{
if (aux.type == AT_HWCAP && (aux.value & PPC_FEATURE_HAS_ALTIVEC))
{
have_vmx = TRUE;
break;
}
}
close (fd);
}
return have_vmx;
}
#else /* !__APPLE__ && !__OpenBSD__ && !__linux__ */
#include <signal.h>
#include <setjmp.h>
static jmp_buf jump_env;
static void
vmx_test (int sig,
siginfo_t *si,
void * unused)
{
longjmp (jump_env, 1);
}
static pixman_bool_t
pixman_have_vmx (void)
{
struct sigaction sa, osa;
int jmp_result;
sa.sa_flags = SA_SIGINFO;
sigemptyset (&sa.sa_mask);
sa.sa_sigaction = vmx_test;
sigaction (SIGILL, &sa, &osa);
jmp_result = setjmp (jump_env);
if (jmp_result == 0)
{
asm volatile ( "vor 0, 0, 0" );
}
sigaction (SIGILL, &osa, NULL);
return (jmp_result == 0);
}
#endif /* __APPLE__ */
#endif /* USE_VMX */
pixman_implementation_t *
_pixman_ppc_get_implementations (pixman_implementation_t *imp)
{
#ifdef USE_VMX
if (!_pixman_disabled ("vmx") && pixman_have_vmx ())
imp = _pixman_implementation_create_vmx (imp);
#endif
return imp;
}


@ -1,30 +1,10 @@
#ifndef PIXMAN_PRIVATE_H
#define PIXMAN_PRIVATE_H
/*
* The defines which are shared between C and assembly code
*/
/* bilinear interpolation precision (must be <= 8) */
#ifdef MOZ_GFX_OPTIMIZE_MOBILE
#define LOW_QUALITY_INTERPOLATION
#define LOWER_QUALITY_INTERPOLATION
#define BILINEAR_INTERPOLATION_BITS 4
#else
#define BILINEAR_INTERPOLATION_BITS 8
#endif
#define BILINEAR_INTERPOLATION_RANGE (1 << BILINEAR_INTERPOLATION_BITS)
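To make the precision constraint concrete, here is a minimal scalar sketch of one-channel bilinear filtering under these defines (a hypothetical helper, not part of pixman): with BILINEAR_INTERPOLATION_BITS <= 8, each product of an 8-bit sample and a weight fits in 16 bits, which is what lets the SIMD back ends keep intermediates in 16-bit lanes.
#include <stdint.h>
/* wl + wr == wt + wb == BILINEAR_INTERPOLATION_RANGE */
static inline uint8_t
bilinear_channel (uint8_t tl, uint8_t tr, uint8_t bl, uint8_t br,
                  int wt, int wb, int wl, int wr)
{
    uint32_t top    = tl * wl + tr * wr; /* <= 255 * RANGE, fits in 16 bits */
    uint32_t bottom = bl * wl + br * wr;
    return (uint8_t) ((top * wt + bottom * wb)
                      >> (BILINEAR_INTERPOLATION_BITS * 2));
}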
/*
* C specific part
*/
#ifndef __ASSEMBLER__
#ifndef PACKAGE
# error config.h must be included before pixman-private.h
#endif
#ifndef PIXMAN_PRIVATE_H
#define PIXMAN_PRIVATE_H
#define PIXMAN_DISABLE_DEPRECATED
#define PIXMAN_USE_INTERNAL_API
@ -33,7 +13,6 @@
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <stddef.h>
#include "pixman-compiler.h"
@ -175,6 +154,9 @@ struct bits_image
uint32_t * free_me;
int rowstride; /* in number of uint32_t's */
fetch_scanline_t get_scanline_32;
fetch_scanline_t get_scanline_64;
fetch_scanline_t fetch_scanline_16;
fetch_scanline_t fetch_scanline_32;
@ -247,15 +229,13 @@ struct pixman_iter_t
int x, y;
int width;
int height;
iter_flags_t iter_flags;
uint32_t image_flags;
iter_flags_t flags;
/* These function pointers are initialized by the implementation */
pixman_iter_get_scanline_t get_scanline;
pixman_iter_write_back_t write_back;
/* These fields are scratch data that implementations can use */
void * data;
uint8_t * bits;
int stride;
};
@ -550,8 +530,7 @@ _pixman_implementation_src_iter_init (pixman_implementation_t *imp,
int width,
int height,
uint8_t *buffer,
iter_flags_t flags,
uint32_t image_flags);
iter_flags_t flags);
void
_pixman_implementation_dest_iter_init (pixman_implementation_t *imp,
@ -562,8 +541,7 @@ _pixman_implementation_dest_iter_init (pixman_implementation_t *imp,
int width,
int height,
uint8_t *buffer,
iter_flags_t flags,
uint32_t image_flags);
iter_flags_t flags);
/* Specific implementations */
pixman_implementation_t *
@ -575,7 +553,7 @@ _pixman_implementation_create_fast_path (pixman_implementation_t *fallback);
pixman_implementation_t *
_pixman_implementation_create_noop (pixman_implementation_t *fallback);
#if defined USE_X86_MMX || defined USE_ARM_IWMMXT || defined USE_LOONGSON_MMI
#if defined USE_X86_MMX || defined USE_ARM_IWMMXT
pixman_implementation_t *
_pixman_implementation_create_mmx (pixman_implementation_t *fallback);
#endif
@ -605,44 +583,14 @@ pixman_implementation_t *
_pixman_implementation_create_vmx (pixman_implementation_t *fallback);
#endif
pixman_bool_t
_pixman_implementation_disabled (const char *name);
pixman_implementation_t *
_pixman_x86_get_implementations (pixman_implementation_t *imp);
pixman_implementation_t *
_pixman_arm_get_implementations (pixman_implementation_t *imp);
pixman_implementation_t *
_pixman_ppc_get_implementations (pixman_implementation_t *imp);
pixman_implementation_t *
_pixman_mips_get_implementations (pixman_implementation_t *imp);
pixman_implementation_t *
_pixman_choose_implementation (void);
pixman_bool_t
_pixman_disabled (const char *name);
/*
* Utilities
*/
pixman_bool_t
_pixman_compute_composite_region32 (pixman_region32_t * region,
pixman_image_t * src_image,
pixman_image_t * mask_image,
pixman_image_t * dest_image,
int32_t src_x,
int32_t src_y,
int32_t mask_x,
int32_t mask_y,
int32_t dest_x,
int32_t dest_y,
int32_t width,
int32_t height);
uint32_t *
_pixman_iter_get_scanline_noop (pixman_iter_t *iter, const uint32_t *mask);
@ -751,18 +699,6 @@ _pixman_iter_get_scanline_noop (pixman_iter_t *iter, const uint32_t *mask);
dest, FAST_PATH_STD_DEST_FLAGS, \
func) }
extern pixman_implementation_t *global_implementation;
static force_inline pixman_implementation_t *
get_implementation (void)
{
#ifndef TOOLCHAIN_SUPPORTS_ATTRIBUTE_CONSTRUCTOR
if (!global_implementation)
global_implementation = _pixman_choose_implementation ();
#endif
return global_implementation;
}
/* Memory allocation helpers */
void *
pixman_malloc_ab (unsigned int n, unsigned int b);
@ -812,50 +748,6 @@ pixman_bool_t
pixman_region16_copy_from_region32 (pixman_region16_t *dst,
pixman_region32_t *src);
/* Doubly linked lists */
typedef struct pixman_link_t pixman_link_t;
struct pixman_link_t
{
pixman_link_t *next;
pixman_link_t *prev;
};
typedef struct pixman_list_t pixman_list_t;
struct pixman_list_t
{
pixman_link_t *head;
pixman_link_t *tail;
};
static force_inline void
pixman_list_init (pixman_list_t *list)
{
list->head = (pixman_link_t *)list;
list->tail = (pixman_link_t *)list;
}
static force_inline void
pixman_list_prepend (pixman_list_t *list, pixman_link_t *link)
{
link->next = list->head;
link->prev = (pixman_link_t *)list;
list->head->prev = link;
list->head = link;
}
static force_inline void
pixman_list_unlink (pixman_link_t *link)
{
link->prev->next = link->next;
link->next->prev = link->prev;
}
static force_inline void
pixman_list_move_to_front (pixman_list_t *list, pixman_link_t *link)
{
pixman_list_unlink (link);
pixman_list_prepend (list, link);
}
/* Misc macros */
@ -908,8 +800,7 @@ pixman_list_move_to_front (pixman_list_t *list, pixman_link_t *link)
(PIXMAN_FORMAT_A (f) > 8 || \
PIXMAN_FORMAT_R (f) > 8 || \
PIXMAN_FORMAT_G (f) > 8 || \
PIXMAN_FORMAT_B (f) > 8 || \
PIXMAN_FORMAT_TYPE (f) == PIXMAN_TYPE_ARGB_SRGB)
PIXMAN_FORMAT_B (f) > 8)
#ifdef WORDS_BIGENDIAN
# define SCREEN_SHIFT_LEFT(x,n) ((x) << (n))
@ -1105,18 +996,4 @@ void pixman_timer_register (pixman_timer_t *timer);
#endif /* PIXMAN_TIMERS */
/* sRGB<->linear conversion tables. Linear color space is the same
* as sRGB but the components are in linear light (gamma 1.0).
*
* linear_to_srgb maps a linear value from 0 to 4095 ([0.0, 1.0])
* and returns the 8-bit sRGB value.
*
* srgb_to_linear maps an 8-bit sRGB value to a 16-bit linear value
* with range 0 to 65535 ([0.0, 1.0]).
*/
extern const uint8_t linear_to_srgb[4096];
extern const uint16_t srgb_to_linear[256];
#endif /* __ASSEMBLER__ */
#endif /* PIXMAN_PRIVATE_H */
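For reference, a sketch of the standard sRGB encoding curve that the generated tables discretize (make-srgb.pl is the authoritative generator; this is just the textbook IEC 61966-2-1 formula):
#include <math.h>
/* Map a linear component in [0.0, 1.0] to its sRGB-encoded value. */
static double
srgb_encode (double c)
{
    return (c <= 0.0031308) ? 12.92 * c
                            : 1.055 * pow (c, 1.0 / 2.4) - 0.055;
}
So, roughly, linear_to_srgb[i] == round (255 * srgb_encode (i / 4095.0)), and srgb_to_linear inverts it at 16-bit precision.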


@ -671,9 +671,9 @@ radial_get_scanline_wide (pixman_iter_t *iter, const uint32_t *mask)
void
_pixman_radial_gradient_iter_init (pixman_image_t *image, pixman_iter_t *iter)
{
if (iter->iter_flags & ITER_16)
if (iter->flags & ITER_16)
iter->get_scanline = radial_get_scanline_16;
else if (iter->iter_flags & ITER_NARROW)
else if (iter->flags & ITER_NARROW)
iter->get_scanline = radial_get_scanline_narrow;
else
iter->get_scanline = radial_get_scanline_wide;


@ -2041,10 +2041,10 @@ PREFIX (_subtract) (region_type_t *reg_d,
*
*-----------------------------------------------------------------------
*/
PIXMAN_EXPORT pixman_bool_t
PREFIX (_inverse) (region_type_t *new_reg, /* Destination region */
region_type_t *reg1, /* Region to invert */
box_type_t * inv_rect) /* Bounding box for inversion */
pixman_bool_t
PIXMAN_EXPORT PREFIX (_inverse) (region_type_t *new_reg, /* Destination region */
region_type_t *reg1, /* Region to invert */
box_type_t * inv_rect) /* Bounding box for inversion */
{
region_type_t inv_reg; /* Quick and dirty region made from the
* bounding box */
@ -2137,9 +2137,9 @@ find_box_for_y (box_type_t *begin, box_type_t *end, int y)
* partially in the region) or is outside the region (we reached a band
* that doesn't overlap the box at all and part_in is false)
*/
PIXMAN_EXPORT pixman_region_overlap_t
PREFIX (_contains_rectangle) (region_type_t * region,
box_type_t * prect)
pixman_region_overlap_t
PIXMAN_EXPORT PREFIX (_contains_rectangle) (region_type_t * region,
box_type_t * prect)
{
box_type_t * pbox;
box_type_t * pbox_end;


@ -29,7 +29,7 @@
void
_pixman_solid_fill_iter_init (pixman_image_t *image, pixman_iter_t *iter)
{
if (iter->iter_flags & ITER_NARROW)
if (iter->flags & ITER_NARROW)
{
uint32_t *b = (uint32_t *)iter->buffer;
uint32_t *e = b + iter->width;


@ -1,455 +0,0 @@
/* WARNING: This file is generated by make-srgb.pl.
* Please edit that file instead of this one.
*/
#include <stdint.h>
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "pixman-private.h"
const uint8_t linear_to_srgb[4096] =
{
0, 1, 2, 2, 3, 4, 5, 6, 6, 7,
8, 9, 10, 10, 11, 12, 13, 13, 14, 15,
15, 16, 16, 17, 18, 18, 19, 19, 20, 20,
21, 21, 22, 22, 23, 23, 23, 24, 24, 25,
25, 25, 26, 26, 27, 27, 27, 28, 28, 29,
29, 29, 30, 30, 30, 31, 31, 31, 32, 32,
32, 33, 33, 33, 34, 34, 34, 34, 35, 35,
35, 36, 36, 36, 37, 37, 37, 37, 38, 38,
38, 38, 39, 39, 39, 40, 40, 40, 40, 41,
41, 41, 41, 42, 42, 42, 42, 43, 43, 43,
43, 43, 44, 44, 44, 44, 45, 45, 45, 45,
46, 46, 46, 46, 46, 47, 47, 47, 47, 48,
48, 48, 48, 48, 49, 49, 49, 49, 49, 50,
50, 50, 50, 50, 51, 51, 51, 51, 51, 52,
52, 52, 52, 52, 53, 53, 53, 53, 53, 54,
54, 54, 54, 54, 55, 55, 55, 55, 55, 55,
56, 56, 56, 56, 56, 57, 57, 57, 57, 57,
57, 58, 58, 58, 58, 58, 58, 59, 59, 59,
59, 59, 59, 60, 60, 60, 60, 60, 60, 61,
61, 61, 61, 61, 61, 62, 62, 62, 62, 62,
62, 63, 63, 63, 63, 63, 63, 64, 64, 64,
64, 64, 64, 64, 65, 65, 65, 65, 65, 65,
66, 66, 66, 66, 66, 66, 66, 67, 67, 67,
67, 67, 67, 67, 68, 68, 68, 68, 68, 68,
68, 69, 69, 69, 69, 69, 69, 69, 70, 70,
70, 70, 70, 70, 70, 71, 71, 71, 71, 71,
71, 71, 72, 72, 72, 72, 72, 72, 72, 72,
73, 73, 73, 73, 73, 73, 73, 74, 74, 74,
74, 74, 74, 74, 74, 75, 75, 75, 75, 75,
75, 75, 75, 76, 76, 76, 76, 76, 76, 76,
77, 77, 77, 77, 77, 77, 77, 77, 78, 78,
78, 78, 78, 78, 78, 78, 78, 79, 79, 79,
79, 79, 79, 79, 79, 80, 80, 80, 80, 80,
80, 80, 80, 81, 81, 81, 81, 81, 81, 81,
81, 81, 82, 82, 82, 82, 82, 82, 82, 82,
83, 83, 83, 83, 83, 83, 83, 83, 83, 84,
84, 84, 84, 84, 84, 84, 84, 84, 85, 85,
85, 85, 85, 85, 85, 85, 85, 86, 86, 86,
86, 86, 86, 86, 86, 86, 87, 87, 87, 87,
87, 87, 87, 87, 87, 88, 88, 88, 88, 88,
88, 88, 88, 88, 88, 89, 89, 89, 89, 89,
89, 89, 89, 89, 90, 90, 90, 90, 90, 90,
90, 90, 90, 90, 91, 91, 91, 91, 91, 91,
91, 91, 91, 91, 92, 92, 92, 92, 92, 92,
92, 92, 92, 92, 93, 93, 93, 93, 93, 93,
93, 93, 93, 93, 94, 94, 94, 94, 94, 94,
94, 94, 94, 94, 95, 95, 95, 95, 95, 95,
95, 95, 95, 95, 96, 96, 96, 96, 96, 96,
96, 96, 96, 96, 96, 97, 97, 97, 97, 97,
97, 97, 97, 97, 97, 98, 98, 98, 98, 98,
98, 98, 98, 98, 98, 98, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 101, 101,
101, 101, 101, 101, 101, 101, 101, 101, 101, 102,
102, 102, 102, 102, 102, 102, 102, 102, 102, 102,
103, 103, 103, 103, 103, 103, 103, 103, 103, 103,
103, 103, 104, 104, 104, 104, 104, 104, 104, 104,
104, 104, 104, 105, 105, 105, 105, 105, 105, 105,
105, 105, 105, 105, 105, 106, 106, 106, 106, 106,
106, 106, 106, 106, 106, 106, 106, 107, 107, 107,
107, 107, 107, 107, 107, 107, 107, 107, 107, 108,
108, 108, 108, 108, 108, 108, 108, 108, 108, 108,
108, 109, 109, 109, 109, 109, 109, 109, 109, 109,
109, 109, 109, 110, 110, 110, 110, 110, 110, 110,
110, 110, 110, 110, 110, 111, 111, 111, 111, 111,
111, 111, 111, 111, 111, 111, 111, 111, 112, 112,
112, 112, 112, 112, 112, 112, 112, 112, 112, 112,
113, 113, 113, 113, 113, 113, 113, 113, 113, 113,
113, 113, 113, 114, 114, 114, 114, 114, 114, 114,
114, 114, 114, 114, 114, 114, 115, 115, 115, 115,
115, 115, 115, 115, 115, 115, 115, 115, 115, 116,
116, 116, 116, 116, 116, 116, 116, 116, 116, 116,
116, 116, 117, 117, 117, 117, 117, 117, 117, 117,
117, 117, 117, 117, 117, 117, 118, 118, 118, 118,
118, 118, 118, 118, 118, 118, 118, 118, 118, 119,
119, 119, 119, 119, 119, 119, 119, 119, 119, 119,
119, 119, 119, 120, 120, 120, 120, 120, 120, 120,
120, 120, 120, 120, 120, 120, 120, 121, 121, 121,
121, 121, 121, 121, 121, 121, 121, 121, 121, 121,
122, 122, 122, 122, 122, 122, 122, 122, 122, 122,
122, 122, 122, 122, 122, 123, 123, 123, 123, 123,
123, 123, 123, 123, 123, 123, 123, 123, 123, 124,
124, 124, 124, 124, 124, 124, 124, 124, 124, 124,
124, 124, 124, 125, 125, 125, 125, 125, 125, 125,
125, 125, 125, 125, 125, 125, 125, 125, 126, 126,
126, 126, 126, 126, 126, 126, 126, 126, 126, 126,
126, 126, 127, 127, 127, 127, 127, 127, 127, 127,
127, 127, 127, 127, 127, 127, 127, 128, 128, 128,
128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
128, 128, 129, 129, 129, 129, 129, 129, 129, 129,
129, 129, 129, 129, 129, 129, 129, 130, 130, 130,
130, 130, 130, 130, 130, 130, 130, 130, 130, 130,
130, 130, 131, 131, 131, 131, 131, 131, 131, 131,
131, 131, 131, 131, 131, 131, 131, 131, 132, 132,
132, 132, 132, 132, 132, 132, 132, 132, 132, 132,
132, 132, 132, 133, 133, 133, 133, 133, 133, 133,
133, 133, 133, 133, 133, 133, 133, 133, 133, 134,
134, 134, 134, 134, 134, 134, 134, 134, 134, 134,
134, 134, 134, 134, 134, 135, 135, 135, 135, 135,
135, 135, 135, 135, 135, 135, 135, 135, 135, 135,
135, 136, 136, 136, 136, 136, 136, 136, 136, 136,
136, 136, 136, 136, 136, 136, 136, 137, 137, 137,
137, 137, 137, 137, 137, 137, 137, 137, 137, 137,
137, 137, 137, 138, 138, 138, 138, 138, 138, 138,
138, 138, 138, 138, 138, 138, 138, 138, 138, 139,
139, 139, 139, 139, 139, 139, 139, 139, 139, 139,
139, 139, 139, 139, 139, 139, 140, 140, 140, 140,
140, 140, 140, 140, 140, 140, 140, 140, 140, 140,
140, 140, 140, 141, 141, 141, 141, 141, 141, 141,
141, 141, 141, 141, 141, 141, 141, 141, 141, 141,
142, 142, 142, 142, 142, 142, 142, 142, 142, 142,
142, 142, 142, 142, 142, 142, 142, 143, 143, 143,
143, 143, 143, 143, 143, 143, 143, 143, 143, 143,
143, 143, 143, 143, 144, 144, 144, 144, 144, 144,
144, 144, 144, 144, 144, 144, 144, 144, 144, 144,
144, 145, 145, 145, 145, 145, 145, 145, 145, 145,
145, 145, 145, 145, 145, 145, 145, 145, 145, 146,
146, 146, 146, 146, 146, 146, 146, 146, 146, 146,
146, 146, 146, 146, 146, 146, 147, 147, 147, 147,
147, 147, 147, 147, 147, 147, 147, 147, 147, 147,
147, 147, 147, 147, 148, 148, 148, 148, 148, 148,
148, 148, 148, 148, 148, 148, 148, 148, 148, 148,
148, 148, 149, 149, 149, 149, 149, 149, 149, 149,
149, 149, 149, 149, 149, 149, 149, 149, 149, 149,
150, 150, 150, 150, 150, 150, 150, 150, 150, 150,
150, 150, 150, 150, 150, 150, 150, 150, 150, 151,
151, 151, 151, 151, 151, 151, 151, 151, 151, 151,
151, 151, 151, 151, 151, 151, 151, 152, 152, 152,
152, 152, 152, 152, 152, 152, 152, 152, 152, 152,
152, 152, 152, 152, 152, 152, 153, 153, 153, 153,
153, 153, 153, 153, 153, 153, 153, 153, 153, 153,
153, 153, 153, 153, 154, 154, 154, 154, 154, 154,
154, 154, 154, 154, 154, 154, 154, 154, 154, 154,
154, 154, 154, 155, 155, 155, 155, 155, 155, 155,
155, 155, 155, 155, 155, 155, 155, 155, 155, 155,
155, 155, 156, 156, 156, 156, 156, 156, 156, 156,
156, 156, 156, 156, 156, 156, 156, 156, 156, 156,
156, 156, 157, 157, 157, 157, 157, 157, 157, 157,
157, 157, 157, 157, 157, 157, 157, 157, 157, 157,
157, 158, 158, 158, 158, 158, 158, 158, 158, 158,
158, 158, 158, 158, 158, 158, 158, 158, 158, 158,
159, 159, 159, 159, 159, 159, 159, 159, 159, 159,
159, 159, 159, 159, 159, 159, 159, 159, 159, 159,
160, 160, 160, 160, 160, 160, 160, 160, 160, 160,
160, 160, 160, 160, 160, 160, 160, 160, 160, 160,
161, 161, 161, 161, 161, 161, 161, 161, 161, 161,
161, 161, 161, 161, 161, 161, 161, 161, 161, 161,
162, 162, 162, 162, 162, 162, 162, 162, 162, 162,
162, 162, 162, 162, 162, 162, 162, 162, 162, 162,
163, 163, 163, 163, 163, 163, 163, 163, 163, 163,
163, 163, 163, 163, 163, 163, 163, 163, 163, 163,
164, 164, 164, 164, 164, 164, 164, 164, 164, 164,
164, 164, 164, 164, 164, 164, 164, 164, 164, 164,
164, 165, 165, 165, 165, 165, 165, 165, 165, 165,
165, 165, 165, 165, 165, 165, 165, 165, 165, 165,
165, 165, 166, 166, 166, 166, 166, 166, 166, 166,
166, 166, 166, 166, 166, 166, 166, 166, 166, 166,
166, 166, 167, 167, 167, 167, 167, 167, 167, 167,
167, 167, 167, 167, 167, 167, 167, 167, 167, 167,
167, 167, 167, 168, 168, 168, 168, 168, 168, 168,
168, 168, 168, 168, 168, 168, 168, 168, 168, 168,
168, 168, 168, 168, 168, 169, 169, 169, 169, 169,
169, 169, 169, 169, 169, 169, 169, 169, 169, 169,
169, 169, 169, 169, 169, 169, 170, 170, 170, 170,
170, 170, 170, 170, 170, 170, 170, 170, 170, 170,
170, 170, 170, 170, 170, 170, 170, 171, 171, 171,
171, 171, 171, 171, 171, 171, 171, 171, 171, 171,
171, 171, 171, 171, 171, 171, 171, 171, 171, 172,
172, 172, 172, 172, 172, 172, 172, 172, 172, 172,
172, 172, 172, 172, 172, 172, 172, 172, 172, 172,
172, 173, 173, 173, 173, 173, 173, 173, 173, 173,
173, 173, 173, 173, 173, 173, 173, 173, 173, 173,
173, 173, 173, 174, 174, 174, 174, 174, 174, 174,
174, 174, 174, 174, 174, 174, 174, 174, 174, 174,
174, 174, 174, 174, 174, 175, 175, 175, 175, 175,
175, 175, 175, 175, 175, 175, 175, 175, 175, 175,
175, 175, 175, 175, 175, 175, 175, 176, 176, 176,
176, 176, 176, 176, 176, 176, 176, 176, 176, 176,
176, 176, 176, 176, 176, 176, 176, 176, 176, 176,
177, 177, 177, 177, 177, 177, 177, 177, 177, 177,
177, 177, 177, 177, 177, 177, 177, 177, 177, 177,
177, 177, 178, 178, 178, 178, 178, 178, 178, 178,
178, 178, 178, 178, 178, 178, 178, 178, 178, 178,
178, 178, 178, 178, 178, 179, 179, 179, 179, 179,
179, 179, 179, 179, 179, 179, 179, 179, 179, 179,
179, 179, 179, 179, 179, 179, 179, 179, 180, 180,
180, 180, 180, 180, 180, 180, 180, 180, 180, 180,
180, 180, 180, 180, 180, 180, 180, 180, 180, 180,
180, 181, 181, 181, 181, 181, 181, 181, 181, 181,
181, 181, 181, 181, 181, 181, 181, 181, 181, 181,
181, 181, 181, 181, 182, 182, 182, 182, 182, 182,
182, 182, 182, 182, 182, 182, 182, 182, 182, 182,
182, 182, 182, 182, 182, 182, 182, 182, 183, 183,
183, 183, 183, 183, 183, 183, 183, 183, 183, 183,
183, 183, 183, 183, 183, 183, 183, 183, 183, 183,
183, 184, 184, 184, 184, 184, 184, 184, 184, 184,
184, 184, 184, 184, 184, 184, 184, 184, 184, 184,
184, 184, 184, 184, 184, 185, 185, 185, 185, 185,
185, 185, 185, 185, 185, 185, 185, 185, 185, 185,
185, 185, 185, 185, 185, 185, 185, 185, 185, 186,
186, 186, 186, 186, 186, 186, 186, 186, 186, 186,
186, 186, 186, 186, 186, 186, 186, 186, 186, 186,
186, 186, 186, 187, 187, 187, 187, 187, 187, 187,
187, 187, 187, 187, 187, 187, 187, 187, 187, 187,
187, 187, 187, 187, 187, 187, 187, 187, 188, 188,
188, 188, 188, 188, 188, 188, 188, 188, 188, 188,
188, 188, 188, 188, 188, 188, 188, 188, 188, 188,
188, 188, 189, 189, 189, 189, 189, 189, 189, 189,
189, 189, 189, 189, 189, 189, 189, 189, 189, 189,
189, 189, 189, 189, 189, 189, 189, 190, 190, 190,
190, 190, 190, 190, 190, 190, 190, 190, 190, 190,
190, 190, 190, 190, 190, 190, 190, 190, 190, 190,
190, 190, 191, 191, 191, 191, 191, 191, 191, 191,
191, 191, 191, 191, 191, 191, 191, 191, 191, 191,
191, 191, 191, 191, 191, 191, 192, 192, 192, 192,
192, 192, 192, 192, 192, 192, 192, 192, 192, 192,
192, 192, 192, 192, 192, 192, 192, 192, 192, 192,
192, 192, 193, 193, 193, 193, 193, 193, 193, 193,
193, 193, 193, 193, 193, 193, 193, 193, 193, 193,
193, 193, 193, 193, 193, 193, 193, 194, 194, 194,
194, 194, 194, 194, 194, 194, 194, 194, 194, 194,
194, 194, 194, 194, 194, 194, 194, 194, 194, 194,
194, 194, 195, 195, 195, 195, 195, 195, 195, 195,
195, 195, 195, 195, 195, 195, 195, 195, 195, 195,
195, 195, 195, 195, 195, 195, 195, 195, 196, 196,
196, 196, 196, 196, 196, 196, 196, 196, 196, 196,
196, 196, 196, 196, 196, 196, 196, 196, 196, 196,
196, 196, 196, 196, 197, 197, 197, 197, 197, 197,
197, 197, 197, 197, 197, 197, 197, 197, 197, 197,
197, 197, 197, 197, 197, 197, 197, 197, 197, 197,
198, 198, 198, 198, 198, 198, 198, 198, 198, 198,
198, 198, 198, 198, 198, 198, 198, 198, 198, 198,
198, 198, 198, 198, 198, 198, 199, 199, 199, 199,
199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
199, 199, 199, 199, 199, 199, 199, 199, 199, 199,
199, 199, 200, 200, 200, 200, 200, 200, 200, 200,
200, 200, 200, 200, 200, 200, 200, 200, 200, 200,
200, 200, 200, 200, 200, 200, 200, 200, 200, 201,
201, 201, 201, 201, 201, 201, 201, 201, 201, 201,
201, 201, 201, 201, 201, 201, 201, 201, 201, 201,
201, 201, 201, 201, 201, 201, 202, 202, 202, 202,
202, 202, 202, 202, 202, 202, 202, 202, 202, 202,
202, 202, 202, 202, 202, 202, 202, 202, 202, 202,
202, 202, 202, 203, 203, 203, 203, 203, 203, 203,
203, 203, 203, 203, 203, 203, 203, 203, 203, 203,
203, 203, 203, 203, 203, 203, 203, 203, 203, 203,
204, 204, 204, 204, 204, 204, 204, 204, 204, 204,
204, 204, 204, 204, 204, 204, 204, 204, 204, 204,
204, 204, 204, 204, 204, 204, 204, 205, 205, 205,
205, 205, 205, 205, 205, 205, 205, 205, 205, 205,
205, 205, 205, 205, 205, 205, 205, 205, 205, 205,
205, 205, 205, 205, 206, 206, 206, 206, 206, 206,
206, 206, 206, 206, 206, 206, 206, 206, 206, 206,
206, 206, 206, 206, 206, 206, 206, 206, 206, 206,
206, 206, 207, 207, 207, 207, 207, 207, 207, 207,
207, 207, 207, 207, 207, 207, 207, 207, 207, 207,
207, 207, 207, 207, 207, 207, 207, 207, 207, 207,
208, 208, 208, 208, 208, 208, 208, 208, 208, 208,
208, 208, 208, 208, 208, 208, 208, 208, 208, 208,
208, 208, 208, 208, 208, 208, 208, 209, 209, 209,
209, 209, 209, 209, 209, 209, 209, 209, 209, 209,
209, 209, 209, 209, 209, 209, 209, 209, 209, 209,
209, 209, 209, 209, 209, 209, 210, 210, 210, 210,
210, 210, 210, 210, 210, 210, 210, 210, 210, 210,
210, 210, 210, 210, 210, 210, 210, 210, 210, 210,
210, 210, 210, 210, 211, 211, 211, 211, 211, 211,
211, 211, 211, 211, 211, 211, 211, 211, 211, 211,
211, 211, 211, 211, 211, 211, 211, 211, 211, 211,
211, 211, 212, 212, 212, 212, 212, 212, 212, 212,
212, 212, 212, 212, 212, 212, 212, 212, 212, 212,
212, 212, 212, 212, 212, 212, 212, 212, 212, 212,
212, 213, 213, 213, 213, 213, 213, 213, 213, 213,
213, 213, 213, 213, 213, 213, 213, 213, 213, 213,
213, 213, 213, 213, 213, 213, 213, 213, 213, 213,
214, 214, 214, 214, 214, 214, 214, 214, 214, 214,
214, 214, 214, 214, 214, 214, 214, 214, 214, 214,
214, 214, 214, 214, 214, 214, 214, 214, 214, 215,
215, 215, 215, 215, 215, 215, 215, 215, 215, 215,
215, 215, 215, 215, 215, 215, 215, 215, 215, 215,
215, 215, 215, 215, 215, 215, 215, 215, 216, 216,
216, 216, 216, 216, 216, 216, 216, 216, 216, 216,
216, 216, 216, 216, 216, 216, 216, 216, 216, 216,
216, 216, 216, 216, 216, 216, 216, 217, 217, 217,
217, 217, 217, 217, 217, 217, 217, 217, 217, 217,
217, 217, 217, 217, 217, 217, 217, 217, 217, 217,
217, 217, 217, 217, 217, 217, 217, 218, 218, 218,
218, 218, 218, 218, 218, 218, 218, 218, 218, 218,
218, 218, 218, 218, 218, 218, 218, 218, 218, 218,
218, 218, 218, 218, 218, 218, 219, 219, 219, 219,
219, 219, 219, 219, 219, 219, 219, 219, 219, 219,
219, 219, 219, 219, 219, 219, 219, 219, 219, 219,
219, 219, 219, 219, 219, 219, 220, 220, 220, 220,
220, 220, 220, 220, 220, 220, 220, 220, 220, 220,
220, 220, 220, 220, 220, 220, 220, 220, 220, 220,
220, 220, 220, 220, 220, 220, 221, 221, 221, 221,
221, 221, 221, 221, 221, 221, 221, 221, 221, 221,
221, 221, 221, 221, 221, 221, 221, 221, 221, 221,
221, 221, 221, 221, 221, 221, 221, 222, 222, 222,
222, 222, 222, 222, 222, 222, 222, 222, 222, 222,
222, 222, 222, 222, 222, 222, 222, 222, 222, 222,
222, 222, 222, 222, 222, 222, 222, 223, 223, 223,
223, 223, 223, 223, 223, 223, 223, 223, 223, 223,
223, 223, 223, 223, 223, 223, 223, 223, 223, 223,
223, 223, 223, 223, 223, 223, 223, 223, 224, 224,
224, 224, 224, 224, 224, 224, 224, 224, 224, 224,
224, 224, 224, 224, 224, 224, 224, 224, 224, 224,
224, 224, 224, 224, 224, 224, 224, 224, 225, 225,
225, 225, 225, 225, 225, 225, 225, 225, 225, 225,
225, 225, 225, 225, 225, 225, 225, 225, 225, 225,
225, 225, 225, 225, 225, 225, 225, 225, 225, 226,
226, 226, 226, 226, 226, 226, 226, 226, 226, 226,
226, 226, 226, 226, 226, 226, 226, 226, 226, 226,
226, 226, 226, 226, 226, 226, 226, 226, 226, 226,
227, 227, 227, 227, 227, 227, 227, 227, 227, 227,
227, 227, 227, 227, 227, 227, 227, 227, 227, 227,
227, 227, 227, 227, 227, 227, 227, 227, 227, 227,
227, 227, 228, 228, 228, 228, 228, 228, 228, 228,
228, 228, 228, 228, 228, 228, 228, 228, 228, 228,
228, 228, 228, 228, 228, 228, 228, 228, 228, 228,
228, 228, 228, 229, 229, 229, 229, 229, 229, 229,
229, 229, 229, 229, 229, 229, 229, 229, 229, 229,
229, 229, 229, 229, 229, 229, 229, 229, 229, 229,
229, 229, 229, 229, 229, 230, 230, 230, 230, 230,
230, 230, 230, 230, 230, 230, 230, 230, 230, 230,
230, 230, 230, 230, 230, 230, 230, 230, 230, 230,
230, 230, 230, 230, 230, 230, 230, 231, 231, 231,
231, 231, 231, 231, 231, 231, 231, 231, 231, 231,
231, 231, 231, 231, 231, 231, 231, 231, 231, 231,
231, 231, 231, 231, 231, 231, 231, 231, 231, 232,
232, 232, 232, 232, 232, 232, 232, 232, 232, 232,
232, 232, 232, 232, 232, 232, 232, 232, 232, 232,
232, 232, 232, 232, 232, 232, 232, 232, 232, 232,
232, 233, 233, 233, 233, 233, 233, 233, 233, 233,
233, 233, 233, 233, 233, 233, 233, 233, 233, 233,
233, 233, 233, 233, 233, 233, 233, 233, 233, 233,
233, 233, 233, 233, 234, 234, 234, 234, 234, 234,
234, 234, 234, 234, 234, 234, 234, 234, 234, 234,
234, 234, 234, 234, 234, 234, 234, 234, 234, 234,
234, 234, 234, 234, 234, 234, 235, 235, 235, 235,
235, 235, 235, 235, 235, 235, 235, 235, 235, 235,
235, 235, 235, 235, 235, 235, 235, 235, 235, 235,
235, 235, 235, 235, 235, 235, 235, 235, 235, 236,
236, 236, 236, 236, 236, 236, 236, 236, 236, 236,
236, 236, 236, 236, 236, 236, 236, 236, 236, 236,
236, 236, 236, 236, 236, 236, 236, 236, 236, 236,
236, 236, 237, 237, 237, 237, 237, 237, 237, 237,
237, 237, 237, 237, 237, 237, 237, 237, 237, 237,
237, 237, 237, 237, 237, 237, 237, 237, 237, 237,
237, 237, 237, 237, 237, 238, 238, 238, 238, 238,
238, 238, 238, 238, 238, 238, 238, 238, 238, 238,
238, 238, 238, 238, 238, 238, 238, 238, 238, 238,
238, 238, 238, 238, 238, 238, 238, 238, 239, 239,
239, 239, 239, 239, 239, 239, 239, 239, 239, 239,
239, 239, 239, 239, 239, 239, 239, 239, 239, 239,
239, 239, 239, 239, 239, 239, 239, 239, 239, 239,
239, 239, 240, 240, 240, 240, 240, 240, 240, 240,
240, 240, 240, 240, 240, 240, 240, 240, 240, 240,
240, 240, 240, 240, 240, 240, 240, 240, 240, 240,
240, 240, 240, 240, 240, 240, 241, 241, 241, 241,
241, 241, 241, 241, 241, 241, 241, 241, 241, 241,
241, 241, 241, 241, 241, 241, 241, 241, 241, 241,
241, 241, 241, 241, 241, 241, 241, 241, 241, 241,
242, 242, 242, 242, 242, 242, 242, 242, 242, 242,
242, 242, 242, 242, 242, 242, 242, 242, 242, 242,
242, 242, 242, 242, 242, 242, 242, 242, 242, 242,
242, 242, 242, 242, 243, 243, 243, 243, 243, 243,
243, 243, 243, 243, 243, 243, 243, 243, 243, 243,
243, 243, 243, 243, 243, 243, 243, 243, 243, 243,
243, 243, 243, 243, 243, 243, 243, 243, 244, 244,
244, 244, 244, 244, 244, 244, 244, 244, 244, 244,
244, 244, 244, 244, 244, 244, 244, 244, 244, 244,
244, 244, 244, 244, 244, 244, 244, 244, 244, 244,
244, 244, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 245, 245, 245,
245, 245, 245, 245, 245, 245, 245, 246, 246, 246,
246, 246, 246, 246, 246, 246, 246, 246, 246, 246,
246, 246, 246, 246, 246, 246, 246, 246, 246, 246,
246, 246, 246, 246, 246, 246, 246, 246, 246, 246,
246, 246, 247, 247, 247, 247, 247, 247, 247, 247,
247, 247, 247, 247, 247, 247, 247, 247, 247, 247,
247, 247, 247, 247, 247, 247, 247, 247, 247, 247,
247, 247, 247, 247, 247, 247, 247, 248, 248, 248,
248, 248, 248, 248, 248, 248, 248, 248, 248, 248,
248, 248, 248, 248, 248, 248, 248, 248, 248, 248,
248, 248, 248, 248, 248, 248, 248, 248, 248, 248,
248, 248, 249, 249, 249, 249, 249, 249, 249, 249,
249, 249, 249, 249, 249, 249, 249, 249, 249, 249,
249, 249, 249, 249, 249, 249, 249, 249, 249, 249,
249, 249, 249, 249, 249, 249, 249, 250, 250, 250,
250, 250, 250, 250, 250, 250, 250, 250, 250, 250,
250, 250, 250, 250, 250, 250, 250, 250, 250, 250,
250, 250, 250, 250, 250, 250, 250, 250, 250, 250,
250, 250, 250, 251, 251, 251, 251, 251, 251, 251,
251, 251, 251, 251, 251, 251, 251, 251, 251, 251,
251, 251, 251, 251, 251, 251, 251, 251, 251, 251,
251, 251, 251, 251, 251, 251, 251, 251, 251, 252,
252, 252, 252, 252, 252, 252, 252, 252, 252, 252,
252, 252, 252, 252, 252, 252, 252, 252, 252, 252,
252, 252, 252, 252, 252, 252, 252, 252, 252, 252,
252, 252, 252, 252, 252, 253, 253, 253, 253, 253,
253, 253, 253, 253, 253, 253, 253, 253, 253, 253,
253, 253, 253, 253, 253, 253, 253, 253, 253, 253,
253, 253, 253, 253, 253, 253, 253, 253, 253, 253,
253, 254, 254, 254, 254, 254, 254, 254, 254, 254,
254, 254, 254, 254, 254, 254, 254, 254, 254, 254,
254, 254, 254, 254, 254, 254, 254, 254, 254, 254,
254, 254, 254, 254, 254, 254, 254, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255,
};
const uint16_t srgb_to_linear[256] =
{
0, 20, 40, 64, 80, 99, 119, 144, 160, 179,
199, 224, 241, 264, 288, 313, 340, 368, 396, 427,
458, 491, 526, 562, 599, 637, 677, 718, 761, 805,
851, 898, 947, 997, 1048, 1101, 1156, 1212, 1270, 1330,
1391, 1453, 1517, 1583, 1651, 1720, 1790, 1863, 1937, 2013,
2090, 2170, 2250, 2333, 2418, 2504, 2592, 2681, 2773, 2866,
2961, 3058, 3157, 3258, 3360, 3464, 3570, 3678, 3788, 3900,
4014, 4129, 4247, 4366, 4488, 4611, 4736, 4864, 4993, 5124,
5257, 5392, 5530, 5669, 5810, 5953, 6099, 6246, 6395, 6547,
6700, 6856, 7014, 7174, 7335, 7500, 7666, 7834, 8004, 8177,
8352, 8528, 8708, 8889, 9072, 9258, 9445, 9635, 9828, 10022,
10219, 10417, 10619, 10822, 11028, 11235, 11446, 11658, 11873, 12090,
12309, 12530, 12754, 12980, 13209, 13440, 13673, 13909, 14146, 14387,
14629, 14874, 15122, 15371, 15623, 15878, 16135, 16394, 16656, 16920,
17187, 17456, 17727, 18001, 18277, 18556, 18837, 19121, 19407, 19696,
19987, 20281, 20577, 20876, 21177, 21481, 21787, 22096, 22407, 22721,
23038, 23357, 23678, 24002, 24329, 24658, 24990, 25325, 25662, 26001,
26344, 26688, 27036, 27386, 27739, 28094, 28452, 28813, 29176, 29542,
29911, 30282, 30656, 31033, 31412, 31794, 32179, 32567, 32957, 33350,
33745, 34143, 34544, 34948, 35355, 35764, 36176, 36591, 37008, 37429,
37852, 38278, 38706, 39138, 39572, 40009, 40449, 40891, 41337, 41785,
42236, 42690, 43147, 43606, 44069, 44534, 45002, 45473, 45947, 46423,
46903, 47385, 47871, 48359, 48850, 49344, 49841, 50341, 50844, 51349,
51858, 52369, 52884, 53401, 53921, 54445, 54971, 55500, 56032, 56567,
57105, 57646, 58190, 58737, 59287, 59840, 60396, 60955, 61517, 62082,
62650, 63221, 63795, 64372, 64952, 65535,
};


@ -53,9 +53,6 @@ static __m128i mask_blue;
static __m128i mask_565_fix_rb;
static __m128i mask_565_fix_g;
static __m128i mask_565_rb;
static __m128i mask_565_pack_multiplier;
static force_inline __m128i
unpack_32_1x128 (uint32_t data)
{
@ -124,29 +121,6 @@ pack_2x128_128 (__m128i lo, __m128i hi)
}
static force_inline __m128i
pack_565_2packedx128_128 (__m128i lo, __m128i hi)
{
__m128i rb0 = _mm_and_si128 (lo, mask_565_rb);
__m128i rb1 = _mm_and_si128 (hi, mask_565_rb);
__m128i t0 = _mm_madd_epi16 (rb0, mask_565_pack_multiplier);
__m128i t1 = _mm_madd_epi16 (rb1, mask_565_pack_multiplier);
__m128i g0 = _mm_and_si128 (lo, mask_green);
__m128i g1 = _mm_and_si128 (hi, mask_green);
t0 = _mm_or_si128 (t0, g0);
t1 = _mm_or_si128 (t1, g1);
/* Simulates _mm_packus_epi32 */
t0 = _mm_slli_epi32 (t0, 16 - 5);
t1 = _mm_slli_epi32 (t1, 16 - 5);
t0 = _mm_srai_epi32 (t0, 16);
t1 = _mm_srai_epi32 (t1, 16);
return _mm_packs_epi32 (t0, t1);
}
__m128i
pack_565_2x128_128 (__m128i lo, __m128i hi)
{
__m128i data;
@ -2857,57 +2831,6 @@ sse2_composite_over_8888_n_8888 (pixman_implementation_t *imp,
}
static void
sse2_composite_src_x888_0565 (pixman_implementation_t *imp,
pixman_composite_info_t *info)
{
PIXMAN_COMPOSITE_ARGS (info);
uint16_t *dst_line, *dst;
uint32_t *src_line, *src, s;
int dst_stride, src_stride;
int32_t w;
PIXMAN_IMAGE_GET_LINE (src_image, src_x, src_y, uint32_t, src_stride, src_line, 1);
PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, uint16_t, dst_stride, dst_line, 1);
while (height--)
{
dst = dst_line;
dst_line += dst_stride;
src = src_line;
src_line += src_stride;
w = width;
while (w && (unsigned long)dst & 15)
{
s = *src++;
*dst = CONVERT_8888_TO_0565 (s);
dst++;
w--;
}
while (w >= 8)
{
__m128i xmm_src0 = load_128_unaligned ((__m128i *)src + 0);
__m128i xmm_src1 = load_128_unaligned ((__m128i *)src + 1);
save_128_aligned ((__m128i*)dst, pack_565_2packedx128_128 (xmm_src0, xmm_src1));
w -= 8;
src += 8;
dst += 8;
}
while (w)
{
s = *src++;
*dst = CONVERT_8888_TO_0565 (s);
dst++;
w--;
}
}
}
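For comparison, a scalar sketch of the same per-pixel conversion; this mirrors what pixman's CONVERT_8888_TO_0565 macro computes, assuming the usual x8r8g8b8-to-r5g6b5 packing:
#include <stdint.h>
static inline uint16_t
convert_8888_to_0565 (uint32_t s)
{
    return (uint16_t) (((s >> 3) & 0x001f) |  /* blue:  top 5 of 8 bits */
                       ((s >> 5) & 0x07e0) |  /* green: top 6 of 8 bits */
                       ((s >> 8) & 0xf800));  /* red:   top 5 of 8 bits */
}
The SSE2 path above vectorizes this by masking red/blue with mask_565_rb and repositioning both with a single _mm_madd_epi16 against mask_565_pack_multiplier, handling four pixels per 128-bit register.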
static void
sse2_composite_src_x888_8888 (pixman_implementation_t *imp,
pixman_composite_info_t *info)
@ -5394,15 +5317,11 @@ FAST_NEAREST_MAINLOOP_COMMON (sse2_8888_n_8888_normal_OVER,
scaled_nearest_scanline_sse2_8888_n_8888_OVER,
uint32_t, uint32_t, uint32_t, NORMAL, TRUE, TRUE)
#define BMSK ((1 << BILINEAR_INTERPOLATION_BITS) - 1)
#define BILINEAR_DECLARE_VARIABLES \
const __m128i xmm_wt = _mm_set_epi16 (wt, wt, wt, wt, wt, wt, wt, wt); \
const __m128i xmm_wb = _mm_set_epi16 (wb, wb, wb, wb, wb, wb, wb, wb); \
const __m128i xmm_xorc8 = _mm_set_epi16 (0, 0, 0, 0, BMSK, BMSK, BMSK, BMSK);\
const __m128i xmm_addc8 = _mm_set_epi16 (0, 0, 0, 0, 1, 1, 1, 1); \
const __m128i xmm_xorc7 = _mm_set_epi16 (0, BMSK, 0, BMSK, 0, BMSK, 0, BMSK);\
const __m128i xmm_addc7 = _mm_set_epi16 (0, 1, 0, 1, 0, 1, 0, 1); \
const __m128i xmm_xorc = _mm_set_epi16 (0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff);\
const __m128i xmm_addc = _mm_set_epi16 (0, 0, 0, 0, 1, 1, 1, 1); \
const __m128i xmm_ux = _mm_set_epi16 (unit_x, unit_x, unit_x, unit_x, \
unit_x, unit_x, unit_x, unit_x); \
const __m128i xmm_zero = _mm_setzero_si128 (); \
@ -5411,41 +5330,30 @@ FAST_NEAREST_MAINLOOP_COMMON (sse2_8888_n_8888_normal_OVER,
#define BILINEAR_INTERPOLATE_ONE_PIXEL(pix) \
do { \
__m128i xmm_wh, xmm_lo, xmm_hi, a; \
/* fetch 2x2 pixel block into sse2 registers */ \
__m128i tltr = _mm_loadl_epi64 ( \
(__m128i *)&src_top[pixman_fixed_to_int (vx)]); \
__m128i blbr = _mm_loadl_epi64 ( \
(__m128i *)&src_bottom[pixman_fixed_to_int (vx)]); \
/* fetch 2x2 pixel block into sse2 register */ \
uint32_t tl = src_top [pixman_fixed_to_int (vx)]; \
uint32_t tr = src_top [pixman_fixed_to_int (vx) + 1]; \
uint32_t bl = src_bottom [pixman_fixed_to_int (vx)]; \
uint32_t br = src_bottom [pixman_fixed_to_int (vx) + 1]; \
a = _mm_set_epi32 (tr, tl, br, bl); \
vx += unit_x; \
/* vertical interpolation */ \
a = _mm_add_epi16 (_mm_mullo_epi16 (_mm_unpacklo_epi8 (tltr, xmm_zero), \
a = _mm_add_epi16 (_mm_mullo_epi16 (_mm_unpackhi_epi8 (a, xmm_zero), \
xmm_wt), \
_mm_mullo_epi16 (_mm_unpacklo_epi8 (blbr, xmm_zero), \
_mm_mullo_epi16 (_mm_unpacklo_epi8 (a, xmm_zero), \
xmm_wb)); \
if (BILINEAR_INTERPOLATION_BITS < 8) \
{ \
/* calculate horizontal weights */ \
xmm_wh = _mm_add_epi16 (xmm_addc7, _mm_xor_si128 (xmm_xorc7, \
_mm_srli_epi16 (xmm_x, 16 - BILINEAR_INTERPOLATION_BITS))); \
xmm_x = _mm_add_epi16 (xmm_x, xmm_ux); \
/* horizontal interpolation */ \
a = _mm_madd_epi16 (_mm_unpackhi_epi16 (_mm_shuffle_epi32 ( \
a, _MM_SHUFFLE (1, 0, 3, 2)), a), xmm_wh); \
} \
else \
{ \
/* calculate horizontal weights */ \
xmm_wh = _mm_add_epi16 (xmm_addc8, _mm_xor_si128 (xmm_xorc8, \
_mm_srli_epi16 (xmm_x, 16 - BILINEAR_INTERPOLATION_BITS))); \
xmm_x = _mm_add_epi16 (xmm_x, xmm_ux); \
/* horizontal interpolation */ \
xmm_lo = _mm_mullo_epi16 (a, xmm_wh); \
xmm_hi = _mm_mulhi_epu16 (a, xmm_wh); \
a = _mm_add_epi32 (_mm_unpacklo_epi16 (xmm_lo, xmm_hi), \
_mm_unpackhi_epi16 (xmm_lo, xmm_hi)); \
} \
/* calculate horizontal weights */ \
xmm_wh = _mm_add_epi16 (xmm_addc, \
_mm_xor_si128 (xmm_xorc, \
_mm_srli_epi16 (xmm_x, 8))); \
xmm_x = _mm_add_epi16 (xmm_x, xmm_ux); \
/* horizontal interpolation */ \
xmm_lo = _mm_mullo_epi16 (a, xmm_wh); \
xmm_hi = _mm_mulhi_epu16 (a, xmm_wh); \
a = _mm_add_epi32 (_mm_unpacklo_epi16 (xmm_lo, xmm_hi), \
_mm_unpackhi_epi16 (xmm_lo, xmm_hi)); \
/* shift and pack the result */ \
a = _mm_srli_epi32 (a, BILINEAR_INTERPOLATION_BITS * 2); \
a = _mm_srli_epi32 (a, 16); \
a = _mm_packs_epi32 (a, a); \
a = _mm_packus_epi16 (a, a); \
pix = _mm_cvtsi128_si32 (a); \
@ -5827,7 +5735,6 @@ static const pixman_fast_path_t sse2_fast_paths[] =
PIXMAN_STD_FAST_PATH (OVER, solid, null, a8r8g8b8, sse2_composite_over_n_8888),
PIXMAN_STD_FAST_PATH (OVER, solid, null, x8r8g8b8, sse2_composite_over_n_8888),
PIXMAN_STD_FAST_PATH (OVER, solid, null, r5g6b5, sse2_composite_over_n_0565),
PIXMAN_STD_FAST_PATH (OVER, solid, null, b5g6r5, sse2_composite_over_n_0565),
PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, a8r8g8b8, sse2_composite_over_8888_8888),
PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null, x8r8g8b8, sse2_composite_over_8888_8888),
PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null, a8b8g8r8, sse2_composite_over_8888_8888),
@ -5887,10 +5794,6 @@ static const pixman_fast_path_t sse2_fast_paths[] =
PIXMAN_STD_FAST_PATH (SRC, solid, a8, x8r8g8b8, sse2_composite_src_n_8_8888),
PIXMAN_STD_FAST_PATH (SRC, solid, a8, a8b8g8r8, sse2_composite_src_n_8_8888),
PIXMAN_STD_FAST_PATH (SRC, solid, a8, x8b8g8r8, sse2_composite_src_n_8_8888),
PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, r5g6b5, sse2_composite_src_x888_0565),
PIXMAN_STD_FAST_PATH (SRC, a8b8g8r8, null, b5g6r5, sse2_composite_src_x888_0565),
PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, r5g6b5, sse2_composite_src_x888_0565),
PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, b5g6r5, sse2_composite_src_x888_0565),
PIXMAN_STD_FAST_PATH (SRC, x8r8g8b8, null, a8r8g8b8, sse2_composite_src_x888_8888),
PIXMAN_STD_FAST_PATH (SRC, x8b8g8r8, null, a8b8g8r8, sse2_composite_src_x888_8888),
PIXMAN_STD_FAST_PATH (SRC, a8r8g8b8, null, a8r8g8b8, sse2_composite_copy_area),
@ -5936,9 +5839,6 @@ static const pixman_fast_path_t sse2_fast_paths[] =
SIMPLE_BILINEAR_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, sse2_8888_8888),
SIMPLE_BILINEAR_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8, sse2_8888_8888),
SIMPLE_BILINEAR_FAST_PATH (SRC, x8r8g8b8, x8r8g8b8, sse2_8888_8888),
SIMPLE_BILINEAR_FAST_PATH (SRC, a8b8g8r8, a8b8g8r8, sse2_8888_8888),
SIMPLE_BILINEAR_FAST_PATH (SRC, a8b8g8r8, x8b8g8r8, sse2_8888_8888),
SIMPLE_BILINEAR_FAST_PATH (SRC, x8b8g8r8, x8b8g8r8, sse2_8888_8888),
SIMPLE_BILINEAR_FAST_PATH (OVER, a8r8g8b8, x8r8g8b8, sse2_8888_8888),
SIMPLE_BILINEAR_FAST_PATH (OVER, a8b8g8r8, x8b8g8r8, sse2_8888_8888),
@ -6156,13 +6056,19 @@ static void
sse2_src_iter_init (pixman_implementation_t *imp, pixman_iter_t *iter)
{
pixman_image_t *image = iter->image;
int x = iter->x;
int y = iter->y;
int width = iter->width;
int height = iter->height;
#define FLAGS \
(FAST_PATH_STANDARD_FLAGS | FAST_PATH_ID_TRANSFORM | \
FAST_PATH_BITS_IMAGE | FAST_PATH_SAMPLES_COVER_CLIP_NEAREST)
(FAST_PATH_STANDARD_FLAGS | FAST_PATH_ID_TRANSFORM | FAST_PATH_BITS_IMAGE)
if ((iter->iter_flags & ITER_NARROW) &&
(iter->image_flags & FLAGS) == FLAGS)
if ((iter->flags & ITER_NARROW) &&
(image->common.flags & FLAGS) == FLAGS &&
x >= 0 && y >= 0 &&
x + width <= image->bits.width &&
y + height <= image->bits.height)
{
const fetcher_info_t *f;
@ -6173,7 +6079,7 @@ sse2_src_iter_init (pixman_implementation_t *imp, pixman_iter_t *iter)
uint8_t *b = (uint8_t *)image->bits.bits;
int s = image->bits.rowstride * 4;
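/* bits.rowstride is measured in uint32_t units, so s is the row
 * pitch in bytes; e.g. a 100-pixel-wide a8r8g8b8 image with no
 * row padding has rowstride == 100 and s == 400 */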
iter->bits = b + s * iter->y + iter->x * PIXMAN_FORMAT_BPP (f->format) / 8;
iter->bits = b + s * iter->y + x * PIXMAN_FORMAT_BPP (f->format) / 8;
iter->stride = s;
iter->get_scanline = f->get_scanline;
@ -6209,8 +6115,6 @@ _pixman_implementation_create_sse2 (pixman_implementation_t *fallback)
mask_ffff = create_mask_16_128 (0xffff);
mask_ff000000 = create_mask_2x32_128 (0xff000000, 0xff000000);
mask_alpha = create_mask_2x32_128 (0x00ff0000, 0x00000000);
mask_565_rb = create_mask_2x32_128 (0x00f800f8, 0x00f800f8);
mask_565_pack_multiplier = create_mask_2x32_128 (0x20000004, 0x20000004);
/* Set up function pointers */
imp->combine_32[PIXMAN_OP_OVER] = sse2_combine_over_u;

View File

@ -184,22 +184,6 @@ pixman_malloc_abc (unsigned int a,
return malloc (a * b * c);
}
static void
unorm_to_unorm_params (int in_width, int out_width, uint32_t *factor, int *shift)
{
int w = 0;
*factor = 0;
while (in_width != 0 && w < out_width)
{
*factor |= 1 << w;
w += in_width;
}
/* Did we generate too many bits? */
*shift = w - out_width;
}
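A worked example of the factor/shift pair this helper produces: for in_width = 5 and out_width = 16, the loop sets bits 0, 5, 10 and 15 and stops at w = 20, giving factor = 0x8421 and shift = 4. A 5-bit channel value v then widens as v * 0x8421 >> 4; e.g. 0x1f * 0x8421 >> 4 == 0xffff, which is exact bit replication of the 5-bit value into 16 bits.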
/*
* This function expands images from ARGB8 format to ARGB16. To preserve
* precision, it needs to know the original source format. For example, if the
@ -229,15 +213,8 @@ pixman_expand (uint64_t * dst,
r_mask = ~(~0 << r_size),
g_mask = ~(~0 << g_size),
b_mask = ~(~0 << b_size);
uint32_t au_factor, ru_factor, gu_factor, bu_factor;
int au_shift, ru_shift, gu_shift, bu_shift;
int i;
unorm_to_unorm_params (a_size, 16, &au_factor, &au_shift);
unorm_to_unorm_params (r_size, 16, &ru_factor, &ru_shift);
unorm_to_unorm_params (g_size, 16, &gu_factor, &gu_shift);
unorm_to_unorm_params (b_size, 16, &bu_factor, &bu_shift);
/* Start at the end so that we can do the expansion in place
* when src == dst
*/
@ -250,7 +227,7 @@ pixman_expand (uint64_t * dst,
if (a_size)
{
a = (pixel >> a_shift) & a_mask;
a16 = a * au_factor >> au_shift;
a16 = unorm_to_unorm (a, a_size, 16);
}
else
{
@ -262,9 +239,9 @@ pixman_expand (uint64_t * dst,
r = (pixel >> r_shift) & r_mask;
g = (pixel >> g_shift) & g_mask;
b = (pixel >> b_shift) & b_mask;
r16 = r * ru_factor >> ru_shift;
g16 = g * gu_factor >> gu_shift;
b16 = b * bu_factor >> bu_shift;
r16 = unorm_to_unorm (r, r_size, 16);
g16 = unorm_to_unorm (g, g_size, 16);
b16 = unorm_to_unorm (b, b_size, 16);
}
else
{

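The unorm_to_unorm calls above perform the same widening directly; conceptually it is bit replication. A minimal sketch of that conversion (a hypothetical stand-in, not pixman's exact implementation):

#include <stdint.h>

/* Widen an unsigned-normalized value from `from` bits to `to` bits by
 * replicating its bit pattern, so 0 maps to 0 and the maximum input
 * maps to the maximum output (e.g. 5-bit 0x1f -> 16-bit 0xffff). */
static uint32_t
replicate_bits (uint32_t v, int from, int to)
{
    uint32_t result;
    int filled;

    if (from == 0)
	return 0;

    result = v << (to - from);        /* source bits at the top */
    for (filled = from; filled < to; filled *= 2)
	result |= result >> filled;   /* copy the pattern downwards */

    return result;
}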
View File

@ -32,10 +32,10 @@
#endif
#define PIXMAN_VERSION_MAJOR 0
#define PIXMAN_VERSION_MINOR 27
#define PIXMAN_VERSION_MICRO 1
#define PIXMAN_VERSION_MINOR 21
#define PIXMAN_VERSION_MICRO 7
#define PIXMAN_VERSION_STRING "0.27.1"
#define PIXMAN_VERSION_STRING "0.21.7"
#define PIXMAN_VERSION_ENCODE(major, minor, micro) ( \
((major) * 10000) \
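Assuming the elided continuation of the macro adds ((minor) * 100) and ((micro)) in the conventional way, the restored version encodes as PIXMAN_VERSION_ENCODE (0, 21, 7) == 0 * 10000 + 21 * 100 + 7 == 2107.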

View File

@ -25,7 +25,10 @@
* Based on fbmmx.c by Owen Taylor, Søren Sandmann and Nicholas Miell
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "pixman-private.h"
#include "pixman-combine32.h"
#include <altivec.h>

View File

@ -1,237 +0,0 @@
/*
* Copyright © 2000 SuSE, Inc.
* Copyright © 2007 Red Hat, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that
* copyright notice and this permission notice appear in supporting
* documentation, and that the name of SuSE not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. SuSE makes no representations about the
* suitability of this software for any purpose. It is provided "as is"
* without express or implied warranty.
*
* SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
* BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "pixman-private.h"
#if defined(USE_X86_MMX) || defined (USE_SSE2)
/* The CPU detection code needs to be in a file not compiled with
 * "-mmmx -msse", as gcc would otherwise generate CMOV instructions
 * that raise SIGILL on old CPUs that lack them.
 */
typedef enum
{
X86_MMX = (1 << 0),
X86_MMX_EXTENSIONS = (1 << 1),
X86_SSE = (1 << 2) | X86_MMX_EXTENSIONS,
X86_SSE2 = (1 << 3),
X86_CMOV = (1 << 4)
} cpu_features_t;
#ifdef HAVE_GETISAX
#include <sys/auxv.h>
static cpu_features_t
detect_cpu_features (void)
{
cpu_features_t features = 0;
unsigned int result = 0;
if (getisax (&result, 1))
{
if (result & AV_386_CMOV)
features |= X86_CMOV;
if (result & AV_386_MMX)
features |= X86_MMX;
if (result & AV_386_AMD_MMX)
features |= X86_MMX_EXTENSIONS;
if (result & AV_386_SSE)
features |= X86_SSE;
if (result & AV_386_SSE2)
features |= X86_SSE2;
}
return features;
}
#else
#define _PIXMAN_X86_64 \
(defined(__amd64__) || defined(__x86_64__) || defined(_M_AMD64))
static pixman_bool_t
have_cpuid (void)
{
#if _PIXMAN_X86_64 || defined (_MSC_VER)
return TRUE;
#elif defined (__GNUC__)
uint32_t result;
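/* try to flip EFLAGS bit 21 ("ID"); CPUID exists exactly when the
 * bit is software-toggleable, hence the 0x00200000 xor below */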
__asm__ volatile (
"pushf" "\n\t"
"pop %%eax" "\n\t"
"mov %%eax, %%ecx" "\n\t"
"xor $0x00200000, %%eax" "\n\t"
"push %%eax" "\n\t"
"popf" "\n\t"
"pushf" "\n\t"
"pop %%eax" "\n\t"
"xor %%ecx, %%eax" "\n\t"
"mov %%eax, %0" "\n\t"
: "=r" (result)
:
: "%eax", "%ecx");
return !!result;
#else
#error "Unknown compiler"
#endif
}
static void
pixman_cpuid (uint32_t feature,
uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
{
#if defined (__GNUC__)
#if _PIXMAN_X86_64
__asm__ volatile (
"cpuid" "\n\t"
: "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
: "a" (feature));
#else
/* On x86-32 we need to be careful about the handling of %ebx
* and %esp. We can't declare either one as clobbered
* since they are special registers (%ebx is the "PIC
* register" holding an offset to global data, %esp the
* stack pointer), so we need to make sure that %ebx is
* preserved, and that %esp has its original value when
* accessing the output operands.
*/
__asm__ volatile (
"xchg %%ebx, %1" "\n\t"
"cpuid" "\n\t"
"xchg %%ebx, %1" "\n\t"
: "=a" (*a), "=r" (*b), "=c" (*c), "=d" (*d)
: "a" (feature));
#endif
#elif defined (_MSC_VER)
int info[4];
__cpuid (info, feature);
*a = info[0];
*b = info[1];
*c = info[2];
*d = info[3];
#else
#error Unknown compiler
#endif
}
static cpu_features_t
detect_cpu_features (void)
{
uint32_t a, b, c, d;
cpu_features_t features = 0;
if (!have_cpuid())
return features;
/* Get feature bits */
pixman_cpuid (0x01, &a, &b, &c, &d);
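/* CPUID.01H:EDX feature bits: 15 = CMOV, 23 = MMX, 25 = SSE, 26 = SSE2 */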
if (d & (1 << 15))
features |= X86_CMOV;
if (d & (1 << 23))
features |= X86_MMX;
if (d & (1 << 25))
features |= X86_SSE;
if (d & (1 << 26))
features |= X86_SSE2;
/* Check for AMD specific features */
if ((features & X86_MMX) && !(features & X86_SSE))
{
char vendor[13];
/* Get vendor string */
memset (vendor, 0, sizeof vendor);
pixman_cpuid (0x00, &a, &b, &c, &d);
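/* leaf 0 returns the vendor string split across b, d, c, in that
 * order: "Auth" "enti" "cAMD" on AMD parts */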
memcpy (vendor + 0, &b, 4);
memcpy (vendor + 4, &d, 4);
memcpy (vendor + 8, &c, 4);
if (strcmp (vendor, "AuthenticAMD") == 0 ||
strcmp (vendor, "Geode by NSC") == 0)
{
pixman_cpuid (0x80000000, &a, &b, &c, &d);
if (a >= 0x80000001)
{
pixman_cpuid (0x80000001, &a, &b, &c, &d);
if (d & (1 << 22))
features |= X86_MMX_EXTENSIONS;
}
}
}
return features;
}
#endif
static pixman_bool_t
have_feature (cpu_features_t feature)
{
static pixman_bool_t initialized;
static cpu_features_t features;
if (!initialized)
{
features = detect_cpu_features();
initialized = TRUE;
}
return (features & feature) == feature;
}
#endif
pixman_implementation_t *
_pixman_x86_get_implementations (pixman_implementation_t *imp)
{
#define MMX_BITS (X86_MMX | X86_MMX_EXTENSIONS)
#define SSE2_BITS (X86_MMX | X86_MMX_EXTENSIONS | X86_SSE | X86_SSE2)
#ifdef USE_X86_MMX
if (!_pixman_disabled ("mmx") && have_feature (MMX_BITS))
imp = _pixman_implementation_create_mmx (imp);
#endif
#ifdef USE_SSE2
if (!_pixman_disabled ("sse2") && have_feature (SSE2_BITS))
imp = _pixman_implementation_create_sse2 (imp);
#endif
return imp;
}
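The chaining above relies on the implementation-delegation pattern: each create call wraps the previous imp as its fallback, so the most capable implementation is consulted first and defers anything it cannot accelerate. A minimal sketch of the idea (illustrative names, not pixman's actual structs):

#include <stdlib.h>

typedef struct impl impl_t;
struct impl
{
    impl_t *fallback;   /* consulted for anything this level lacks */
    /* per-implementation operation tables would live here */
};

static impl_t *
implementation_create (impl_t *fallback)
{
    impl_t *imp = malloc (sizeof *imp);

    if (imp)
	imp->fallback = fallback;   /* e.g. SSE2 -> MMX -> general */
    return imp;
}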

View File

@ -30,7 +30,7 @@
#include <stdlib.h>
pixman_implementation_t *global_implementation;
static pixman_implementation_t *global_implementation;
#ifdef TOOLCHAIN_SUPPORTS_ATTRIBUTE_CONSTRUCTOR
static void __attribute__((constructor))
@ -40,6 +40,16 @@ pixman_constructor (void)
}
#endif
static force_inline pixman_implementation_t *
get_implementation (void)
{
#ifndef TOOLCHAIN_SUPPORTS_ATTRIBUTE_CONSTRUCTOR
if (!global_implementation)
global_implementation = _pixman_choose_implementation ();
#endif
return global_implementation;
}
typedef struct operator_info_t operator_info_t;
struct operator_info_t
@ -224,19 +234,19 @@ clip_source_image (pixman_region32_t * region,
* returns FALSE if the final region is empty. That is indistinguishable
* from an allocation failure, but rendering ignores those anyway.
*/
pixman_bool_t
_pixman_compute_composite_region32 (pixman_region32_t * region,
pixman_image_t * src_image,
pixman_image_t * mask_image,
pixman_image_t * dest_image,
int32_t src_x,
int32_t src_y,
int32_t mask_x,
int32_t mask_y,
int32_t dest_x,
int32_t dest_y,
int32_t width,
int32_t height)
static pixman_bool_t
pixman_compute_composite_region32 (pixman_region32_t * region,
pixman_image_t * src_image,
pixman_image_t * mask_image,
pixman_image_t * dest_image,
int32_t src_x,
int32_t src_y,
int32_t mask_x,
int32_t mask_y,
int32_t dest_x,
int32_t dest_y,
int32_t width,
int32_t height)
{
region->extents.x1 = dest_x;
region->extents.x2 = dest_x + width;
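/* pixman boxes are half-open: dest_x = 10, width = 20 gives
 * x1 = 10, x2 = 30, covering pixels 10..29 */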
@ -615,7 +625,7 @@ pixman_image_composite32 (pixman_op_t op,
pixman_region32_init (&region);
if (!_pixman_compute_composite_region32 (
if (!pixman_compute_composite_region32 (
&region, src, mask, dest,
src_x, src_y, mask_x, mask_y, dest_x, dest_y, width, height))
{
@ -1017,7 +1027,6 @@ pixman_format_supported_source (pixman_format_code_t format)
case PIXMAN_a2r10g10b10:
case PIXMAN_x2r10g10b10:
case PIXMAN_a8r8g8b8:
case PIXMAN_a8r8g8b8_sRGB:
case PIXMAN_x8r8g8b8:
case PIXMAN_a8b8g8r8:
case PIXMAN_x8b8g8r8:
@ -1115,7 +1124,7 @@ pixman_compute_composite_region (pixman_region16_t * region,
pixman_region32_init (&r32);
retval = _pixman_compute_composite_region32 (
retval = pixman_compute_composite_region32 (
&r32, src_image, mask_image, dest_image,
src_x, src_y, mask_x, mask_y, dest_x, dest_y,
width, height);

View File

@ -655,7 +655,6 @@ struct pixman_indexed
#define PIXMAN_TYPE_YV12 7
#define PIXMAN_TYPE_BGRA 8
#define PIXMAN_TYPE_RGBA 9
#define PIXMAN_TYPE_ARGB_SRGB 10
#define PIXMAN_FORMAT_COLOR(f) \
(PIXMAN_FORMAT_TYPE(f) == PIXMAN_TYPE_ARGB || \
@ -679,9 +678,6 @@ typedef enum {
PIXMAN_x2b10g10r10 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ABGR,0,10,10,10),
PIXMAN_a2b10g10r10 = PIXMAN_FORMAT(32,PIXMAN_TYPE_ABGR,2,10,10,10),
/* sRGB formats */
PIXMAN_a8r8g8b8_sRGB = PIXMAN_FORMAT(32,PIXMAN_TYPE_ARGB_SRGB,8,8,8,8),
/* 24bpp formats */
PIXMAN_r8g8b8 = PIXMAN_FORMAT(24,PIXMAN_TYPE_ARGB,0,8,8,8),
PIXMAN_b8g8r8 = PIXMAN_FORMAT(24,PIXMAN_TYPE_ABGR,0,8,8,8),
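Assuming the usual PIXMAN_FORMAT packing (bpp << 24 | type << 16 | a << 12 | r << 8 | g << 4 | b), a worked example for one of the entries above:

/* PIXMAN_a8r8g8b8 == (32 << 24) | (PIXMAN_TYPE_ARGB << 16)
 *                  | (8 << 12) | (8 << 8) | (8 << 4) | 8
 *                  == 0x20028888 */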
@ -873,65 +869,6 @@ void pixman_image_composite32 (pixman_op_t op,
*/
void pixman_disable_out_of_bounds_workaround (void);
/*
* Glyphs
*/
typedef struct pixman_glyph_cache_t pixman_glyph_cache_t;
typedef struct
{
int x, y;
const void *glyph;
} pixman_glyph_t;
pixman_glyph_cache_t *pixman_glyph_cache_create (void);
void pixman_glyph_cache_destroy (pixman_glyph_cache_t *cache);
void pixman_glyph_cache_freeze (pixman_glyph_cache_t *cache);
void pixman_glyph_cache_thaw (pixman_glyph_cache_t *cache);
const void * pixman_glyph_cache_lookup (pixman_glyph_cache_t *cache,
void *font_key,
void *glyph_key);
const void * pixman_glyph_cache_insert (pixman_glyph_cache_t *cache,
void *font_key,
void *glyph_key,
int origin_x,
int origin_y,
pixman_image_t *glyph_image);
void pixman_glyph_cache_remove (pixman_glyph_cache_t *cache,
void *font_key,
void *glyph_key);
void pixman_glyph_get_extents (pixman_glyph_cache_t *cache,
int n_glyphs,
pixman_glyph_t *glyphs,
pixman_box32_t *extents);
pixman_format_code_t pixman_glyph_get_mask_format (pixman_glyph_cache_t *cache,
int n_glyphs,
pixman_glyph_t * glyphs);
void pixman_composite_glyphs (pixman_op_t op,
pixman_image_t *src,
pixman_image_t *dest,
pixman_format_code_t mask_format,
int32_t src_x,
int32_t src_y,
int32_t mask_x,
int32_t mask_y,
int32_t dest_x,
int32_t dest_y,
int32_t width,
int32_t height,
pixman_glyph_cache_t *cache,
int n_glyphs,
pixman_glyph_t *glyphs);
void pixman_composite_glyphs_no_mask (pixman_op_t op,
pixman_image_t *src,
pixman_image_t *dest,
int32_t src_x,
int32_t src_y,
int32_t dest_x,
int32_t dest_y,
pixman_glyph_cache_t *cache,
int n_glyphs,
pixman_glyph_t *glyphs);
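The cache declarations above pair freeze/thaw with lookup/insert because returned glyph pointers stay valid only while the cache is frozen. A hypothetical usage sketch (placeholder keys and image, not code from pixman):

static void
draw_one_glyph (pixman_glyph_cache_t *cache,
		void *font_key, void *glyph_key,
		pixman_image_t *glyph_image)
{
    const void *g;

    /* freeze around the whole lookup-and-composite sequence */
    pixman_glyph_cache_freeze (cache);

    g = pixman_glyph_cache_lookup (cache, font_key, glyph_key);
    if (!g)
	g = pixman_glyph_cache_insert (cache, font_key, glyph_key,
				       0, 0, glyph_image);

    /* ... build a pixman_glyph_t array from g and call
     * pixman_composite_glyphs () ... */

    pixman_glyph_cache_thaw (cache);
}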
/*
* Trapezoids
*/