#undef PROFILE_THE_BEAST

typedef unsigned char ubyte;
static const vector unsigned char
    perm_rgb_0 = { 0x00, 0x01, 0x10, 0x02, 0x03, 0x11, 0x04, 0x05,
                   0x12, 0x06, 0x07, 0x13, 0x08, 0x09, 0x14, 0x0a },
    perm_rgb_1 = { 0x0b, 0x15, 0x0c, 0x0d, 0x16, 0x0e, 0x0f, 0x17,
                   0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
    perm_rgb_2 = { 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
                   0x00, 0x01, 0x18, 0x02, 0x03, 0x19, 0x04, 0x05 },
    perm_rgb_3 = { 0x1a, 0x06, 0x07, 0x1b, 0x08, 0x09, 0x1c, 0x0a,
                   0x0b, 0x1d, 0x0c, 0x0d, 0x1e, 0x0e, 0x0f, 0x1f };
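/*
 * Illustrative sketch (not part of the original file; the helper name is
 * ours): a scalar model of the two-operand vec_perm used with the tables
 * above.  Selector bytes 0x00-0x0f pick from the first source vector and
 * 0x10-0x1f from the second, which is how perm_rgb_0..perm_rgb_3 interleave
 * two merged planes into packed 24-bit pixels.
 */
#if 0
static void vec_perm_model(const unsigned char a[16], const unsigned char b[16],
                           const unsigned char sel[16], unsigned char out[16])
{
    for (int i = 0; i < 16; i++)
        out[i] = (sel[i] & 0x10) ? b[sel[i] & 0x0f] : a[sel[i] & 0x0f];
}
#endif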
#define vec_merge3(x2, x1, x0, y0, y1, y2)    \
    do {                                      \
        __typeof__(x0) o0, o2, o3;            \
        o0 = vec_mergeh(x0, x1);              \
        y0 = vec_perm(o0, x2, perm_rgb_0);    \
        o2 = vec_perm(o0, x2, perm_rgb_1);    \
        o3 = vec_mergel(x0, x1);              \
        y1 = vec_perm(o3, o2, perm_rgb_2);    \
        y2 = vec_perm(o3, o2, perm_rgb_3);    \
    } while (0)
#define vec_mstbgr24(x0, x1, x2, ptr)         \
    do {                                      \
        __typeof__(x0) _0, _1, _2;            \
        vec_merge3(x0, x1, x2, _0, _1, _2);   \
        vec_st(_0, 0, ptr++);                 \
        vec_st(_1, 0, ptr++);                 \
        vec_st(_2, 0, ptr++);                 \
    } while (0)
/* as above, but with the first and third input vectors swapped */
#define vec_mstrgb24(x0, x1, x2, ptr)         \
    do {                                      \
        __typeof__(x0) _0, _1, _2;            \
        vec_merge3(x2, x1, x0, _0, _1, _2);   \
        vec_st(_0, 0, ptr++);                 \
        vec_st(_1, 0, ptr++);                 \
        vec_st(_2, 0, ptr++);                 \
    } while (0)
#define vec_mstrgb32(T, x0, x1, x2, x3, ptr)                \
    do {                                                    \
        T _0, _1, _2, _3;                                   \
        _0 = vec_mergeh(x0, x1);                            \
        _1 = vec_mergeh(x2, x3);                            \
        _2 = (T) vec_mergeh((vector unsigned short) _0,     \
                            (vector unsigned short) _1);    \
        _3 = (T) vec_mergel((vector unsigned short) _0,     \
                            (vector unsigned short) _1);    \
        vec_st(_2, 0 * 16, (T *) ptr);                      \
        vec_st(_3, 1 * 16, (T *) ptr);                      \
        _0 = vec_mergel(x0, x1);                            \
        _1 = vec_mergel(x2, x3);                            \
        _2 = (T) vec_mergeh((vector unsigned short) _0,     \
                            (vector unsigned short) _1);    \
        _3 = (T) vec_mergel((vector unsigned short) _0,     \
                            (vector unsigned short) _1);    \
        vec_st(_2, 2 * 16, (T *) ptr);                      \
        vec_st(_3, 3 * 16, (T *) ptr);                      \
        ptr += 4;                                           \
    } while (0)
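/*
 * Illustrative sketch (not part of the original file; the helper name is
 * ours): scalar model of the double merge above.  Merging bytes and then
 * halfwords interleaves the four 16-byte planes x0..x3 into 64 bytes of
 * x0[i] x1[i] x2[i] x3[i] pixels, i.e. one 32bpp pixel per source index.
 */
#if 0
static void merge4_model(const unsigned char *x0, const unsigned char *x1,
                         const unsigned char *x2, const unsigned char *x3,
                         unsigned char out[64])
{
    for (int i = 0; i < 16; i++) {
        out[4 * i + 0] = x0[i];
        out[4 * i + 1] = x1[i];
        out[4 * i + 2] = x2[i];
        out[4 * i + 3] = x3[i];
    }
}
#endif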
/* unpack the high / low half of a vector of bytes into signed shorts */
#define vec_unh(x)                                                        \
    (vector signed short)                                                 \
        vec_perm(x, (__typeof__(x)) { 0 },                                \
                 ((vector unsigned char) {                                \
                     0x10, 0x00, 0x10, 0x01, 0x10, 0x02, 0x10, 0x03,      \
                     0x10, 0x04, 0x10, 0x05, 0x10, 0x06, 0x10, 0x07 }))

#define vec_unl(x)                                                        \
    (vector signed short)                                                 \
        vec_perm(x, (__typeof__(x)) { 0 },                                \
                 ((vector unsigned char) {                                \
                     0x10, 0x08, 0x10, 0x09, 0x10, 0x0A, 0x10, 0x0B,      \
                     0x10, 0x0C, 0x10, 0x0D, 0x10, 0x0E, 0x10, 0x0F }))
#define vec_clip_s16(x)                                                   \
    vec_max(vec_min(x, ((vector signed short) {                           \
                        235, 235, 235, 235, 235, 235, 235, 235 })),       \
            ((vector signed short) { 16, 16, 16, 16, 16, 16, 16, 16 }))
#define vec_packclp(x, y)                                                 \
    (vector unsigned char)                                                \
        vec_packs((vector unsigned short)                                 \
                      vec_max(x, ((vector signed short) { 0 })),          \
                  (vector unsigned short)                                 \
                      vec_max(y, ((vector signed short) { 0 })))
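/*
 * Illustrative sketch (not part of the original file; helper names are
 * ours): scalar equivalents of the two helpers above.  vec_clip_s16 clamps
 * each lane to the 16..235 studio range; vec_packclp clamps negatives to
 * zero and then saturation-packs signed shorts into unsigned bytes.
 */
#if 0
static short clip_s16_model(short x)
{
    return x < 16 ? 16 : x > 235 ? 235 : x;
}

static unsigned char packclp_model(short x)
{
    return x < 0 ? 0 : x > 255 ? 255 : (unsigned char) x;
}
#endif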
static inline void cvtyuvtoRGB(SwsContext *c, vector signed short Y,
                               vector signed short U, vector signed short V,
                               vector signed short *R, vector signed short *G,
                               vector signed short *B)
{
    vector signed short vx, ux, uvx;

    Y = vec_mradds(Y, c->CY, c->OY);
    U = vec_sub(U, (vector signed short)
                       vec_splat((vector signed short) { 128 }, 0));
    V = vec_sub(V, (vector signed short)
                       vec_splat((vector signed short) { 128 }, 0));

    /* ux  = (CBU * (u << CSHIFT) + 0x4000) >> 15 */
    ux = vec_sl(U, c->CSHIFT);
    *B = vec_mradds(ux, c->CBU, Y);

    /* vx  = (CRV * (v << CSHIFT) + 0x4000) >> 15 */
    vx = vec_sl(V, c->CSHIFT);
    *R = vec_mradds(vx, c->CRV, Y);

    /* uvx = ((CGU * u) + (CGV * v)) >> 15 */
    uvx = vec_mradds(U, c->CGU, Y);
    *G  = vec_mradds(V, c->CGV, uvx);
}
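/*
 * Illustrative sketch (not part of the original file; the helper name is
 * ours): per-lane model of vec_mradds as used above, i.e. a rounded
 * fixed-point multiply-accumulate with signed saturation.
 */
#if 0
static short mradds_model(short a, short b, short c)
{
    int v = ((a * b + 0x4000) >> 15) + c;
    return v > 32767 ? 32767 : v < -32768 ? -32768 : (short) v;
}
#endif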
#define DEFCSP420_CVT(name, out_pixels)                                       \
static int altivec_ ## name(SwsContext *c, const unsigned char **in,          \
                            int *instrides, int srcSliceY, int srcSliceH,     \
                            unsigned char **oplanes, int *outstrides)         \
{                                                                             \
    int w = c->srcW;                                                          \
    int h = srcSliceH;                                                        \
    int i, j;                                                                 \
    int instrides_scl[3];                                                     \
    vector unsigned char y0, y1;                                              \
                                                                              \
    vector signed char u, v;                                                  \
                                                                              \
    vector signed short Y0, Y1, Y2, Y3;                                       \
    vector signed short U, V;                                                 \
    vector signed short vx, ux, uvx;                                          \
    vector signed short vx0, ux0, uvx0;                                       \
    vector signed short vx1, ux1, uvx1;                                       \
    vector signed short R0, G0, B0;                                           \
    vector signed short R1, G1, B1;                                           \
    vector unsigned char R, G, B;                                             \
                                                                              \
    const vector unsigned char *y1ivP, *y2ivP, *uivP, *vivP;                  \
    vector unsigned char align_perm;                                          \
                                                                              \
    vector signed short lCY       = c->CY;                                    \
    vector signed short lOY       = c->OY;                                    \
    vector signed short lCRV      = c->CRV;                                   \
    vector signed short lCBU      = c->CBU;                                   \
    vector signed short lCGU      = c->CGU;                                   \
    vector signed short lCGV      = c->CGV;                                   \
    vector unsigned short lCSHIFT = c->CSHIFT;                                \
                                                                              \
    const ubyte *y1i = in[0];                                                 \
    const ubyte *y2i = in[0] + instrides[0];                                  \
    const ubyte *ui  = in[1];                                                 \
    const ubyte *vi  = in[2];                                                 \
                                                                              \
    vector unsigned char *oute =                                              \
        (vector unsigned char *)                                              \
            (oplanes[0] + srcSliceY * outstrides[0]);                         \
    vector unsigned char *outo =                                              \
        (vector unsigned char *)                                              \
            (oplanes[0] + srcSliceY * outstrides[0] + outstrides[0]);         \
                                                                              \
    /* loop moves y{1, 2}i by w */                                            \
    instrides_scl[0] = instrides[0] * 2 - w;                                  \
    /* loop moves ui by w / 2 */                                              \
    instrides_scl[1] = instrides[1] - w / 2;                                  \
    /* loop moves vi by w / 2 */                                              \
    instrides_scl[2] = instrides[2] - w / 2;                                  \
                                                                              \
    for (i = 0; i < h / 2; i++) {                                             \
        vec_dstst(outo, (0x02000002 | (((w * 3 + 32) / 32) << 16)), 0);       \
        vec_dstst(oute, (0x02000002 | (((w * 3 + 32) / 32) << 16)), 1);       \
                                                                              \
        for (j = 0; j < w / 16; j++) {                                        \
            y1ivP = (const vector unsigned char *) y1i;                       \
            y2ivP = (const vector unsigned char *) y2i;                       \
            uivP  = (const vector unsigned char *) ui;                        \
            vivP  = (const vector unsigned char *) vi;                        \
                                                                              \
            align_perm = vec_lvsl(0, y1i);                                    \
            y0 = (vector unsigned char)                                       \
                     vec_perm(y1ivP[0], y1ivP[1], align_perm);                \
                                                                              \
            align_perm = vec_lvsl(0, y2i);                                    \
            y1 = (vector unsigned char)                                       \
                     vec_perm(y2ivP[0], y2ivP[1], align_perm);                \
                                                                              \
            align_perm = vec_lvsl(0, ui);                                     \
            u = (vector signed char)                                          \
                    vec_perm(uivP[0], uivP[1], align_perm);                   \
                                                                              \
            align_perm = vec_lvsl(0, vi);                                     \
            v = (vector signed char)                                          \
                    vec_perm(vivP[0], vivP[1], align_perm);                   \
                                                                              \
            u = (vector signed char)                                          \
                    vec_sub(u,                                                \
                            (vector signed char)                              \
                                vec_splat((vector signed char) { 128 }, 0));  \
            v = (vector signed char)                                          \
                    vec_sub(v,                                                \
                            (vector signed char)                              \
                                vec_splat((vector signed char) { 128 }, 0));  \
                                                                              \
            U = vec_unpackh(u);                                               \
            V = vec_unpackh(v);                                               \
                                                                              \
            Y0 = vec_unh(y0);                                                 \
            Y1 = vec_unl(y0);                                                 \
            Y2 = vec_unh(y1);                                                 \
            Y3 = vec_unl(y1);                                                 \
                                                                              \
            Y0 = vec_mradds(Y0, lCY, lOY);                                    \
            Y1 = vec_mradds(Y1, lCY, lOY);                                    \
            Y2 = vec_mradds(Y2, lCY, lOY);                                    \
            Y3 = vec_mradds(Y3, lCY, lOY);                                    \
                                                                              \
            /* ux  = (CBU * (u << CSHIFT) + 0x4000) >> 15 */                  \
            ux  = vec_sl(U, lCSHIFT);                                         \
            ux  = vec_mradds(ux, lCBU, (vector signed short) { 0 });          \
            ux0 = vec_mergeh(ux, ux);                                         \
            ux1 = vec_mergel(ux, ux);                                         \
                                                                              \
            /* vx  = (CRV * (v << CSHIFT) + 0x4000) >> 15 */                  \
            vx  = vec_sl(V, lCSHIFT);                                         \
            vx  = vec_mradds(vx, lCRV, (vector signed short) { 0 });          \
            vx0 = vec_mergeh(vx, vx);                                         \
            vx1 = vec_mergel(vx, vx);                                         \
                                                                              \
            /* uvx = ((CGU * u) + (CGV * v)) >> 15 */                         \
            uvx  = vec_mradds(U, lCGU, (vector signed short) { 0 });          \
            uvx  = vec_mradds(V, lCGV, uvx);                                  \
            uvx0 = vec_mergeh(uvx, uvx);                                      \
            uvx1 = vec_mergel(uvx, uvx);                                      \
                                                                              \
            R0 = vec_add(Y0, vx0);                                            \
            G0 = vec_add(Y0, uvx0);                                           \
            B0 = vec_add(Y0, ux0);                                            \
            R1 = vec_add(Y1, vx1);                                            \
            G1 = vec_add(Y1, uvx1);                                           \
            B1 = vec_add(Y1, ux1);                                            \
                                                                              \
            R = vec_packclp(R0, R1);                                          \
            G = vec_packclp(G0, G1);                                          \
            B = vec_packclp(B0, B1);                                          \
                                                                              \
            out_pixels(R, G, B, oute);                                        \
                                                                              \
            R0 = vec_add(Y2, vx0);                                            \
            G0 = vec_add(Y2, uvx0);                                           \
            B0 = vec_add(Y2, ux0);                                            \
            R1 = vec_add(Y3, vx1);                                            \
            G1 = vec_add(Y3, uvx1);                                           \
            B1 = vec_add(Y3, ux1);                                            \
            R  = vec_packclp(R0, R1);                                         \
            G  = vec_packclp(G0, G1);                                         \
            B  = vec_packclp(B0, B1);                                         \
                                                                              \
            out_pixels(R, G, B, outo);                                        \
                                                                              \
            y1i += 16;                                                        \
            y2i += 16;                                                        \
            ui  += 8;                                                         \
            vi  += 8;                                                         \
        }                                                                     \
                                                                              \
        outo += (outstrides[0]) >> 4;                                         \
        oute += (outstrides[0]) >> 4;                                         \
                                                                              \
        ui  += instrides_scl[1];                                              \
        vi  += instrides_scl[2];                                              \
        y1i += instrides_scl[0];                                              \
        y2i += instrides_scl[0];                                              \
    }                                                                         \
                                                                              \
    return srcSliceH;                                                         \
}
#define out_abgr(a, b, c, ptr)                                          \
    vec_mstrgb32(__typeof__(a), ((__typeof__(a)) { 255 }), c, b, a, ptr)
#define out_bgra(a, b, c, ptr)                                          \
    vec_mstrgb32(__typeof__(a), c, b, a, ((__typeof__(a)) { 255 }), ptr)
#define out_rgba(a, b, c, ptr)                                          \
    vec_mstrgb32(__typeof__(a), a, b, c, ((__typeof__(a)) { 255 }), ptr)
#define out_argb(a, b, c, ptr)                                          \
    vec_mstrgb32(__typeof__(a), ((__typeof__(a)) { 255 }), a, b, c, ptr)
#define out_rgb24(a, b, c, ptr) vec_mstrgb24(a, b, c, ptr)
#define out_bgr24(a, b, c, ptr) vec_mstbgr24(a, b, c, ptr)
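/*
 * Illustrative sketch (not part of the extracted text): the planar 4:2:0
 * converters referenced from ff_yuv2rgb_init_altivec() below are presumably
 * instantiated from DEFCSP420_CVT with the matching output macro, e.g.:
 */
#if 0
DEFCSP420_CVT(yuv2_abgr,  out_abgr)
DEFCSP420_CVT(yuv2_bgra,  out_bgra)
DEFCSP420_CVT(yuv2_rgba,  out_rgba)
DEFCSP420_CVT(yuv2_argb,  out_argb)
DEFCSP420_CVT(yuv2_rgb24, out_rgb24)
DEFCSP420_CVT(yuv2_bgr24, out_bgr24)
#endif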
/* packed YUV 4:2:2 (UYVY): Cb Y0 Cr Y1, so U sits at bytes 0, 4, 8, 12,
 * V at bytes 2, 6, 10, 14 and Y at the odd bytes */
static const vector unsigned char
    demux_u = { 0x10, 0x00, 0x10, 0x00,
                0x10, 0x04, 0x10, 0x04,
                0x10, 0x08, 0x10, 0x08,
                0x10, 0x0c, 0x10, 0x0c },
    demux_v = { 0x10, 0x02, 0x10, 0x02,
                0x10, 0x06, 0x10, 0x06,
                0x10, 0x0A, 0x10, 0x0A,
                0x10, 0x0E, 0x10, 0x0E },
    demux_y = { 0x10, 0x01, 0x10, 0x03,
                0x10, 0x05, 0x10, 0x07,
                0x10, 0x09, 0x10, 0x0B,
                0x10, 0x0D, 0x10, 0x0F };
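/*
 * Illustrative sketch (not part of the original file; the helper name is
 * ours): scalar model of the demux tables above.  Selector 0x10 picks from
 * the zero vector, so each permute yields 16-bit values with the sample in
 * the low byte; U and V are duplicated to half the luma rate.
 */
#if 0
static void demux_model(const unsigned char uyvy[16],
                        short U[8], short V[8], short Y[8])
{
    for (int i = 0; i < 8; i++) {
        U[i] = uyvy[4 * (i / 2)];       /* u0 u0 u1 u1 u2 u2 u3 u3 */
        V[i] = uyvy[4 * (i / 2) + 2];   /* v0 v0 v1 v1 v2 v2 v3 v3 */
        Y[i] = uyvy[2 * i + 1];         /* y0 y1 y2 y3 y4 y5 y6 y7 */
    }
}
#endif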
static int altivec_uyvy_rgb32(SwsContext *c, const unsigned char **in,
                              int *instrides, int srcSliceY, int srcSliceH,
                              unsigned char **oplanes, int *outstrides)
{
    int w = c->srcW;
    int h = srcSliceH;
    int i, j;
    vector unsigned char uyvy;
    vector signed short Y, U, V;
    vector signed short R0, G0, B0, R1, G1, B1;
    vector unsigned char R, G, B;
    vector unsigned char *out;
    const ubyte *img;

    img = in[0];
    out = (vector unsigned char *) (oplanes[0] + srcSliceY * outstrides[0]);

    for (i = 0; i < h; i++)
        for (j = 0; j < w / 16; j++) {
            uyvy = vec_ld(0, img);

            U = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_u);
            V = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_v);
            Y = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_y);

            cvtyuvtoRGB(c, Y, U, V, &R0, &G0, &B0);

            uyvy = vec_ld(16, img);

            U = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_u);
            V = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_v);
            Y = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_y);

            cvtyuvtoRGB(c, Y, U, V, &R1, &G1, &B1);

            R = vec_packclp(R0, R1);
            G = vec_packclp(G0, G1);
            B = vec_packclp(B0, B1);

            out_rgba(R, G, B, out);

            img += 32;
        }

    return srcSliceH;
}
av_cold SwsFunc ff_yuv2rgb_init_altivec(SwsContext *c)
{
    if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
        return NULL;

    /* the AltiVec paths only handle widths that are a multiple of 16 */
    if ((c->srcW & 0xf) != 0)
        return NULL;

    switch (c->srcFormat) {
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_NV12:
    case AV_PIX_FMT_NV21:
        /* ... and heights that are a multiple of 2 */
        if ((c->srcH & 0x1) != 0)
            return NULL;

        switch (c->dstFormat) {
        case AV_PIX_FMT_RGB24: return altivec_yuv2_rgb24;
        case AV_PIX_FMT_BGR24: return altivec_yuv2_bgr24;
        case AV_PIX_FMT_ARGB:  return altivec_yuv2_argb;
        case AV_PIX_FMT_ABGR:  return altivec_yuv2_abgr;
        case AV_PIX_FMT_RGBA:  return altivec_yuv2_rgba;
        case AV_PIX_FMT_BGRA:  return altivec_yuv2_bgra;
        default:               return NULL;
        }
    case AV_PIX_FMT_UYVY422:
        switch (c->dstFormat) {
        case AV_PIX_FMT_BGR32: return altivec_uyvy_rgb32;
        default:               return NULL;
        }
    default:
        return NULL;
    }
}
av_cold void ff_yuv2rgb_init_tables_altivec(SwsContext *c,
                                            const int inv_table[4],
                                            int brightness, int contrast,
                                            int saturation)
{
    union {
        DECLARE_ALIGNED(16, signed short, tmp)[8];
        vector signed short vec;
    } buf;

    buf.tmp[0] = ((0xffffLL) * contrast >> 8) >> 9;                              // cy
    buf.tmp[1] = -256 * brightness;                                              // oy
    buf.tmp[2] =   (inv_table[0] >> 3) * (contrast >> 16) * (saturation >> 16);  // crv
    buf.tmp[3] =   (inv_table[1] >> 3) * (contrast >> 16) * (saturation >> 16);  // cbu
    buf.tmp[4] = -((inv_table[2] >> 1) * (contrast >> 16) * (saturation >> 16)); // cgu
    buf.tmp[5] = -((inv_table[3] >> 1) * (contrast >> 16) * (saturation >> 16)); // cgv

    c->CSHIFT = (vector unsigned short) vec_splat_u16(2);
    c->CY     = vec_splat((vector signed short) buf.vec, 0);
    c->OY     = vec_splat((vector signed short) buf.vec, 1);
    c->CRV    = vec_splat((vector signed short) buf.vec, 2);
    c->CBU    = vec_splat((vector signed short) buf.vec, 3);
    c->CGU    = vec_splat((vector signed short) buf.vec, 4);
    c->CGV    = vec_splat((vector signed short) buf.vec, 5);
}
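/*
 * Illustrative sketch (not part of the original file; the helper name is
 * ours): the setup above packs six 16-bit coefficients into one vector and
 * then broadcasts each lane with vec_splat, which per lane behaves like:
 */
#if 0
static void splat_model(const short src[8], int k, short dst[8])
{
    for (int i = 0; i < 8; i++)
        dst[i] = src[k];
}
#endif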
static av_always_inline void
ff_yuv2packedX_altivec(SwsContext *c, const int16_t *lumFilter,
                       const int16_t **lumSrc, int lumFilterSize,
                       const int16_t *chrFilter, const int16_t **chrUSrc,
                       const int16_t **chrVSrc, int chrFilterSize,
                       const int16_t **alpSrc, uint8_t *dest,
                       int dstW, int dstY, enum AVPixelFormat target)
{
    int i, j;
    vector signed short X, X0, X1, Y0, U0, V0, Y1, U1, V1, U, V;
    vector signed short R0, G0, B0, R1, G1, B1;

    vector unsigned char R, G, B;
    vector unsigned char *out, *nout;

    vector signed short RND   = vec_splat_s16(1 << 3);
    vector unsigned short SCL = vec_splat_u16(4);
    DECLARE_ALIGNED(16, unsigned int, scratch)[16];

    vector signed short *YCoeffs, *CCoeffs;

    YCoeffs = c->vYCoeffsBank + dstY * lumFilterSize;
    CCoeffs = c->vCCoeffsBank + dstY * chrFilterSize;

    out = (vector unsigned char *) dest;
    /* each iteration converts 16 destination pixels */
    for (i = 0; i < dstW; i += 16) {
        Y0 = RND;
        Y1 = RND;
        /* run 16 luma samples through the vertical filter */
        for (j = 0; j < lumFilterSize; j++) {
            X0 = vec_ld(0, &lumSrc[j][i]);
            X1 = vec_ld(16, &lumSrc[j][i]);
            Y0 = vec_mradds(X0, YCoeffs[j], Y0);
            Y1 = vec_mradds(X1, YCoeffs[j], Y1);
        }

        U = RND;
        V = RND;
        /* run 8 chroma samples through the vertical filter */
        for (j = 0; j < chrFilterSize; j++) {
            X = vec_ld(0, &chrUSrc[j][i / 2]);
            U = vec_mradds(X, CCoeffs[j], U);
            X = vec_ld(0, &chrVSrc[j][i / 2]);
            V = vec_mradds(X, CCoeffs[j], V);
        }

        /* scale and clip the signals */
        Y0 = vec_sra(Y0, SCL);
        Y1 = vec_sra(Y1, SCL);
        U  = vec_sra(U, SCL);
        V  = vec_sra(V, SCL);

        Y0 = vec_clip_s16(Y0);
        Y1 = vec_clip_s16(Y1);
        U  = vec_clip_s16(U);
        V  = vec_clip_s16(V);

        /* duplicate the chroma samples to luma resolution, convert and pack */
        U0 = vec_mergeh(U, U);
        V0 = vec_mergeh(V, V);

        U1 = vec_mergel(U, U);
        V1 = vec_mergel(V, V);

        cvtyuvtoRGB(c, Y0, U0, V0, &R0, &G0, &B0);
        cvtyuvtoRGB(c, Y1, U1, V1, &R1, &G1, &B1);

        R = vec_packclp(R0, R1);
        G = vec_packclp(G0, G1);
        B = vec_packclp(B0, B1);

        switch (target) {
        case AV_PIX_FMT_ABGR:  out_abgr(R, G, B, out);  break;
        case AV_PIX_FMT_BGRA:  out_bgra(R, G, B, out);  break;
        case AV_PIX_FMT_RGBA:  out_rgba(R, G, B, out);  break;
        case AV_PIX_FMT_ARGB:  out_argb(R, G, B, out);  break;
        case AV_PIX_FMT_RGB24: out_rgb24(R, G, B, out); break;
        case AV_PIX_FMT_BGR24: out_bgr24(R, G, B, out); break;
        default:
        {
            /* unsupported target: the caller should have used the C path */
            static int printed_error_message;
            if (!printed_error_message) {
                av_log(c, AV_LOG_ERROR,
                       "altivec_yuv2packedX doesn't support %s output\n",
                       av_get_pix_fmt_name(c->dstFormat));
                printed_error_message = 1;
            }
            return;
        }
        }
    }
    /* if dstW is not a multiple of 16, redo the last block into a scratch
     * buffer and copy out only the bytes that are actually needed */
    if (i < dstW) {
        i -= 16;
        Y0 = RND;
        Y1 = RND;
        for (j = 0; j < lumFilterSize; j++) {
            X0 = vec_ld(0, &lumSrc[j][i]);
            X1 = vec_ld(16, &lumSrc[j][i]);
            Y0 = vec_mradds(X0, YCoeffs[j], Y0);
            Y1 = vec_mradds(X1, YCoeffs[j], Y1);
        }

        U = RND;
        V = RND;
        for (j = 0; j < chrFilterSize; j++) {
            X = vec_ld(0, &chrUSrc[j][i / 2]);
            U = vec_mradds(X, CCoeffs[j], U);
            X = vec_ld(0, &chrVSrc[j][i / 2]);
            V = vec_mradds(X, CCoeffs[j], V);
        }

        Y0 = vec_sra(Y0, SCL);
        Y1 = vec_sra(Y1, SCL);
        U  = vec_sra(U, SCL);
        V  = vec_sra(V, SCL);

        Y0 = vec_clip_s16(Y0);
        Y1 = vec_clip_s16(Y1);
        U  = vec_clip_s16(U);
        V  = vec_clip_s16(V);

        U0 = vec_mergeh(U, U);
        V0 = vec_mergeh(V, V);

        U1 = vec_mergel(U, U);
        V1 = vec_mergel(V, V);

        cvtyuvtoRGB(c, Y0, U0, V0, &R0, &G0, &B0);
        cvtyuvtoRGB(c, Y1, U1, V1, &R1, &G1, &B1);

        R = vec_packclp(R0, R1);
        G = vec_packclp(G0, G1);
        B = vec_packclp(B0, B1);

        nout = (vector unsigned char *) scratch;
        switch (target) {
        case AV_PIX_FMT_ABGR:  out_abgr(R, G, B, nout);  break;
        case AV_PIX_FMT_BGRA:  out_bgra(R, G, B, nout);  break;
        case AV_PIX_FMT_RGBA:  out_rgba(R, G, B, nout);  break;
        case AV_PIX_FMT_ARGB:  out_argb(R, G, B, nout);  break;
        case AV_PIX_FMT_RGB24: out_rgb24(R, G, B, nout); break;
        case AV_PIX_FMT_BGR24: out_bgr24(R, G, B, nout); break;
        default:
            /* unreachable: already rejected in the main loop above */
            av_log(c, AV_LOG_ERROR,
                   "altivec_yuv2packedX doesn't support %s output\n",
                   av_get_pix_fmt_name(c->dstFormat));
            return;
        }

        memcpy(&((uint32_t *) dest)[i], scratch, (dstW - i) / 4);
    }
}
#define YUV2PACKEDX_WRAPPER(suffix, pixfmt)                             \
void ff_yuv2 ## suffix ## _X_altivec(SwsContext *c,                     \
                                     const int16_t *lumFilter,          \
                                     const int16_t **lumSrc,            \
                                     int lumFilterSize,                 \
                                     const int16_t *chrFilter,          \
                                     const int16_t **chrUSrc,           \
                                     const int16_t **chrVSrc,           \
                                     int chrFilterSize,                 \
                                     const int16_t **alpSrc,            \
                                     uint8_t *dest, int dstW, int dstY) \
{                                                                       \
    ff_yuv2packedX_altivec(c, lumFilter, lumSrc, lumFilterSize,         \
                           chrFilter, chrUSrc, chrVSrc,                 \
                           chrFilterSize, alpSrc,                       \
                           dest, dstW, dstY, pixfmt);                   \
}
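/*
 * Illustrative sketch (not part of the extracted text): per-format entry
 * points are presumably generated by expanding the wrapper, for example:
 */
#if 0
YUV2PACKEDX_WRAPPER(abgr,  AV_PIX_FMT_ABGR)
YUV2PACKEDX_WRAPPER(bgra,  AV_PIX_FMT_BGRA)
YUV2PACKEDX_WRAPPER(argb,  AV_PIX_FMT_ARGB)
YUV2PACKEDX_WRAPPER(rgba,  AV_PIX_FMT_RGBA)
YUV2PACKEDX_WRAPPER(rgb24, AV_PIX_FMT_RGB24)
YUV2PACKEDX_WRAPPER(bgr24, AV_PIX_FMT_BGR24)
#endif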