* qatar/master:
  ARM: enable UAL syntax in asm.S
  v4l2: don't leak video standard string on error.
  swscale: Remove disabled code.
  avfilter: Surround function only used in debug mode by appropriate #ifdef.
  vf_crop: Replace #ifdef DEBUG + av_log() by av_dlog().
  build: remove BUILD_ROOT variable
  vp8: use av_clip_uintp2() where possible

Merged-by: Michael Niedermayer <michaelni@gmx.at>
@@ -208,15 +208,15 @@ ffservertest: ffserver$(EXESUF) tests/vsynth1/00.pgm tests/data/asynth1.sw
tests/vsynth1/00.pgm: tests/videogen$(HOSTEXESUF)
@mkdir -p tests/vsynth1
$(M)$(BUILD_ROOT)/$< 'tests/vsynth1/'
$(M)./$< 'tests/vsynth1/'
tests/vsynth2/00.pgm: tests/rotozoom$(HOSTEXESUF)
@mkdir -p tests/vsynth2
$(M)$(BUILD_ROOT)/$< 'tests/vsynth2/' $(SRC_PATH)/tests/lena.pnm
$(M)./$< 'tests/vsynth2/' $(SRC_PATH)/tests/lena.pnm
tests/data/asynth1.sw: tests/audiogen$(HOSTEXESUF)
@mkdir -p tests/data
$(M)$(BUILD_ROOT)/$< $@
$(M)./$< $@
tests/data/asynth1.sw tests/vsynth%/00.pgm: TAG = GEN
@@ -12,12 +12,6 @@ vpath %.S $(SRC_DIR)
vpath %.asm $(SRC_DIR)
vpath %.v $(SRC_DIR)
ifeq ($(SRC_DIR),$(SRC_PATH_BARE))
BUILD_ROOT_REL = .
else
BUILD_ROOT_REL = ..
endif
ifndef V
Q = @
ECHO = printf "$(1)\t%s\n" $(2)
@@ -33,7 +27,7 @@ endif
ALLFFLIBS = avcodec avdevice avfilter avformat avutil postproc swscale
IFLAGS := -I$(BUILD_ROOT_REL) -I$(SRC_PATH)
IFLAGS := -I. -I$(SRC_PATH)
CPPFLAGS := $(IFLAGS) $(CPPFLAGS)
CFLAGS += $(ECFLAGS)
YASMFLAGS += $(IFLAGS) -Pconfig.asm
@@ -85,7 +79,7 @@ FFLIBS := $(FFLIBS-yes) $(FFLIBS)
TESTPROGS += $(TESTPROGS-yes)
FFEXTRALIBS := $(addprefix -l,$(addsuffix $(BUILDSUF),$(FFLIBS))) $(EXTRALIBS)
FFLDFLAGS := $(addprefix -L$(BUILD_ROOT)/lib,$(ALLFFLIBS)) $(LDFLAGS)
FFLDFLAGS := $(addprefix -Llib,$(ALLFFLIBS)) $(LDFLAGS)
EXAMPLES := $(addprefix $(SUBDIR),$(addsuffix -example$(EXESUF),$(EXAMPLES)))
OBJS := $(addprefix $(SUBDIR),$(sort $(OBJS)))
@@ -94,7 +88,7 @@ TESTPROGS := $(addprefix $(SUBDIR),$(addsuffix -test$(EXESUF),$(TESTPROGS)))
HOSTOBJS := $(addprefix $(SUBDIR),$(addsuffix .o,$(HOSTPROGS)))
HOSTPROGS := $(addprefix $(SUBDIR),$(addsuffix $(HOSTEXESUF),$(HOSTPROGS)))
DEP_LIBS := $(foreach NAME,$(FFLIBS),$(BUILD_ROOT_REL)/lib$(NAME)/$($(CONFIG_SHARED:yes=S)LIBNAME))
DEP_LIBS := $(foreach NAME,$(FFLIBS),lib$(NAME)/$($(CONFIG_SHARED:yes=S)LIBNAME))
ALLHEADERS := $(subst $(SRC_DIR)/,$(SUBDIR),$(wildcard $(SRC_DIR)/*.h $(SRC_DIR)/$(ARCH)/*.h))
SKIPHEADERS += $(addprefix $(ARCH)/,$(ARCH_HEADERS))
@@ -3046,7 +3046,7 @@ enabled extra_warnings && check_cflags -Winline
# add some linker flags
check_ldflags -Wl,--warn-common
check_ldflags '-Wl,-rpath-link,\$(BUILD_ROOT)/libpostproc -Wl,-rpath-link,\$(BUILD_ROOT)/libswscale -Wl,-rpath-link,\$(BUILD_ROOT)/libavfilter -Wl,-rpath-link,\$(BUILD_ROOT)/libavdevice -Wl,-rpath-link,\$(BUILD_ROOT)/libavformat -Wl,-rpath-link,\$(BUILD_ROOT)/libavcodec -Wl,-rpath-link,\$(BUILD_ROOT)/libavutil'
check_ldflags -Wl,-rpath-link,libpostproc -Wl,-rpath-link,libswscale -Wl,-rpath-link,libavfilter -Wl,-rpath-link,libavdevice -Wl,-rpath-link,libavformat -Wl,-rpath-link,libavcodec -Wl,-rpath-link,libavutil
test_ldflags -Wl,-Bsymbolic && append SHFLAGS -Wl,-Bsymbolic
echo "X{};" > $TMPV
@@ -3316,7 +3316,6 @@ DATADIR=\$(DESTDIR)$datadir
MANDIR=\$(DESTDIR)$mandir
SRC_PATH="$source_path"
SRC_PATH_BARE=$source_path
BUILD_ROOT="$PWD"
CC_IDENT=$cc_ident
ARCH=$arch
CC=$cc
@@ -26,6 +26,8 @@
# define ELF @
#endif
.syntax unified
.macro require8 val=1
ELF .eabi_attribute 24, \val
.endm
@@ -21,7 +21,6 @@
#include "config.h"
#include "asm.S"
.syntax unified
/*
* VFP is a floating point coprocessor used in some ARM cores. VFP11 has 1 cycle
* throughput for almost all the instructions (except for double precision
@@ -21,8 +21,6 @@
#include "config.h"
#include "asm.S"
.syntax unified
/**
* ARM VFP optimized float to int16 conversion.
* Assume that len is a positive number and is multiple of 8, destination
@@ -95,7 +95,7 @@ function ff_dct_unquantize_h263_armv5te, export=1
strh lr, [r0], #2
subs r3, r3, #8
ldrgtd r4, [r0, #0] /* load data early to avoid load/use pipeline stall */
ldrdgt r4, [r0, #0] /* load data early to avoid load/use pipeline stall */
bgt 1b
adds r3, r3, #2
@@ -20,8 +20,6 @@
#include "asm.S"
.syntax unified
.macro rac_get_prob h, bs, buf, cw, pr, t0, t1
adds \bs, \bs, \t0
lsl \cw, \cw, \t0
@@ -166,12 +166,12 @@ static void get_quants(VP8Context *s)
} else
base_qi = yac_qi;
s->qmat[i].luma_qmul[0] = vp8_dc_qlookup[av_clip(base_qi + ydc_delta , 0, 127)];
s->qmat[i].luma_qmul[1] = vp8_ac_qlookup[av_clip(base_qi , 0, 127)];
s->qmat[i].luma_dc_qmul[0] = 2 * vp8_dc_qlookup[av_clip(base_qi + y2dc_delta, 0, 127)];
s->qmat[i].luma_dc_qmul[1] = 155 * vp8_ac_qlookup[av_clip(base_qi + y2ac_delta, 0, 127)] / 100;
s->qmat[i].chroma_qmul[0] = vp8_dc_qlookup[av_clip(base_qi + uvdc_delta, 0, 127)];
s->qmat[i].chroma_qmul[1] = vp8_ac_qlookup[av_clip(base_qi + uvac_delta, 0, 127)];
s->qmat[i].luma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + ydc_delta , 7)];
s->qmat[i].luma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi , 7)];
s->qmat[i].luma_dc_qmul[0] = 2 * vp8_dc_qlookup[av_clip_uintp2(base_qi + y2dc_delta, 7)];
s->qmat[i].luma_dc_qmul[1] = 155 * vp8_ac_qlookup[av_clip_uintp2(base_qi + y2ac_delta, 7)] / 100;
s->qmat[i].chroma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + uvdc_delta, 7)];
s->qmat[i].chroma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + uvac_delta, 7)];
s->qmat[i].luma_dc_qmul[1] = FFMAX(s->qmat[i].luma_dc_qmul[1], 8);
s->qmat[i].chroma_qmul[0] = FFMIN(s->qmat[i].chroma_qmul[0], 132);
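The rewrite above relies on av_clip(x, 0, 127) and av_clip_uintp2(x, 7) producing the same result: both clamp to the unsigned 7-bit range [0, 2^7 - 1] = [0, 127]. A minimal, self-contained sketch of that equivalence, using simplified stand-ins rather than the actual libavutil implementations:

    #include <assert.h>

    /* Simplified stand-in for av_clip(): clamp a into [amin, amax]. */
    static int clip(int a, int amin, int amax)
    {
        if (a < amin) return amin;
        if (a > amax) return amax;
        return a;
    }

    /* Simplified stand-in for av_clip_uintp2(): clamp a into [0, 2^p - 1]. */
    static unsigned clip_uintp2(int a, int p)
    {
        if (a < 0)            return 0;
        if (a > (1 << p) - 1) return (1 << p) - 1;
        return a;
    }

    int main(void)
    {
        for (int x = -300; x <= 300; x++)   /* covers the qindex + delta range */
            assert(clip(x, 0, 127) == (int)clip_uintp2(x, 7));
        return 0;
    }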
@@ -501,7 +501,6 @@ static int v4l2_set_parameters(AVFormatContext *s1, AVFormatParameters *ap)
return AVERROR(EIO);
}
}
av_freep(&s->standard);
if (ap->time_base.num && ap->time_base.den) {
av_log(s1, AV_LOG_DEBUG, "Setting time per frame to %d/%d\n",
@@ -670,6 +669,7 @@ static int v4l2_read_header(AVFormatContext *s1, AVFormatParameters *ap)
out:
av_freep(&s->video_size);
av_freep(&s->pixel_format);
av_freep(&s->standard);
return res;
}
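The two v4l2 hunks above move the av_freep(&s->standard) call out of v4l2_set_parameters() and into the common out: label of v4l2_read_header(), so the option string is also released when an error path jumps straight to cleanup. A hedged sketch of the pattern with hypothetical names, where freep() mimics av_freep()'s free-and-NULL behaviour:

    #include <stdlib.h>
    #include <string.h>

    /* Stand-in for libavutil's av_freep(): free *ptr and set it to NULL. */
    static void freep(void *arg)
    {
        void **ptr = arg;
        free(*ptr);
        *ptr = NULL;
    }

    /* Hypothetical private context; in the demuxer, standard holds the
     * video standard string parsed from the options. */
    struct priv { char *standard; };

    static int read_header(struct priv *s, int fail)
    {
        int res = 0;

        s->standard = malloc(4);
        if (s->standard)
            strcpy(s->standard, "PAL");

        if (fail) {        /* stands in for a failing v4l2_set_parameters() call */
            res = -1;
            goto out;      /* before the fix, an early exit like this leaked the string */
        }

    out:
        freep(&s->standard);   /* the common cleanup label frees it on every path */
        return res;
    }

    int main(void)
    {
        struct priv s = { 0 };
        read_header(&s, 1);    /* error path: string is still released */
        read_header(&s, 0);    /* success path */
        return 0;
    }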
@@ -264,11 +264,9 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
crop->x &= ~((1 << crop->hsub) - 1);
crop->y &= ~((1 << crop->vsub) - 1);
#ifdef DEBUG
av_log(ctx, AV_LOG_DEBUG,
"n:%d t:%f x:%d y:%d x+w:%d y+h:%d\n",
(int)crop->var_values[VAR_N], crop->var_values[VAR_T], crop->x, crop->y, crop->x+crop->w, crop->y+crop->h);
#endif
av_dlog(ctx, "n:%d t:%f x:%d y:%d x+w:%d y+h:%d\n",
(int)crop->var_values[VAR_N], crop->var_values[VAR_T], crop->x,
crop->y, crop->x+crop->w, crop->y+crop->h);
ref2->data[0] += crop->y * ref2->linesize[0];
ref2->data[0] += crop->x * crop->max_step[0];
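The vf_crop hunk above collapses an #ifdef DEBUG / av_log() / #endif block into a single av_dlog() call; av_dlog() is a libavutil macro that forwards to av_log(..., AV_LOG_DEBUG, ...) in DEBUG builds and expands to effectively nothing otherwise, so the call site no longer needs its own preprocessor guard. A small self-contained sketch of that idiom, using stand-in names rather than the real libavutil definitions:

    #include <stdio.h>
    #include <stdarg.h>

    /* Minimal stand-in for av_log(): print the formatted message to stderr. */
    static void my_log(void *ctx, int level, const char *fmt, ...)
    {
        va_list ap;
        (void)ctx; (void)level;
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
    }

    #ifdef DEBUG
    #    define my_dlog(ctx, ...) my_log(ctx, 0, __VA_ARGS__)
    #else
    /* "if (0)" keeps the arguments type-checked while letting the compiler drop the call. */
    #    define my_dlog(ctx, ...) do { if (0) my_log(ctx, 0, __VA_ARGS__); } while (0)
    #endif

    int main(void)
    {
        my_dlog(NULL, "x:%d y:%d\n", 16, 32);   /* printed only when built with -DDEBUG */
        return 0;
    }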
@@ -447,159 +447,7 @@ static int altivec_##name (SwsContext *c, \
#define out_bgr24(a,b,c,ptr) vec_mstbgr24(a,b,c,ptr)
DEFCSP420_CVT (yuv2_abgr, out_abgr)
#if 1
DEFCSP420_CVT (yuv2_bgra, out_bgra)
#else
static int altivec_yuv2_bgra32 (SwsContext *c,
unsigned char **in, int *instrides,
int srcSliceY, int srcSliceH,
unsigned char **oplanes, int *outstrides)
{
int w = c->srcW;
int h = srcSliceH;
int i,j;
int instrides_scl[3];
vector unsigned char y0,y1;
vector signed char u,v;
vector signed short Y0,Y1,Y2,Y3;
vector signed short U,V;
vector signed short vx,ux,uvx;
vector signed short vx0,ux0,uvx0;
vector signed short vx1,ux1,uvx1;
vector signed short R0,G0,B0;
vector signed short R1,G1,B1;
vector unsigned char R,G,B;
vector unsigned char *uivP, *vivP;
vector unsigned char align_perm;
vector signed short
lCY = c->CY,
lOY = c->OY,
lCRV = c->CRV,
lCBU = c->CBU,
lCGU = c->CGU,
lCGV = c->CGV;
vector unsigned short lCSHIFT = c->CSHIFT;
ubyte *y1i = in[0];
ubyte *y2i = in[0]+w;
ubyte *ui = in[1];
ubyte *vi = in[2];
vector unsigned char *oute
= (vector unsigned char *)
(oplanes[0]+srcSliceY*outstrides[0]);
vector unsigned char *outo
= (vector unsigned char *)
(oplanes[0]+srcSliceY*outstrides[0]+outstrides[0]);
instrides_scl[0] = instrides[0];
instrides_scl[1] = instrides[1]-w/2; /* the loop moves ui by w/2 */
instrides_scl[2] = instrides[2]-w/2; /* the loop moves vi by w/2 */
for (i=0;i<h/2;i++) {
vec_dstst (outo, (0x02000002|(((w*3+32)/32)<<16)), 0);
vec_dstst (oute, (0x02000002|(((w*3+32)/32)<<16)), 1);
for (j=0;j<w/16;j++) {
y0 = vec_ldl (0,y1i);
y1 = vec_ldl (0,y2i);
uivP = (vector unsigned char *)ui;
vivP = (vector unsigned char *)vi;
align_perm = vec_lvsl (0, ui);
u = (vector signed char)vec_perm (uivP[0], uivP[1], align_perm);
align_perm = vec_lvsl (0, vi);
v = (vector signed char)vec_perm (vivP[0], vivP[1], align_perm);
u = (vector signed char)
vec_sub (u,(vector signed char)
vec_splat((vector signed char){128},0));
v = (vector signed char)
vec_sub (v, (vector signed char)
vec_splat((vector signed char){128},0));
U = vec_unpackh (u);
V = vec_unpackh (v);
Y0 = vec_unh (y0);
Y1 = vec_unl (y0);
Y2 = vec_unh (y1);
Y3 = vec_unl (y1);
Y0 = vec_mradds (Y0, lCY, lOY);
Y1 = vec_mradds (Y1, lCY, lOY);
Y2 = vec_mradds (Y2, lCY, lOY);
Y3 = vec_mradds (Y3, lCY, lOY);
/* ux = (CBU*(u<<CSHIFT)+0x4000)>>15 */
ux = vec_sl (U, lCSHIFT);
ux = vec_mradds (ux, lCBU, (vector signed short){0});
ux0 = vec_mergeh (ux,ux);
ux1 = vec_mergel (ux,ux);
/* vx = (CRV*(v<<CSHIFT)+0x4000)>>15; */
vx = vec_sl (V, lCSHIFT);
vx = vec_mradds (vx, lCRV, (vector signed short){0});
vx0 = vec_mergeh (vx,vx);
vx1 = vec_mergel (vx,vx);
/* uvx = ((CGU*u) + (CGV*v))>>15 */
uvx = vec_mradds (U, lCGU, (vector signed short){0});
uvx = vec_mradds (V, lCGV, uvx);
uvx0 = vec_mergeh (uvx,uvx);
uvx1 = vec_mergel (uvx,uvx);
R0 = vec_add (Y0,vx0);
G0 = vec_add (Y0,uvx0);
B0 = vec_add (Y0,ux0);
R1 = vec_add (Y1,vx1);
G1 = vec_add (Y1,uvx1);
B1 = vec_add (Y1,ux1);
R = vec_packclp (R0,R1);
G = vec_packclp (G0,G1);
B = vec_packclp (B0,B1);
out_argb(R,G,B,oute);
R0 = vec_add (Y2,vx0);
G0 = vec_add (Y2,uvx0);
B0 = vec_add (Y2,ux0);
R1 = vec_add (Y3,vx1);
G1 = vec_add (Y3,uvx1);
B1 = vec_add (Y3,ux1);
R = vec_packclp (R0,R1);
G = vec_packclp (G0,G1);
B = vec_packclp (B0,B1);
out_argb(R,G,B,outo);
y1i += 16;
y2i += 16;
ui += 8;
vi += 8;
}
outo += (outstrides[0])>>4;
oute += (outstrides[0])>>4;
ui += instrides_scl[1];
vi += instrides_scl[2];
y1i += instrides_scl[0];
y2i += instrides_scl[0];
}
return srcSliceH;
}
#endif
DEFCSP420_CVT (yuv2_rgba, out_rgba)
DEFCSP420_CVT (yuv2_argb, out_argb)
DEFCSP420_CVT (yuv2_rgb24, out_rgb24)
@@ -574,7 +574,6 @@ static inline void planar2x_c(const uint8_t *src, uint8_t *dst, int srcWidth,
}
// last line
#if 1
dst[0]= src[0];
for (x=0; x<srcWidth-1; x++) {
@@ -582,12 +581,6 @@ static inline void planar2x_c(const uint8_t *src, uint8_t *dst, int srcWidth,
dst[2*x+2]= ( src[x] + 3*src[x+1])>>2;
}
dst[2*srcWidth-1]= src[srcWidth-1];
#else
for (x=0; x<srcWidth; x++) {
dst[2*x+0]=
dst[2*x+1]= src[x];
}
#endif
}
/**
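For context on the planar2x_c() hunks above: the deleted #else branch was a plain pixel-doubling fallback that had long been disabled behind #if 1, while the kept path interpolates neighbouring samples with 3:1 / 1:3 weights. A small self-contained comparison of the two last-line behaviours, with illustrative helper names rather than the real swscale entry points:

    #include <stdint.h>
    #include <stdio.h>

    /* Kept path: horizontal 2x upscale of one line with 3:1 / 1:3 interpolation. */
    static void upscale2x_interp(const uint8_t *src, uint8_t *dst, int srcWidth)
    {
        int x;
        dst[0] = src[0];
        for (x = 0; x < srcWidth - 1; x++) {
            dst[2*x + 1] = (3*src[x] +   src[x+1]) >> 2;
            dst[2*x + 2] = (  src[x] + 3*src[x+1]) >> 2;
        }
        dst[2*srcWidth - 1] = src[srcWidth - 1];
    }

    /* Removed #else branch: simple pixel doubling, shown here only for comparison. */
    static void upscale2x_double(const uint8_t *src, uint8_t *dst, int srcWidth)
    {
        int x;
        for (x = 0; x < srcWidth; x++)
            dst[2*x + 0] = dst[2*x + 1] = src[x];
    }

    int main(void)
    {
        const uint8_t src[4] = { 0, 64, 128, 255 };
        uint8_t a[8], b[8];
        upscale2x_interp(src, a, 4);
        upscale2x_double(src, b, 4);
        for (int x = 0; x < 8; x++)
            printf("%3d %3d\n", a[x], b[x]);
        return 0;
    }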
@@ -366,28 +366,6 @@ YUV2RGBFUNC(yuv2rgb_c_16, uint16_t, 0)
PUTRGB(dst_1,py_1,3);
CLOSEYUV2RGBFUNC(8)
#if 0 // Currently unused
// This is exactly the same code as yuv2rgb_c_32 except for the types of
// r, g, b, dst_1, dst_2
YUV2RGBFUNC(yuv2rgb_c_8, uint8_t, 0)
LOADCHROMA(0);
PUTRGB(dst_1,py_1,0);
PUTRGB(dst_2,py_2,0);
LOADCHROMA(1);
PUTRGB(dst_2,py_2,1);
PUTRGB(dst_1,py_1,1);
LOADCHROMA(2);
PUTRGB(dst_1,py_1,2);
PUTRGB(dst_2,py_2,2);
LOADCHROMA(3);
PUTRGB(dst_2,py_2,3);
PUTRGB(dst_1,py_1,3);
CLOSEYUV2RGBFUNC(8)
#endif
// r, g, b, dst_1, dst_2
YUV2RGBFUNC(yuv2rgb_c_12_ordered_dither, uint16_t, 0)
const uint8_t *d16 = dither_4x4_16[y&3];
@@ -441,36 +419,6 @@ YUV2RGBFUNC(yuv2rgb_c_8_ordered_dither, uint8_t, 0)
PUTRGB8(dst_1,py_1,3,6);
CLOSEYUV2RGBFUNC(8)
#if 0 // Currently unused
// This is exactly the same code as yuv2rgb_c_32 except for the types of
// r, g, b, dst_1, dst_2
YUV2RGBFUNC(yuv2rgb_c_4, uint8_t, 0)
int acc;
#define PUTRGB4(dst,src,i) \
Y = src[2*i]; \
acc = r[Y] + g[Y] + b[Y]; \
Y = src[2*i+1]; \
acc |= (r[Y] + g[Y] + b[Y])<<4; \
dst[i] = acc;
LOADCHROMA(0);
PUTRGB4(dst_1,py_1,0);
PUTRGB4(dst_2,py_2,0);
LOADCHROMA(1);
PUTRGB4(dst_2,py_2,1);
PUTRGB4(dst_1,py_1,1);
LOADCHROMA(2);
PUTRGB4(dst_1,py_1,2);
PUTRGB4(dst_2,py_2,2);
LOADCHROMA(3);
PUTRGB4(dst_2,py_2,3);
PUTRGB4(dst_1,py_1,3);
CLOSEYUV2RGBFUNC(4)
#endif
YUV2RGBFUNC(yuv2rgb_c_4_ordered_dither, uint8_t, 0)
const uint8_t *d64 = dither_8x8_73[y&7];
const uint8_t *d128 = dither_8x8_220[y&7];
@@ -500,28 +448,6 @@ YUV2RGBFUNC(yuv2rgb_c_4_ordered_dither, uint8_t, 0)
PUTRGB4D(dst_1,py_1,3,6);
CLOSEYUV2RGBFUNC(4)
#if 0 // Currently unused
// This is exactly the same code as yuv2rgb_c_32 except for the types of
// r, g, b, dst_1, dst_2
YUV2RGBFUNC(yuv2rgb_c_4b, uint8_t, 0)
LOADCHROMA(0);
PUTRGB(dst_1,py_1,0);
PUTRGB(dst_2,py_2,0);
LOADCHROMA(1);
PUTRGB(dst_2,py_2,1);
PUTRGB(dst_1,py_1,1);
LOADCHROMA(2);
PUTRGB(dst_1,py_1,2);
PUTRGB(dst_2,py_2,2);
LOADCHROMA(3);
PUTRGB(dst_2,py_2,3);
PUTRGB(dst_1,py_1,3);
CLOSEYUV2RGBFUNC(8)
#endif
YUV2RGBFUNC(yuv2rgb_c_4b_ordered_dither, uint8_t, 0)
const uint8_t *d64 = dither_8x8_73[y&7];
const uint8_t *d128 = dither_8x8_220[y&7];