
x86: replace explicit REP_RETs with RETs

From x86inc:
> On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either
> a branch or a branch target. So switch to a 2-byte form of ret in that case.
> We can automatically detect "follows a branch", but not a branch target.
> (SSSE3 is a sufficient condition to know that your cpu doesn't have this problem.)

x86inc can automatically determine whether to use REP_RET rather than
RET in most of these cases, so the impact is minimal. Additionally, a few
REP_RETs were used unnecessarily, despite the return being nowhere near a
branch.

The only CPUs affected were AMD K10s, made between 2007 and 2011 (16 and
12 years ago, respectively).

In the future, everyone involved with x86inc should consider dropping
REP_RETs altogether.
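
For reference, the "2-byte form" is an ordinary ret carrying a redundant REP
prefix; both encodings return identically, the prefix merely sidesteps the
slow-ret case described in the x86inc comment on AMD CPUs up to K10. A minimal
NASM sketch of the idea behind the automatic substitution (the macro and flag
names below are hypothetical, not the actual x86inc implementation):

    ret                        ; C3    -- ordinary 1-byte return
    rep ret                    ; F3 C3 -- 2-byte form, avoids the K10 penalty

    %assign ret_follows_branch 0       ; hypothetical flag, set by branch-emitting macros

    %macro AUTO_RET 0                  ; hypothetical stand-in for x86inc's detection
    %if ret_follows_branch
        rep ret                ; return directly after a branch: emit the padded form
    %else
        ret                    ; anywhere else a plain ret is fine
    %endif
    %assign ret_follows_branch 0
    %endmacro

Because this detection only covers "follows a branch", returns that are merely
branch targets lose the workaround after this change, which is why only the
AMD K10 CPUs mentioned above are affected.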
Lynne, 2 years ago
commit bbe95f7353

+ 5 - 5
libavcodec/x86/aacpsdsp.asm

@@ -49,7 +49,7 @@ align 16
     add  dstq, mmsize
     add    nq, mmsize*2
     jl .loop
-    REP_RET
+    RET
 %endmacro

 INIT_XMM sse
@@ -83,7 +83,7 @@ align 16
     add   src2q, mmsize
     add      nq, mmsize*2
     jl .loop
-    REP_RET
+    RET

 ;***********************************************************************
 ;void ff_ps_stereo_interpolate_sse3(float (*l)[2], float (*r)[2],
@@ -116,7 +116,7 @@ align 16
     movhps [rq+nq], m2
     add      nq, 8
     jl .loop
-    REP_RET
+    RET

 ;***************************************************************************
 ;void ps_stereo_interpolate_ipdopd_sse3(float (*l)[2], float (*r)[2],
@@ -164,7 +164,7 @@ align 16
     movhps [rq+nq], m2
     add      nq, 8
     jl .loop
-    REP_RET
+    RET

 ;**********************************************************
 ;void ps_hybrid_analysis_ileave_sse(float out[2][38][64],
@@ -484,7 +484,7 @@ align 16
     add    outq, strideq
     add      nq, 64
     jl .loop
-    REP_RET
+    RET
 %endmacro

 INIT_XMM sse

+ 3 - 3
libavcodec/x86/ac3dsp.asm

@@ -60,7 +60,7 @@ cglobal ac3_exponent_min, 3, 4, 2, exp, reuse_blks, expn, offset
     sub        expnq, mmsize
     jg .nextexp
 .end:
-    REP_RET
+    RET
 %endmacro

 %define LOOP_ALIGN ALIGN 16
@@ -126,7 +126,7 @@ cglobal float_to_fixed24, 3, 3, 9, dst, src, len
     sub      lenq, 16
 %endif
     ja .loop
-    REP_RET
+    RET

 ;------------------------------------------------------------------------------
 ; int ff_ac3_compute_mantissa_size(uint16_t mant_cnt[6][16])
@@ -220,7 +220,7 @@ cglobal ac3_extract_exponents, 3, 3, 4, exp, coef, len

     add     lenq, 4
     jl .loop
-    REP_RET
+    RET
 %endmacro

 %if HAVE_SSE2_EXTERNAL

+ 2 - 2
libavcodec/x86/alacdsp.asm

@@ -100,7 +100,7 @@ align 16

     add     lenq, mmsize*2
     jl .loop
-    REP_RET
+    RET

 %if ARCH_X86_64
 cglobal alac_append_extra_bits_mono, 2, 5, 3, buf, exbuf, exbits, ch, len
@@ -130,4 +130,4 @@ align 16

     add     lenq, mmsize*2
     jl .loop
-    REP_RET
+    RET

+ 1 - 1
libavcodec/x86/audiodsp.asm

@@ -123,7 +123,7 @@ cglobal vector_clip_int32%5, 5,5,%1, dst, src, min, max, len
     add     dstq, mmsize*4*(%2+%3)
     sub     lend, mmsize*(%2+%3)
     jg .loop
-    REP_RET
+    RET
 %endmacro

 INIT_XMM sse2

+ 7 - 7
libavcodec/x86/dirac_dwt.asm

@@ -75,7 +75,7 @@ cglobal vertical_compose53iL0_%1, 4,4,1, b0, b1, b2, width
     COMPOSE_53iL0 m0, m1, [b2q+2*widthq], m2
     mova    [b1q+2*widthq], m0
     jg      .loop
-    REP_RET
+    RET

 ; void vertical_compose_dirac53iH0(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2,
 ;                                  int width)
@@ -93,7 +93,7 @@ cglobal vertical_compose_dirac53iH0_%1, 4,4,1, b0, b1, b2, width
     paddw   m0, [b1q+2*widthq]
     mova    [b1q+2*widthq], m0
     jg      .loop
-    REP_RET
+    RET

 ; void vertical_compose_dd97iH0(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2,
 ;                               IDWTELEM *b3, IDWTELEM *b4, int width)
@@ -110,7 +110,7 @@ cglobal vertical_compose_dd97iH0_%1, 6,6,5, b0, b1, b2, b3, b4, width
     COMPOSE_DD97iH0 [b2q+2*widthq], [b3q+2*widthq], [b4q+2*widthq]
     mova    [b2q+2*widthq], m1
     jg      .loop
-    REP_RET
+    RET

 ; void vertical_compose_dd137iL0(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2,
 ;                                IDWTELEM *b3, IDWTELEM *b4, int width)
@@ -139,7 +139,7 @@ cglobal vertical_compose_dd137iL0_%1, 6,6,6, b0, b1, b2, b3, b4, width
     psubw   m5, m1
     mova    [b2q+2*widthq], m5
     jg      .loop
-    REP_RET
+    RET

 ; void vertical_compose_haar(IDWTELEM *b0, IDWTELEM *b1, int width)
 cglobal vertical_compose_haar_%1, 3,4,3, b0, b1, width
@@ -159,7 +159,7 @@ cglobal vertical_compose_haar_%1, 3,4,3, b0, b1, width
     paddw   m2, m0
     mova    [b1q+2*widthq], m2
     jg      .loop
-    REP_RET
+    RET
 %endmacro

 ; extend the left and right edges of the tmp array by %1 and %2 respectively
@@ -225,7 +225,7 @@ cglobal horizontal_compose_haar%2i_%1, 3,6,4, b, tmp, w, x, w2, b_w2
     cmp     xq, w2q
     jl      .highpass_loop
 .end:
-    REP_RET
+    RET
 %endmacro


@@ -290,7 +290,7 @@ cglobal horizontal_compose_dd97i_ssse3, 3,6,8, b, tmp, w, x, w2, b_w2
     cmp     xd, w2d
     jl      .highpass_loop
 .end:
-    REP_RET
+    RET


 INIT_XMM

+ 4 - 4
libavcodec/x86/fft.asm

@@ -475,7 +475,7 @@ cglobal fft_calc, 2,5,8
     mov     r0, r1
     mov     r1, r3
     FFT_DISPATCH _interleave %+ SUFFIX, r1
-    REP_RET
+    RET

 %endif

@@ -510,7 +510,7 @@ cglobal fft_calc, 2,5,8
     add      r2, mmsize*2
     jl       .loop
 .end:
-    REP_RET
+    RET

 cglobal fft_permute, 2,7,1
     mov     r4,  [r0 + FFTContext.revtab]
@@ -543,7 +543,7 @@ cglobal fft_permute, 2,7,1
     movaps  [r1 + r2 + 16], xmm1
     add     r2, 32
     jl      .loopcopy
-    REP_RET
+    RET

 INIT_XMM sse
 cglobal imdct_calc, 3,5,3
@@ -583,7 +583,7 @@ cglobal imdct_calc, 3,5,3
     sub     r3, mmsize
     add     r2, mmsize
     jl      .loop
-    REP_RET
+    RET

 %ifdef PIC
 %define SECTION_REL - $$

+ 4 - 4
libavcodec/x86/flacdsp.asm

@@ -79,7 +79,7 @@ ALIGN 16
     movd   [decodedq+4], m1
     jg .loop_sample
 .ret:
-    REP_RET
+    RET
 %endmacro

 %if HAVE_XOP_EXTERNAL
@@ -133,7 +133,7 @@ align 16
     mova [outq + lenq], m%2
     add      lenq, 16
     jl .loop
-    REP_RET
+    RET
 %endmacro

 INIT_XMM sse2
@@ -177,7 +177,7 @@ align 16
     add      outq, mmsize*2
     sub      lend, mmsize/4
     jg .loop
-    REP_RET
+    RET
 %endmacro

 INIT_XMM sse2
@@ -302,7 +302,7 @@ align 16
     add      outq, mmsize*REPCOUNT
     sub      lend, mmsize/4
     jg .loop
-    REP_RET
+    RET
 %endmacro

 INIT_XMM ssse3

+ 9 - 9
libavcodec/x86/h264_chromamc.asm

@@ -112,7 +112,7 @@ cglobal %1_%2_chroma_mc8%3, 6, 7 + extra_regs, 0
     jne .at_least_one_non_zero
     ; mx == 0 AND my == 0 - no filter needed
     mv0_pixels_mc8
-    REP_RET
+    RET

 .at_least_one_non_zero:
 %ifidn %2, rv40
@@ -192,7 +192,7 @@ cglobal %1_%2_chroma_mc8%3, 6, 7 + extra_regs, 0
     add           r1, r2
     dec           r3d
     jne .next1drow
-    REP_RET
+    RET

 .both_non_zero: ; general case, bilinear
     movd          m4, r4d         ; x
@@ -365,7 +365,7 @@ cglobal %1_%2_chroma_mc4, 6, 6 + extra_regs, 0
     add           r0, r2
     sub          r3d, 2
     jnz .next2rows
-    REP_RET
+    RET
 %endmacro

 %macro chroma_mc2_mmx_func 2
@@ -407,7 +407,7 @@ cglobal %1_%2_chroma_mc2, 6, 7, 0
     add           r0, r2
     sub          r3d, 1
     jnz .nextrow
-    REP_RET
+    RET
 %endmacro

 %define rnd_1d_h264 pw_4
@@ -453,7 +453,7 @@ cglobal %1_%2_chroma_mc8%3, 6, 7, 8
     jne .at_least_one_non_zero
     ; mx == 0 AND my == 0 - no filter needed
     mv0_pixels_mc8
-    REP_RET
+    RET

 .at_least_one_non_zero:
     test         r5d, r5d
@@ -514,7 +514,7 @@ cglobal %1_%2_chroma_mc8%3, 6, 7, 8
     sub          r3d, 2
     lea           r0, [r0+r2*2]
     jg .next2rows
-    REP_RET
+    RET

 .my_is_zero:
     mov          r5d, r4d
@@ -551,7 +551,7 @@ cglobal %1_%2_chroma_mc8%3, 6, 7, 8
     lea           r0, [r0+r2*2]
     lea           r1, [r1+r2*2]
     jg .next2xrows
-    REP_RET
+    RET

 .mx_is_zero:
     mov          r4d, r5d
@@ -588,7 +588,7 @@ cglobal %1_%2_chroma_mc8%3, 6, 7, 8
     sub          r3d, 2
     lea           r0, [r0+r2*2]
     jg .next2yrows
-    REP_RET
+    RET
 %endmacro

 %macro chroma_mc4_ssse3_func 2
@@ -638,7 +638,7 @@ cglobal %1_%2_chroma_mc4, 6, 7, 0
     sub          r3d, 2
     lea           r0, [r0+r2*2]
     jg .next2rows
-    REP_RET
+    RET
 %endmacro

 %define CHROMAMC_AVG NOTHING

+ 5 - 5
libavcodec/x86/h264_chromamc_10bit.asm

@@ -67,7 +67,7 @@ cglobal %1_h264_chroma_mc8_10, 6,7,8
     jne .at_least_one_non_zero
     ; mx == 0 AND my == 0 - no filter needed
     MV0_PIXELS_MC8
-    REP_RET
+    RET

 .at_least_one_non_zero:
     mov          r6d, 2
@@ -102,7 +102,7 @@ cglobal %1_h264_chroma_mc8_10, 6,7,8
     add           r1, r2
     dec           r3d
     jne .next1drow
-    REP_RET
+    RET

 .xy_interpolation: ; general case, bilinear
     movd          m4, r4m         ; x
@@ -144,7 +144,7 @@ cglobal %1_h264_chroma_mc8_10, 6,7,8
     add           r0, r2
     dec          r3d
     jne .next2drow
-    REP_RET
+    RET
 %endmacro

 ;-----------------------------------------------------------------------------
@@ -194,7 +194,7 @@ cglobal %1_h264_chroma_mc4_10, 6,6,7
     MC4_OP m6, m0
     sub   r3d, 2
     jnz .next2rows
-    REP_RET
+    RET
 %endmacro

 ;-----------------------------------------------------------------------------
@@ -234,7 +234,7 @@ cglobal %1_h264_chroma_mc2_10, 6,7
     add           r0, r2
     dec          r3d
     jnz .nextrow
-    REP_RET
+    RET
 %endmacro

 %macro NOTHING 2-3

+ 3 - 3
libavcodec/x86/h264_deblock_10bit.asm

@@ -372,7 +372,7 @@ cglobal deblock_v_luma_10, 5,5,15
     add         r4, 2
     dec         r3
     jg .loop
-    REP_RET
+    RET

 cglobal deblock_h_luma_10, 5,7,15
     shl        r2d, 2
@@ -411,7 +411,7 @@ cglobal deblock_h_luma_10, 5,7,15
     lea         r5, [r5+r1*8]
     dec         r6
     jg .loop
-    REP_RET
+    RET
 %endmacro

 INIT_XMM sse2
@@ -648,7 +648,7 @@ cglobal deblock_v_luma_intra_10, 4,7,16
     add     r4, mmsize
     dec     r6
     jg .loop
-    REP_RET
+    RET

 ;-----------------------------------------------------------------------------
 ; void ff_deblock_h_luma_intra_10(uint16_t *pix, int stride, int alpha,

Some files were not shown because too many files changed in this diff