
Convert asm keyword into __asm__.

Neither the asm() nor the __asm__() keyword is part of the C99
standard: while GCC accepts the former in C89 mode, it rejects it in
C99 mode unless GNU extensions are enabled (with -fasm). The latter
form is accepted in any mode as an extension, without requiring
further command-line options.

The Sun Studio C99 compiler likewise does not accept asm(); it does
accept __asm__(), albeit with a warning that it is not valid C99 syntax.

Originally committed as revision 15627 to svn://svn.ffmpeg.org/ffmpeg/trunk
Diego Pettenò · commit be449fca79
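
For reference, a minimal C snippet illustrating the behaviour described in the commit message (the file name and compiler invocation are illustrative, not part of the commit):

    /* asm_keyword_test.c -- illustrative only.
       With "gcc -std=c99 -c asm_keyword_test.c", foo_plain() fails to build
       because the plain asm keyword is disabled in strict C99 mode (it can be
       re-enabled with -fasm or a gnu* dialect); foo_dunder() builds in any
       mode, since __asm__ is always available as a GNU extension. */
    int foo_plain(void)  { asm     volatile (""); return 0; }
    int foo_dunder(void) { __asm__ volatile (""); return 0; }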

+ 3 - 3
configure

@@ -448,7 +448,7 @@ check_asm(){
     asm="$2"
     shift 2
     check_cc "$@" <<EOF && enable $name || disable $name
-int foo(void){ asm volatile($asm); }
+int foo(void){ __asm__ volatile($asm); }
 EOF
 }
 
@@ -1574,7 +1574,7 @@ if enabled x86; then
     # base pointer is cleared in the inline assembly code.
     check_exec_crash <<EOF && enable ebp_available
     volatile int i=0;
-    asm volatile (
+    __asm__ volatile (
         "xorl %%ebp, %%ebp"
     ::: "%ebp");
     return i;
@@ -1934,7 +1934,7 @@ VHOOKCFLAGS="-fPIC"
 # Find out if the .align argument is a power of two or not.
 if test $asmalign_pot = "unknown"; then
     disable asmalign_pot
-    echo 'asm (".align 3");' | check_cc && enable asmalign_pot
+    echo '__asm__ (".align 3");' | check_cc && enable asmalign_pot
 fi
 
 enabled_any $DECODER_LIST      && enable decoders
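
The asmalign_pot hunk above works because assemblers disagree on what the argument of .align means; a sketch of the two interpretations (the comments are editorial, not from configure):

    /* what configure feeds to check_cc in the asmalign_pot probe */
    __asm__ (".align 3");
    /* accepted where the argument is a power-of-two exponent (3 -> 2^3 = 8
       bytes), so asmalign_pot gets enabled; presumably rejected where the
       argument is a byte count, since 3 is not a power of two */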

+ 3 - 3
doc/optimization.txt

@@ -154,17 +154,17 @@ The minimum guaranteed alignment is written in the .h files, for example:
 General Tips:
 -------------
 Use asm loops like:
-asm(
+__asm__(
     "1: ....
     ...
     "jump_instruciton ....
 Do not use C loops:
 do{
-    asm(
+    __asm__(
         ...
 }while()
 
-Use asm() instead of intrinsics. The latter requires a good optimizing compiler
+Use __asm__() instead of intrinsics. The latter requires a good optimizing compiler
 which gcc is not.
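
A hedged illustration of that guideline using GCC extended asm (x86 AT&T syntax; the zeroing loop is a made-up placeholder, not FFmpeg code):

    #include <stdint.h>

    /* preferred: the whole loop, including the branch, lives in one block;
       assumes end > p and a byte count that is a multiple of 4 */
    static void zero_asm_loop(uint8_t *p, const uint8_t *end)
    {
        __asm__ volatile (
            "1:                  \n\t"
            "movl $0, (%0)       \n\t"
            "add  $4, %0         \n\t"
            "cmp  %1, %0         \n\t"
            "jb   1b             \n\t"
            : "+r"(p) : "r"(end) : "memory", "cc");
    }

    /* discouraged: C drives the loop and the asm block is re-entered (and
       acts as a scheduling barrier) on every iteration */
    static void zero_c_loop(uint8_t *p, const uint8_t *end)
    {
        do {
            __asm__ volatile ("movl $0, (%0)" : : "r"(p) : "memory");
            p += 4;
        } while (p < end);
    }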
 
 

+ 51 - 51
libavcodec/alpha/asm.h

@@ -105,21 +105,21 @@ struct unaligned_long { uint64_t l; } __attribute__((packed));
 #define implver         __builtin_alpha_implver
 #define rpcc            __builtin_alpha_rpcc
 #else
-#define prefetch(p)     asm volatile("ldl $31,%0"  : : "m"(*(const char *) (p)) : "memory")
-#define prefetch_en(p)  asm volatile("ldq $31,%0"  : : "m"(*(const char *) (p)) : "memory")
-#define prefetch_m(p)   asm volatile("lds $f31,%0" : : "m"(*(const char *) (p)) : "memory")
-#define prefetch_men(p) asm volatile("ldt $f31,%0" : : "m"(*(const char *) (p)) : "memory")
-#define cmpbge(a, b) ({ uint64_t __r; asm ("cmpbge  %r1,%2,%0"  : "=r" (__r) : "rJ"  (a), "rI" (b)); __r; })
-#define extql(a, b)  ({ uint64_t __r; asm ("extql   %r1,%2,%0"  : "=r" (__r) : "rJ"  (a), "rI" (b)); __r; })
-#define extwl(a, b)  ({ uint64_t __r; asm ("extwl   %r1,%2,%0"  : "=r" (__r) : "rJ"  (a), "rI" (b)); __r; })
-#define extqh(a, b)  ({ uint64_t __r; asm ("extqh   %r1,%2,%0"  : "=r" (__r) : "rJ"  (a), "rI" (b)); __r; })
-#define zap(a, b)    ({ uint64_t __r; asm ("zap     %r1,%2,%0"  : "=r" (__r) : "rJ"  (a), "rI" (b)); __r; })
-#define zapnot(a, b) ({ uint64_t __r; asm ("zapnot  %r1,%2,%0"  : "=r" (__r) : "rJ"  (a), "rI" (b)); __r; })
-#define amask(a)     ({ uint64_t __r; asm ("amask   %1,%0"      : "=r" (__r) : "rI"  (a));           __r; })
-#define implver()    ({ uint64_t __r; asm ("implver %0"         : "=r" (__r));                       __r; })
-#define rpcc()       ({ uint64_t __r; asm volatile ("rpcc %0"   : "=r" (__r));                       __r; })
+#define prefetch(p)     __asm__ volatile("ldl $31,%0"  : : "m"(*(const char *) (p)) : "memory")
+#define prefetch_en(p)  __asm__ volatile("ldq $31,%0"  : : "m"(*(const char *) (p)) : "memory")
+#define prefetch_m(p)   __asm__ volatile("lds $f31,%0" : : "m"(*(const char *) (p)) : "memory")
+#define prefetch_men(p) __asm__ volatile("ldt $f31,%0" : : "m"(*(const char *) (p)) : "memory")
+#define cmpbge(a, b) ({ uint64_t __r; __asm__ ("cmpbge  %r1,%2,%0"  : "=r" (__r) : "rJ"  (a), "rI" (b)); __r; })
+#define extql(a, b)  ({ uint64_t __r; __asm__ ("extql   %r1,%2,%0"  : "=r" (__r) : "rJ"  (a), "rI" (b)); __r; })
+#define extwl(a, b)  ({ uint64_t __r; __asm__ ("extwl   %r1,%2,%0"  : "=r" (__r) : "rJ"  (a), "rI" (b)); __r; })
+#define extqh(a, b)  ({ uint64_t __r; __asm__ ("extqh   %r1,%2,%0"  : "=r" (__r) : "rJ"  (a), "rI" (b)); __r; })
+#define zap(a, b)    ({ uint64_t __r; __asm__ ("zap     %r1,%2,%0"  : "=r" (__r) : "rJ"  (a), "rI" (b)); __r; })
+#define zapnot(a, b) ({ uint64_t __r; __asm__ ("zapnot  %r1,%2,%0"  : "=r" (__r) : "rJ"  (a), "rI" (b)); __r; })
+#define amask(a)     ({ uint64_t __r; __asm__ ("amask   %1,%0"      : "=r" (__r) : "rI"  (a));           __r; })
+#define implver()    ({ uint64_t __r; __asm__ ("implver %0"         : "=r" (__r));                       __r; })
+#define rpcc()       ({ uint64_t __r; __asm__ volatile ("rpcc %0"   : "=r" (__r));                       __r; })
 #endif
-#define wh64(p) asm volatile("wh64 (%0)" : : "r"(p) : "memory")
+#define wh64(p) __asm__ volatile("wh64 (%0)" : : "r"(p) : "memory")
 
 #if GNUC_PREREQ(3,3) && defined(__alpha_max__)
 #define minub8  __builtin_alpha_minub8
@@ -136,19 +136,19 @@ struct unaligned_long { uint64_t l; } __attribute__((packed));
 #define unpkbl  __builtin_alpha_unpkbl
 #define unpkbw  __builtin_alpha_unpkbw
 #else
-#define minub8(a, b) ({ uint64_t __r; asm (".arch ev6; minub8  %r1,%2,%0"  : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
-#define minsb8(a, b) ({ uint64_t __r; asm (".arch ev6; minsb8  %r1,%2,%0"  : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
-#define minuw4(a, b) ({ uint64_t __r; asm (".arch ev6; minuw4  %r1,%2,%0"  : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
-#define minsw4(a, b) ({ uint64_t __r; asm (".arch ev6; minsw4  %r1,%2,%0"  : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
-#define maxub8(a, b) ({ uint64_t __r; asm (".arch ev6; maxub8  %r1,%2,%0"  : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
-#define maxsb8(a, b) ({ uint64_t __r; asm (".arch ev6; maxsb8  %r1,%2,%0"  : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
-#define maxuw4(a, b) ({ uint64_t __r; asm (".arch ev6; maxuw4  %r1,%2,%0"  : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
-#define maxsw4(a, b) ({ uint64_t __r; asm (".arch ev6; maxsw4  %r1,%2,%0"  : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
-#define perr(a, b)   ({ uint64_t __r; asm (".arch ev6; perr    %r1,%r2,%0" : "=r" (__r) : "%rJ" (a), "rJ" (b)); __r; })
-#define pklb(a)      ({ uint64_t __r; asm (".arch ev6; pklb    %r1,%0"     : "=r" (__r) : "rJ"  (a));           __r; })
-#define pkwb(a)      ({ uint64_t __r; asm (".arch ev6; pkwb    %r1,%0"     : "=r" (__r) : "rJ"  (a));           __r; })
-#define unpkbl(a)    ({ uint64_t __r; asm (".arch ev6; unpkbl  %r1,%0"     : "=r" (__r) : "rJ"  (a));           __r; })
-#define unpkbw(a)    ({ uint64_t __r; asm (".arch ev6; unpkbw  %r1,%0"     : "=r" (__r) : "rJ"  (a));           __r; })
+#define minub8(a, b) ({ uint64_t __r; __asm__ (".arch ev6; minub8  %r1,%2,%0"  : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define minsb8(a, b) ({ uint64_t __r; __asm__ (".arch ev6; minsb8  %r1,%2,%0"  : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define minuw4(a, b) ({ uint64_t __r; __asm__ (".arch ev6; minuw4  %r1,%2,%0"  : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define minsw4(a, b) ({ uint64_t __r; __asm__ (".arch ev6; minsw4  %r1,%2,%0"  : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define maxub8(a, b) ({ uint64_t __r; __asm__ (".arch ev6; maxub8  %r1,%2,%0"  : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define maxsb8(a, b) ({ uint64_t __r; __asm__ (".arch ev6; maxsb8  %r1,%2,%0"  : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define maxuw4(a, b) ({ uint64_t __r; __asm__ (".arch ev6; maxuw4  %r1,%2,%0"  : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define maxsw4(a, b) ({ uint64_t __r; __asm__ (".arch ev6; maxsw4  %r1,%2,%0"  : "=r" (__r) : "%rJ" (a), "rI" (b)); __r; })
+#define perr(a, b)   ({ uint64_t __r; __asm__ (".arch ev6; perr    %r1,%r2,%0" : "=r" (__r) : "%rJ" (a), "rJ" (b)); __r; })
+#define pklb(a)      ({ uint64_t __r; __asm__ (".arch ev6; pklb    %r1,%0"     : "=r" (__r) : "rJ"  (a));           __r; })
+#define pkwb(a)      ({ uint64_t __r; __asm__ (".arch ev6; pkwb    %r1,%0"     : "=r" (__r) : "rJ"  (a));           __r; })
+#define unpkbl(a)    ({ uint64_t __r; __asm__ (".arch ev6; unpkbl  %r1,%0"     : "=r" (__r) : "rJ"  (a));           __r; })
+#define unpkbw(a)    ({ uint64_t __r; __asm__ (".arch ev6; unpkbw  %r1,%0"     : "=r" (__r) : "rJ"  (a));           __r; })
 #endif
 
 #elif defined(__DECC)           /* Digital/Compaq/hp "ccc" compiler */
@@ -158,31 +158,31 @@ struct unaligned_long { uint64_t l; } __attribute__((packed));
 #define ldl(p) (*(const int32_t *)  (p))
 #define stq(l, p) do { *(uint64_t *) (p) = (l); } while (0)
 #define stl(l, p) do { *(int32_t *)  (p) = (l); } while (0)
-#define ldq_u(a)     asm ("ldq_u   %v0,0(%a0)", a)
+#define ldq_u(a)     __asm__ ("ldq_u   %v0,0(%a0)", a)
 #define uldq(a)      (*(const __unaligned uint64_t *) (a))
-#define cmpbge(a, b) asm ("cmpbge  %a0,%a1,%v0", a, b)
-#define extql(a, b)  asm ("extql   %a0,%a1,%v0", a, b)
-#define extwl(a, b)  asm ("extwl   %a0,%a1,%v0", a, b)
-#define extqh(a, b)  asm ("extqh   %a0,%a1,%v0", a, b)
-#define zap(a, b)    asm ("zap     %a0,%a1,%v0", a, b)
-#define zapnot(a, b) asm ("zapnot  %a0,%a1,%v0", a, b)
-#define amask(a)     asm ("amask   %a0,%v0", a)
-#define implver()    asm ("implver %v0")
-#define rpcc()       asm ("rpcc           %v0")
-#define minub8(a, b) asm ("minub8  %a0,%a1,%v0", a, b)
-#define minsb8(a, b) asm ("minsb8  %a0,%a1,%v0", a, b)
-#define minuw4(a, b) asm ("minuw4  %a0,%a1,%v0", a, b)
-#define minsw4(a, b) asm ("minsw4  %a0,%a1,%v0", a, b)
-#define maxub8(a, b) asm ("maxub8  %a0,%a1,%v0", a, b)
-#define maxsb8(a, b) asm ("maxsb8  %a0,%a1,%v0", a, b)
-#define maxuw4(a, b) asm ("maxuw4  %a0,%a1,%v0", a, b)
-#define maxsw4(a, b) asm ("maxsw4  %a0,%a1,%v0", a, b)
-#define perr(a, b)   asm ("perr    %a0,%a1,%v0", a, b)
-#define pklb(a)      asm ("pklb    %a0,%v0", a)
-#define pkwb(a)      asm ("pkwb    %a0,%v0", a)
-#define unpkbl(a)    asm ("unpkbl  %a0,%v0", a)
-#define unpkbw(a)    asm ("unpkbw  %a0,%v0", a)
-#define wh64(a)      asm ("wh64    %a0", a)
+#define cmpbge(a, b) __asm__ ("cmpbge  %a0,%a1,%v0", a, b)
+#define extql(a, b)  __asm__ ("extql   %a0,%a1,%v0", a, b)
+#define extwl(a, b)  __asm__ ("extwl   %a0,%a1,%v0", a, b)
+#define extqh(a, b)  __asm__ ("extqh   %a0,%a1,%v0", a, b)
+#define zap(a, b)    __asm__ ("zap     %a0,%a1,%v0", a, b)
+#define zapnot(a, b) __asm__ ("zapnot  %a0,%a1,%v0", a, b)
+#define amask(a)     __asm__ ("amask   %a0,%v0", a)
+#define implver()    __asm__ ("implver %v0")
+#define rpcc()       __asm__ ("rpcc           %v0")
+#define minub8(a, b) __asm__ ("minub8  %a0,%a1,%v0", a, b)
+#define minsb8(a, b) __asm__ ("minsb8  %a0,%a1,%v0", a, b)
+#define minuw4(a, b) __asm__ ("minuw4  %a0,%a1,%v0", a, b)
+#define minsw4(a, b) __asm__ ("minsw4  %a0,%a1,%v0", a, b)
+#define maxub8(a, b) __asm__ ("maxub8  %a0,%a1,%v0", a, b)
+#define maxsb8(a, b) __asm__ ("maxsb8  %a0,%a1,%v0", a, b)
+#define maxuw4(a, b) __asm__ ("maxuw4  %a0,%a1,%v0", a, b)
+#define maxsw4(a, b) __asm__ ("maxsw4  %a0,%a1,%v0", a, b)
+#define perr(a, b)   __asm__ ("perr    %a0,%a1,%v0", a, b)
+#define pklb(a)      __asm__ ("pklb    %a0,%v0", a)
+#define pkwb(a)      __asm__ ("pkwb    %a0,%v0", a)
+#define unpkbl(a)    __asm__ ("unpkbl  %a0,%v0", a)
+#define unpkbw(a)    __asm__ ("unpkbw  %a0,%v0", a)
+#define wh64(a)      __asm__ ("wh64    %a0", a)
 
 #else
 #error "Unknown compiler!"

+ 2 - 2
libavcodec/armv4l/dsputil_arm.c

@@ -66,7 +66,7 @@ CALL_2X_PIXELS(put_no_rnd_pixels16_xy2_arm, put_no_rnd_pixels8_xy2_arm, 8)
 
 static void add_pixels_clamped_ARM(short *block, unsigned char *dest, int line_size)
 {
-    asm volatile (
+    __asm__ volatile (
                   "mov r10, #8 \n\t"
 
                   "1: \n\t"
@@ -206,7 +206,7 @@ static void simple_idct_ipp_add(uint8_t *dest, int line_size, DCTELEM *block)
 #ifdef HAVE_ARMV5TE
 static void prefetch_arm(void *mem, int stride, int h)
 {
-    asm volatile(
+    __asm__ volatile(
         "1:              \n\t"
         "subs %0, %0, #1 \n\t"
         "pld  [%1]       \n\t"

+ 5 - 5
libavcodec/armv4l/dsputil_iwmmxt.c

@@ -22,7 +22,7 @@
 #include "libavcodec/dsputil.h"
 
 #define DEF(x, y) x ## _no_rnd_ ## y ##_iwmmxt
-#define SET_RND(regd)  asm volatile ("mov r12, #1 \n\t tbcsth " #regd ", r12":::"r12");
+#define SET_RND(regd)  __asm__ volatile ("mov r12, #1 \n\t tbcsth " #regd ", r12":::"r12");
 #define WAVG2B "wavg2b"
 #include "dsputil_iwmmxt_rnd.h"
 #undef DEF
@@ -30,7 +30,7 @@
 #undef WAVG2B
 
 #define DEF(x, y) x ## _ ## y ##_iwmmxt
-#define SET_RND(regd)  asm volatile ("mov r12, #2 \n\t tbcsth " #regd ", r12":::"r12");
+#define SET_RND(regd)  __asm__ volatile ("mov r12, #2 \n\t tbcsth " #regd ", r12":::"r12");
 #define WAVG2B "wavg2br"
 #include "dsputil_iwmmxt_rnd.h"
 #undef DEF
@@ -39,7 +39,7 @@
 
 // need scheduling
 #define OP(AVG)                                         \
-    asm volatile (                                      \
+    __asm__ volatile (                                      \
         /* alignment */                                 \
         "and r12, %[pixels], #7 \n\t"                   \
         "bic %[pixels], %[pixels], #7 \n\t"             \
@@ -89,7 +89,7 @@ void add_pixels_clamped_iwmmxt(const DCTELEM *block, uint8_t *pixels, int line_s
 {
     uint8_t *pixels2 = pixels + line_size;
 
-    asm volatile (
+    __asm__ volatile (
         "mov            r12, #4                 \n\t"
         "1:                                     \n\t"
         "pld            [%[pixels], %[line_size2]]              \n\t"
@@ -125,7 +125,7 @@ void add_pixels_clamped_iwmmxt(const DCTELEM *block, uint8_t *pixels, int line_s
 
 static void clear_blocks_iwmmxt(DCTELEM *blocks)
 {
-    asm volatile(
+    __asm__ volatile(
                 "wzero wr0                      \n\t"
                 "mov r1, #(128 * 6 / 32)        \n\t"
                 "1:                             \n\t"

+ 15 - 15
libavcodec/armv4l/dsputil_iwmmxt_rnd.h

@@ -26,7 +26,7 @@
 void DEF(put, pixels8)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
 {
     int stride = line_size;
-    asm volatile (
+    __asm__ volatile (
         "and r12, %[pixels], #7 \n\t"
         "bic %[pixels], %[pixels], #7 \n\t"
         "tmcr wcgr1, r12 \n\t"
@@ -60,7 +60,7 @@ void DEF(put, pixels8)(uint8_t *block, const uint8_t *pixels, const int line_siz
 void DEF(avg, pixels8)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
 {
     int stride = line_size;
-    asm volatile (
+    __asm__ volatile (
         "and r12, %[pixels], #7 \n\t"
         "bic %[pixels], %[pixels], #7 \n\t"
         "tmcr wcgr1, r12 \n\t"
@@ -102,7 +102,7 @@ void DEF(avg, pixels8)(uint8_t *block, const uint8_t *pixels, const int line_siz
 void DEF(put, pixels16)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
 {
     int stride = line_size;
-    asm volatile (
+    __asm__ volatile (
         "and r12, %[pixels], #7 \n\t"
         "bic %[pixels], %[pixels], #7 \n\t"
         "tmcr wcgr1, r12 \n\t"
@@ -142,7 +142,7 @@ void DEF(put, pixels16)(uint8_t *block, const uint8_t *pixels, const int line_si
 void DEF(avg, pixels16)(uint8_t *block, const uint8_t *pixels, const int line_size, int h)
 {
     int stride = line_size;
-    asm volatile (
+    __asm__ volatile (
         "pld [%[pixels]]                \n\t"
         "pld [%[pixels], #32]           \n\t"
         "pld [%[block]]                 \n\t"
@@ -201,7 +201,7 @@ void DEF(put, pixels8_x2)(uint8_t *block, const uint8_t *pixels, const int line_
     // [wr0 wr1 wr2 wr3] for previous line
     // [wr4 wr5 wr6 wr7] for current line
     SET_RND(wr15); // =2 for rnd  and  =1 for no_rnd version
-    asm volatile(
+    __asm__ volatile(
         "pld [%[pixels]]                \n\t"
         "pld [%[pixels], #32]           \n\t"
         "and r12, %[pixels], #7         \n\t"
@@ -250,7 +250,7 @@ void DEF(put, pixels16_x2)(uint8_t *block, const uint8_t *pixels, const int line
     // [wr0 wr1 wr2 wr3] for previous line
     // [wr4 wr5 wr6 wr7] for current line
     SET_RND(wr15); // =2 for rnd  and  =1 for no_rnd version
-    asm volatile(
+    __asm__ volatile(
         "pld [%[pixels]]                \n\t"
         "pld [%[pixels], #32]           \n\t"
         "and r12, %[pixels], #7         \n\t"
@@ -311,7 +311,7 @@ void DEF(avg, pixels8_x2)(uint8_t *block, const uint8_t *pixels, const int line_
     // [wr0 wr1 wr2 wr3] for previous line
     // [wr4 wr5 wr6 wr7] for current line
     SET_RND(wr15); // =2 for rnd  and  =1 for no_rnd version
-    asm volatile(
+    __asm__ volatile(
         "pld [%[pixels]]                \n\t"
         "pld [%[pixels], #32]           \n\t"
         "pld [%[block]]                 \n\t"
@@ -372,7 +372,7 @@ void DEF(avg, pixels16_x2)(uint8_t *block, const uint8_t *pixels, const int line
     // [wr0 wr1 wr2 wr3] for previous line
     // [wr4 wr5 wr6 wr7] for current line
     SET_RND(wr15); // =2 for rnd  and  =1 for no_rnd version
-    asm volatile(
+    __asm__ volatile(
         "pld [%[pixels]]                \n\t"
         "pld [%[pixels], #32]           \n\t"
         "pld [%[block]]                 \n\t"
@@ -448,7 +448,7 @@ void DEF(avg, pixels8_y2)(uint8_t *block, const uint8_t *pixels, const int line_
     int stride = line_size;
     // [wr0 wr1 wr2 wr3] for previous line
     // [wr4 wr5 wr6 wr7] for current line
-    asm volatile(
+    __asm__ volatile(
         "pld            [%[pixels]]                             \n\t"
         "pld            [%[pixels], #32]                        \n\t"
         "and            r12, %[pixels], #7                      \n\t"
@@ -502,7 +502,7 @@ void DEF(put, pixels16_y2)(uint8_t *block, const uint8_t *pixels, const int line
     int stride = line_size;
     // [wr0 wr1 wr2 wr3] for previous line
     // [wr4 wr5 wr6 wr7] for current line
-    asm volatile(
+    __asm__ volatile(
         "pld [%[pixels]]                \n\t"
         "pld [%[pixels], #32]           \n\t"
         "and r12, %[pixels], #7         \n\t"
@@ -559,7 +559,7 @@ void DEF(avg, pixels16_y2)(uint8_t *block, const uint8_t *pixels, const int line
     int stride = line_size;
     // [wr0 wr1 wr2 wr3] for previous line
     // [wr4 wr5 wr6 wr7] for current line
-    asm volatile(
+    __asm__ volatile(
         "pld [%[pixels]]                \n\t"
         "pld [%[pixels], #32]           \n\t"
         "and r12, %[pixels], #7         \n\t"
@@ -627,7 +627,7 @@ void DEF(put, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, const int line
     // [wr0 wr1 wr2 wr3] for previous line
     // [wr4 wr5 wr6 wr7] for current line
     SET_RND(wr15); // =2 for rnd  and  =1 for no_rnd version
-    asm volatile(
+    __asm__ volatile(
         "pld [%[pixels]]                \n\t"
         "mov r12, #2                    \n\t"
         "pld [%[pixels], #32]           \n\t"
@@ -721,7 +721,7 @@ void DEF(put, pixels16_xy2)(uint8_t *block, const uint8_t *pixels, const int lin
     // [wr0 wr1 wr2 wr3] for previous line
     // [wr4 wr5 wr6 wr7] for current line
     SET_RND(wr15); // =2 for rnd  and  =1 for no_rnd version
-    asm volatile(
+    __asm__ volatile(
         "pld [%[pixels]]                \n\t"
         "mov r12, #2                    \n\t"
         "pld [%[pixels], #32]           \n\t"
@@ -863,7 +863,7 @@ void DEF(avg, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, const int line
     // [wr0 wr1 wr2 wr3] for previous line
     // [wr4 wr5 wr6 wr7] for current line
     SET_RND(wr15); // =2 for rnd  and  =1 for no_rnd version
-    asm volatile(
+    __asm__ volatile(
         "pld [%[block]]                 \n\t"
         "pld [%[block], #32]            \n\t"
         "pld [%[pixels]]                \n\t"
@@ -967,7 +967,7 @@ void DEF(avg, pixels16_xy2)(uint8_t *block, const uint8_t *pixels, const int lin
     // [wr0 wr1 wr2 wr3] for previous line
     // [wr4 wr5 wr6 wr7] for current line
     SET_RND(wr15); // =2 for rnd  and  =1 for no_rnd version
-    asm volatile(
+    __asm__ volatile(
         "pld [%[block]]                 \n\t"
         "pld [%[block], #32]            \n\t"
         "pld [%[pixels]]                \n\t"

+ 3 - 3
libavcodec/armv4l/float_arm_vfp.c

@@ -42,7 +42,7 @@
 static void vector_fmul_vfp(float *dst, const float *src, int len)
 {
     int tmp;
-    asm volatile(
+    __asm__ volatile(
         "fmrx       %[tmp], fpscr\n\t"
         "orr        %[tmp], %[tmp], #(3 << 16)\n\t" /* set vector size to 4 */
         "fmxr       fpscr, %[tmp]\n\t"
@@ -90,7 +90,7 @@ static void vector_fmul_vfp(float *dst, const float *src, int len)
 static void vector_fmul_reverse_vfp(float *dst, const float *src0, const float *src1, int len)
 {
     src1 += len;
-    asm volatile(
+    __asm__ volatile(
         "fldmdbs    %[src1]!, {s0-s3}\n\t"
         "fldmias    %[src0]!, {s8-s11}\n\t"
         "fldmdbs    %[src1]!, {s4-s7}\n\t"
@@ -149,7 +149,7 @@ static void vector_fmul_reverse_vfp(float *dst, const float *src0, const float *
  */
 void float_to_int16_vfp(int16_t *dst, const float *src, int len)
 {
-    asm volatile(
+    __asm__ volatile(
         "fldmias    %[src]!, {s16-s23}\n\t"
         "ftosis     s0, s16\n\t"
         "ftosis     s1, s17\n\t"

+ 7 - 7
libavcodec/armv4l/mathops.h

@@ -25,7 +25,7 @@
 #ifdef FRAC_BITS
 #   define MULL(a, b) \
         ({  int lo, hi;\
-         asm("smull %0, %1, %2, %3     \n\t"\
+         __asm__("smull %0, %1, %2, %3     \n\t"\
              "mov   %0, %0,     lsr %4\n\t"\
              "add   %1, %0, %1, lsl %5\n\t"\
              : "=&r"(lo), "=&r"(hi)\
@@ -37,21 +37,21 @@
 static inline av_const int MULH(int a, int b)
 {
     int r;
-    asm ("smmul %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
+    __asm__ ("smmul %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
     return r;
 }
 #define MULH MULH
 #else
 #define MULH(a, b) \
     ({ int lo, hi;\
-     asm ("smull %0, %1, %2, %3" : "=&r"(lo), "=&r"(hi) : "r"(b), "r"(a));\
+     __asm__ ("smull %0, %1, %2, %3" : "=&r"(lo), "=&r"(hi) : "r"(b), "r"(a));\
      hi; })
 #endif
 
 static inline av_const int64_t MUL64(int a, int b)
 {
     union { uint64_t x; unsigned hl[2]; } x;
-    asm ("smull %0, %1, %2, %3"
+    __asm__ ("smull %0, %1, %2, %3"
          : "=r"(x.hl[0]), "=r"(x.hl[1]) : "r"(a), "r"(b));
     return x.x;
 }
@@ -60,7 +60,7 @@ static inline av_const int64_t MUL64(int a, int b)
 static inline av_const int64_t MAC64(int64_t d, int a, int b)
 {
     union { uint64_t x; unsigned hl[2]; } x = { d };
-    asm ("smlal %0, %1, %2, %3"
+    __asm__ ("smlal %0, %1, %2, %3"
          : "+r"(x.hl[0]), "+r"(x.hl[1]) : "r"(a), "r"(b));
     return x.x;
 }
@@ -71,11 +71,11 @@ static inline av_const int64_t MAC64(int64_t d, int a, int b)
 
 /* signed 16x16 -> 32 multiply add accumulate */
 #   define MAC16(rt, ra, rb) \
-        asm ("smlabb %0, %2, %3, %0" : "=r" (rt) : "0" (rt), "r" (ra), "r" (rb));
+        __asm__ ("smlabb %0, %2, %3, %0" : "=r" (rt) : "0" (rt), "r" (ra), "r" (rb));
 /* signed 16x16 -> 32 multiply */
 #   define MUL16(ra, rb)                                                \
         ({ int __rt;                                                    \
-         asm ("smulbb %0, %1, %2" : "=r" (__rt) : "r" (ra), "r" (rb));  \
+         __asm__ ("smulbb %0, %1, %2" : "=r" (__rt) : "r" (ra), "r" (rb));  \
          __rt; })
 
 #endif
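
Portable references for what the smull-based macros above compute (the helper names are made up):

    #include <stdint.h>

    /* MULH: high 32 bits of the signed 32x32 -> 64-bit product */
    static inline int mulh_ref(int a, int b)
    {
        return (int)(((int64_t)a * b) >> 32);
    }

    /* MULL (with FRAC_BITS defined): the 64-bit product shifted right by
       FRAC_BITS, truncated to 32 bits */
    static inline int mull_ref(int a, int b, unsigned frac_bits)
    {
        return (int)(((int64_t)a * b) >> frac_bits);
    }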

+ 1 - 1
libavcodec/armv4l/mpegvideo_armv5te.c

@@ -65,7 +65,7 @@ static inline void dct_unquantize_h263_helper_c(DCTELEM *block, int qmul, int qa
 ({ DCTELEM *xblock = xxblock; \
    int xqmul = xxqmul, xqadd = xxqadd, xcount = xxcount, xtmp; \
    int xdata1, xdata2; \
-asm volatile( \
+__asm__ volatile( \
         "subs %[count], %[count], #2       \n\t" \
         "ble 2f                            \n\t" \
         "ldrd r4, [%[block], #0]           \n\t" \

+ 1 - 1
libavcodec/armv4l/mpegvideo_iwmmxt.c

@@ -48,7 +48,7 @@ static void dct_unquantize_h263_intra_iwmmxt(MpegEncContext *s,
     else
         nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
 
-    asm volatile (
+    __asm__ volatile (
 /*      "movd %1, %%mm6                 \n\t" //qmul */
 /*      "packssdw %%mm6, %%mm6          \n\t" */
 /*      "packssdw %%mm6, %%mm6          \n\t" */

Some files were not shown because too many files changed in this diff