/* simple_idct_armv5te.S — ARMv5TE DSP-instruction implementation of the FFmpeg simple IDCT */
/*
 * Simple IDCT
 *
 * Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (c) 2006 Mans Rullgard <mru@inprovide.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
  23. #define W1 22725 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
  24. #define W2 21407 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
  25. #define W3 19266 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
  26. #define W4 16383 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
  27. #define W5 12873 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
  28. #define W6 8867 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
  29. #define W7 4520 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
  30. #define ROW_SHIFT 11
  31. #define COL_SHIFT 20
  32. #define W13 (W1 | (W3 << 16))
  33. #define W26 (W2 | (W6 << 16))
  34. #define W57 (W5 | (W7 << 16))
  35. .text
  36. .align
  37. w13: .long W13
  38. w26: .long W26
  39. w57: .long W57
  40. .align
  41. .type idct_row_armv5te, %function
  42. .func idct_row_armv5te
  43. idct_row_armv5te:
  44. str lr, [sp, #-4]!
  45. ldrd v1, [a1, #8]
  46. ldrd a3, [a1] /* a3 = row[1:0], a4 = row[3:2] */
  47. orrs v1, v1, v2
  48. cmpeq v1, a4
  49. cmpeq v1, a3, lsr #16
  50. beq row_dc_only
  51. mov v1, #(1<<(ROW_SHIFT-1))
  52. mov ip, #16384
  53. sub ip, ip, #1 /* ip = W4 */
  54. smlabb v1, ip, a3, v1 /* v1 = W4*row[0]+(1<<(RS-1)) */
  55. ldr ip, [pc, #(w26-.-8)] /* ip = W2 | (W6 << 16) */
  56. smultb a2, ip, a4
  57. smulbb lr, ip, a4
  58. add v2, v1, a2
  59. sub v3, v1, a2
  60. sub v4, v1, lr
  61. add v1, v1, lr
  62. ldr ip, [pc, #(w13-.-8)] /* ip = W1 | (W3 << 16) */
  63. ldr lr, [pc, #(w57-.-8)] /* lr = W5 | (W7 << 16) */
  64. smulbt v5, ip, a3
  65. smultt v6, lr, a4
  66. smlatt v5, ip, a4, v5
  67. smultt a2, ip, a3
  68. smulbt v7, lr, a3
  69. sub v6, v6, a2
  70. smulbt a2, ip, a4
  71. smultt fp, lr, a3
  72. sub v7, v7, a2
  73. smulbt a2, lr, a4
  74. ldrd a3, [a1, #8] /* a3=row[5:4] a4=row[7:6] */
  75. sub fp, fp, a2
  76. orrs a2, a3, a4
  77. beq 1f
  78. smlabt v5, lr, a3, v5
  79. smlabt v6, ip, a3, v6
  80. smlatt v5, lr, a4, v5
  81. smlabt v6, lr, a4, v6
  82. smlatt v7, lr, a3, v7
  83. smlatt fp, ip, a3, fp
  84. smulbt a2, ip, a4
  85. smlatt v7, ip, a4, v7
  86. sub fp, fp, a2
  87. ldr ip, [pc, #(w26-.-8)] /* ip = W2 | (W6 << 16) */
  88. mov a2, #16384
  89. sub a2, a2, #1 /* a2 = W4 */
  90. smulbb a2, a2, a3 /* a2 = W4*row[4] */
  91. smultb lr, ip, a4 /* lr = W6*row[6] */
  92. add v1, v1, a2 /* v1 += W4*row[4] */
  93. add v1, v1, lr /* v1 += W6*row[6] */
  94. add v4, v4, a2 /* v4 += W4*row[4] */
  95. sub v4, v4, lr /* v4 -= W6*row[6] */
  96. smulbb lr, ip, a4 /* lr = W2*row[6] */
  97. sub v2, v2, a2 /* v2 -= W4*row[4] */
  98. sub v2, v2, lr /* v2 -= W2*row[6] */
  99. sub v3, v3, a2 /* v3 -= W4*row[4] */
  100. add v3, v3, lr /* v3 += W2*row[6] */
  101. 1: add a2, v1, v5
  102. mov a3, a2, lsr #11
  103. bic a3, a3, #0x1f0000
  104. sub a2, v2, v6
  105. mov a2, a2, lsr #11
  106. add a3, a3, a2, lsl #16
  107. add a2, v3, v7
  108. mov a4, a2, lsr #11
  109. bic a4, a4, #0x1f0000
  110. add a2, v4, fp
  111. mov a2, a2, lsr #11
  112. add a4, a4, a2, lsl #16
  113. strd a3, [a1]
  114. sub a2, v4, fp
  115. mov a3, a2, lsr #11
  116. bic a3, a3, #0x1f0000
  117. sub a2, v3, v7
  118. mov a2, a2, lsr #11
  119. add a3, a3, a2, lsl #16
  120. add a2, v2, v6
  121. mov a4, a2, lsr #11
  122. bic a4, a4, #0x1f0000
  123. sub a2, v1, v5
  124. mov a2, a2, lsr #11
  125. add a4, a4, a2, lsl #16
  126. strd a3, [a1, #8]
  127. ldr pc, [sp], #4
  128. row_dc_only:
  129. orr a3, a3, a3, lsl #16
  130. bic a3, a3, #0xe000
  131. mov a3, a3, lsl #3
  132. mov a4, a3
  133. strd a3, [a1]
  134. strd a3, [a1, #8]
  135. ldr pc, [sp], #4
  136. .endfunc
  137. .macro idct_col
  138. ldr a4, [a1] /* a4 = col[1:0] */
  139. mov ip, #16384
  140. sub ip, ip, #1 /* ip = W4 */
  141. #if 0
  142. mov v1, #(1<<(COL_SHIFT-1))
  143. smlabt v2, ip, a4, v1 /* v2 = W4*col[1] + (1<<(COL_SHIFT-1)) */
  144. smlabb v1, ip, a4, v1 /* v1 = W4*col[0] + (1<<(COL_SHIFT-1)) */
  145. ldr a4, [a1, #(16*4)]
  146. #else
  147. mov v1, #((1<<(COL_SHIFT-1))/W4) /* this matches the C version */
  148. add v2, v1, a4, asr #16
  149. rsb v2, v2, v2, lsl #14
  150. mov a4, a4, lsl #16
  151. add v1, v1, a4, asr #16
  152. ldr a4, [a1, #(16*4)]
  153. rsb v1, v1, v1, lsl #14
  154. #endif
  155. smulbb lr, ip, a4
  156. smulbt a3, ip, a4
  157. sub v3, v1, lr
  158. sub v5, v1, lr
  159. add v7, v1, lr
  160. add v1, v1, lr
  161. sub v4, v2, a3
  162. sub v6, v2, a3
  163. add fp, v2, a3
  164. ldr ip, [pc, #(w26-.-8)]
  165. ldr a4, [a1, #(16*2)]
  166. add v2, v2, a3
  167. smulbb lr, ip, a4
  168. smultb a3, ip, a4
  169. add v1, v1, lr
  170. sub v7, v7, lr
  171. add v3, v3, a3
  172. sub v5, v5, a3
  173. smulbt lr, ip, a4
  174. smultt a3, ip, a4
  175. add v2, v2, lr
  176. sub fp, fp, lr
  177. add v4, v4, a3
  178. ldr a4, [a1, #(16*6)]
  179. sub v6, v6, a3
  180. smultb lr, ip, a4
  181. smulbb a3, ip, a4
  182. add v1, v1, lr
  183. sub v7, v7, lr
  184. sub v3, v3, a3
  185. add v5, v5, a3
  186. smultt lr, ip, a4
  187. smulbt a3, ip, a4
  188. add v2, v2, lr
  189. sub fp, fp, lr
  190. sub v4, v4, a3
  191. add v6, v6, a3
  192. stmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp}
  193. ldr ip, [pc, #(w13-.-8)]
  194. ldr a4, [a1, #(16*1)]
  195. ldr lr, [pc, #(w57-.-8)]
  196. smulbb v1, ip, a4
  197. smultb v3, ip, a4
  198. smulbb v5, lr, a4
  199. smultb v7, lr, a4
  200. smulbt v2, ip, a4
  201. smultt v4, ip, a4
  202. smulbt v6, lr, a4
  203. smultt fp, lr, a4
  204. rsb v4, v4, #0
  205. ldr a4, [a1, #(16*3)]
  206. rsb v3, v3, #0
  207. smlatb v1, ip, a4, v1
  208. smlatb v3, lr, a4, v3
  209. smulbb a3, ip, a4
  210. smulbb a2, lr, a4
  211. sub v5, v5, a3
  212. sub v7, v7, a2
  213. smlatt v2, ip, a4, v2
  214. smlatt v4, lr, a4, v4
  215. smulbt a3, ip, a4
  216. smulbt a2, lr, a4
  217. sub v6, v6, a3
  218. ldr a4, [a1, #(16*5)]
  219. sub fp, fp, a2
  220. smlabb v1, lr, a4, v1
  221. smlabb v3, ip, a4, v3
  222. smlatb v5, lr, a4, v5
  223. smlatb v7, ip, a4, v7
  224. smlabt v2, lr, a4, v2
  225. smlabt v4, ip, a4, v4
  226. smlatt v6, lr, a4, v6
  227. ldr a3, [a1, #(16*7)]
  228. smlatt fp, ip, a4, fp
  229. smlatb v1, lr, a3, v1
  230. smlabb v3, lr, a3, v3
  231. smlatb v5, ip, a3, v5
  232. smulbb a4, ip, a3
  233. smlatt v2, lr, a3, v2
  234. sub v7, v7, a4
  235. smlabt v4, lr, a3, v4
  236. smulbt a4, ip, a3
  237. smlatt v6, ip, a3, v6
  238. sub fp, fp, a4
  239. .endm
  240. .align
  241. .type idct_col_armv5te, %function
  242. .func idct_col_armv5te
  243. idct_col_armv5te:
  244. str lr, [sp, #-4]!
  245. idct_col
  246. ldmfd sp!, {a3, a4}
  247. adds a2, a3, v1
  248. mov a2, a2, lsr #20
  249. orrmi a2, a2, #0xf000
  250. add ip, a4, v2
  251. mov ip, ip, asr #20
  252. orr a2, a2, ip, lsl #16
  253. str a2, [a1]
  254. subs a3, a3, v1
  255. mov a2, a3, lsr #20
  256. orrmi a2, a2, #0xf000
  257. sub a4, a4, v2
  258. mov a4, a4, asr #20
  259. orr a2, a2, a4, lsl #16
  260. ldmfd sp!, {a3, a4}
  261. str a2, [a1, #(16*7)]
  262. subs a2, a3, v3
  263. mov a2, a2, lsr #20
  264. orrmi a2, a2, #0xf000
  265. sub ip, a4, v4
  266. mov ip, ip, asr #20
  267. orr a2, a2, ip, lsl #16
  268. str a2, [a1, #(16*1)]
  269. adds a3, a3, v3
  270. mov a2, a3, lsr #20
  271. orrmi a2, a2, #0xf000
  272. add a4, a4, v4
  273. mov a4, a4, asr #20
  274. orr a2, a2, a4, lsl #16
  275. ldmfd sp!, {a3, a4}
  276. str a2, [a1, #(16*6)]
  277. adds a2, a3, v5
  278. mov a2, a2, lsr #20
  279. orrmi a2, a2, #0xf000
  280. add ip, a4, v6
  281. mov ip, ip, asr #20
  282. orr a2, a2, ip, lsl #16
  283. str a2, [a1, #(16*2)]
  284. subs a3, a3, v5
  285. mov a2, a3, lsr #20
  286. orrmi a2, a2, #0xf000
  287. sub a4, a4, v6
  288. mov a4, a4, asr #20
  289. orr a2, a2, a4, lsl #16
  290. ldmfd sp!, {a3, a4}
  291. str a2, [a1, #(16*5)]
  292. adds a2, a3, v7
  293. mov a2, a2, lsr #20
  294. orrmi a2, a2, #0xf000
  295. add ip, a4, fp
  296. mov ip, ip, asr #20
  297. orr a2, a2, ip, lsl #16
  298. str a2, [a1, #(16*3)]
  299. subs a3, a3, v7
  300. mov a2, a3, lsr #20
  301. orrmi a2, a2, #0xf000
  302. sub a4, a4, fp
  303. mov a4, a4, asr #20
  304. orr a2, a2, a4, lsl #16
  305. str a2, [a1, #(16*4)]
  306. ldr pc, [sp], #4
  307. .endfunc
  308. .align
  309. .type idct_col_put_armv5te, %function
  310. .func idct_col_put_armv5te
  311. idct_col_put_armv5te:
  312. str lr, [sp, #-4]!
  313. idct_col
  314. ldmfd sp!, {a3, a4}
  315. ldr lr, [sp, #32]
  316. add a2, a3, v1
  317. movs a2, a2, asr #20
  318. movmi a2, #0
  319. cmp a2, #255
  320. movgt a2, #255
  321. add ip, a4, v2
  322. movs ip, ip, asr #20
  323. movmi ip, #0
  324. cmp ip, #255
  325. movgt ip, #255
  326. orr a2, a2, ip, lsl #8
  327. sub a3, a3, v1
  328. movs a3, a3, asr #20
  329. movmi a3, #0
  330. cmp a3, #255
  331. movgt a3, #255
  332. sub a4, a4, v2
  333. movs a4, a4, asr #20
  334. movmi a4, #0
  335. cmp a4, #255
  336. ldr v1, [sp, #28]
  337. movgt a4, #255
  338. strh a2, [v1]
  339. add a2, v1, #2
  340. str a2, [sp, #28]
  341. orr a2, a3, a4, lsl #8
  342. rsb v2, lr, lr, lsl #3
  343. ldmfd sp!, {a3, a4}
  344. strh a2, [v2, v1]!
  345. sub a2, a3, v3
  346. movs a2, a2, asr #20
  347. movmi a2, #0
  348. cmp a2, #255
  349. movgt a2, #255
  350. sub ip, a4, v4
  351. movs ip, ip, asr #20
  352. movmi ip, #0
  353. cmp ip, #255
  354. movgt ip, #255
  355. orr a2, a2, ip, lsl #8
  356. strh a2, [v1, lr]!
  357. add a3, a3, v3
  358. movs a2, a3, asr #20
  359. movmi a2, #0
  360. cmp a2, #255
  361. movgt a2, #255
  362. add a4, a4, v4
  363. movs a4, a4, asr #20
  364. movmi a4, #0
  365. cmp a4, #255
  366. movgt a4, #255
  367. orr a2, a2, a4, lsl #8
  368. ldmfd sp!, {a3, a4}
  369. strh a2, [v2, -lr]!
  370. add a2, a3, v5
  371. movs a2, a2, asr #20
  372. movmi a2, #0
  373. cmp a2, #255
  374. movgt a2, #255
  375. add ip, a4, v6
  376. movs ip, ip, asr #20
  377. movmi ip, #0
  378. cmp ip, #255
  379. movgt ip, #255
  380. orr a2, a2, ip, lsl #8
  381. strh a2, [v1, lr]!
  382. sub a3, a3, v5
  383. movs a2, a3, asr #20
  384. movmi a2, #0
  385. cmp a2, #255
  386. movgt a2, #255
  387. sub a4, a4, v6
  388. movs a4, a4, asr #20
  389. movmi a4, #0
  390. cmp a4, #255
  391. movgt a4, #255
  392. orr a2, a2, a4, lsl #8
  393. ldmfd sp!, {a3, a4}
  394. strh a2, [v2, -lr]!
  395. add a2, a3, v7
  396. movs a2, a2, asr #20
  397. movmi a2, #0
  398. cmp a2, #255
  399. movgt a2, #255
  400. add ip, a4, fp
  401. movs ip, ip, asr #20
  402. movmi ip, #0
  403. cmp ip, #255
  404. movgt ip, #255
  405. orr a2, a2, ip, lsl #8
  406. strh a2, [v1, lr]
  407. sub a3, a3, v7
  408. movs a2, a3, asr #20
  409. movmi a2, #0
  410. cmp a2, #255
  411. movgt a2, #255
  412. sub a4, a4, fp
  413. movs a4, a4, asr #20
  414. movmi a4, #0
  415. cmp a4, #255
  416. movgt a4, #255
  417. orr a2, a2, a4, lsl #8
  418. strh a2, [v2, -lr]
  419. ldr pc, [sp], #4
  420. .endfunc
  421. .align
  422. .type idct_col_add_armv5te, %function
  423. .func idct_col_add_armv5te
  424. idct_col_add_armv5te:
  425. str lr, [sp, #-4]!
  426. idct_col
  427. ldr lr, [sp, #36]
  428. ldmfd sp!, {a3, a4}
  429. ldrh ip, [lr]
  430. add a2, a3, v1
  431. mov a2, a2, asr #20
  432. sub a3, a3, v1
  433. and v1, ip, #255
  434. adds a2, a2, v1
  435. movmi a2, #0
  436. cmp a2, #255
  437. movgt a2, #255
  438. add v1, a4, v2
  439. mov v1, v1, asr #20
  440. adds v1, v1, ip, lsr #8
  441. movmi v1, #0
  442. cmp v1, #255
  443. movgt v1, #255
  444. orr a2, a2, v1, lsl #8
  445. ldr v1, [sp, #32]
  446. sub a4, a4, v2
  447. rsb v2, v1, v1, lsl #3
  448. ldrh ip, [v2, lr]!
  449. strh a2, [lr]
  450. mov a3, a3, asr #20
  451. and a2, ip, #255
  452. adds a3, a3, a2
  453. movmi a3, #0
  454. cmp a3, #255
  455. movgt a3, #255
  456. mov a4, a4, asr #20
  457. adds a4, a4, ip, lsr #8
  458. movmi a4, #0
  459. cmp a4, #255
  460. movgt a4, #255
  461. add a2, lr, #2
  462. str a2, [sp, #28]
  463. orr a2, a3, a4, lsl #8
  464. strh a2, [v2]
  465. ldmfd sp!, {a3, a4}
  466. ldrh ip, [lr, v1]!
  467. sub a2, a3, v3
  468. mov a2, a2, asr #20
  469. add a3, a3, v3
  470. and v3, ip, #255
  471. adds a2, a2, v3
  472. movmi a2, #0
  473. cmp a2, #255
  474. movgt a2, #255
  475. sub v3, a4, v4
  476. mov v3, v3, asr #20
  477. adds v3, v3, ip, lsr #8
  478. movmi v3, #0
  479. cmp v3, #255
  480. movgt v3, #255
  481. orr a2, a2, v3, lsl #8
  482. add a4, a4, v4
  483. ldrh ip, [v2, -v1]!
  484. strh a2, [lr]
  485. mov a3, a3, asr #20
  486. and a2, ip, #255
  487. adds a3, a3, a2
  488. movmi a3, #0
  489. cmp a3, #255
  490. movgt a3, #255
  491. mov a4, a4, asr #20
  492. adds a4, a4, ip, lsr #8
  493. movmi a4, #0
  494. cmp a4, #255
  495. movgt a4, #255
  496. orr a2, a3, a4, lsl #8
  497. strh a2, [v2]
  498. ldmfd sp!, {a3, a4}
  499. ldrh ip, [lr, v1]!
  500. add a2, a3, v5
  501. mov a2, a2, asr #20
  502. sub a3, a3, v5
  503. and v3, ip, #255
  504. adds a2, a2, v3
  505. movmi a2, #0
  506. cmp a2, #255
  507. movgt a2, #255
  508. add v3, a4, v6
  509. mov v3, v3, asr #20
  510. adds v3, v3, ip, lsr #8
  511. movmi v3, #0
  512. cmp v3, #255
  513. movgt v3, #255
  514. orr a2, a2, v3, lsl #8
  515. sub a4, a4, v6
  516. ldrh ip, [v2, -v1]!
  517. strh a2, [lr]
  518. mov a3, a3, asr #20
  519. and a2, ip, #255
  520. adds a3, a3, a2
  521. movmi a3, #0
  522. cmp a3, #255
  523. movgt a3, #255
  524. mov a4, a4, asr #20
  525. adds a4, a4, ip, lsr #8
  526. movmi a4, #0
  527. cmp a4, #255
  528. movgt a4, #255
  529. orr a2, a3, a4, lsl #8
  530. strh a2, [v2]
  531. ldmfd sp!, {a3, a4}
  532. ldrh ip, [lr, v1]!
  533. add a2, a3, v7
  534. mov a2, a2, asr #20
  535. sub a3, a3, v7
  536. and v3, ip, #255
  537. adds a2, a2, v3
  538. movmi a2, #0
  539. cmp a2, #255
  540. movgt a2, #255
  541. add v3, a4, fp
  542. mov v3, v3, asr #20
  543. adds v3, v3, ip, lsr #8
  544. movmi v3, #0
  545. cmp v3, #255
  546. movgt v3, #255
  547. orr a2, a2, v3, lsl #8
  548. sub a4, a4, fp
  549. ldrh ip, [v2, -v1]!
  550. strh a2, [lr]
  551. mov a3, a3, asr #20
  552. and a2, ip, #255
  553. adds a3, a3, a2
  554. movmi a3, #0
  555. cmp a3, #255
  556. movgt a3, #255
  557. mov a4, a4, asr #20
  558. adds a4, a4, ip, lsr #8
  559. movmi a4, #0
  560. cmp a4, #255
  561. movgt a4, #255
  562. orr a2, a3, a4, lsl #8
  563. strh a2, [v2]
  564. ldr pc, [sp], #4
  565. .endfunc
  566. .align
  567. .global simple_idct_armv5te
  568. .type simple_idct_armv5te, %function
  569. .func simple_idct_armv5te
  570. simple_idct_armv5te:
  571. stmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, lr}
  572. bl idct_row_armv5te
  573. add a1, a1, #16
  574. bl idct_row_armv5te
  575. add a1, a1, #16
  576. bl idct_row_armv5te
  577. add a1, a1, #16
  578. bl idct_row_armv5te
  579. add a1, a1, #16
  580. bl idct_row_armv5te
  581. add a1, a1, #16
  582. bl idct_row_armv5te
  583. add a1, a1, #16
  584. bl idct_row_armv5te
  585. add a1, a1, #16
  586. bl idct_row_armv5te
  587. sub a1, a1, #(16*7)
  588. bl idct_col_armv5te
  589. add a1, a1, #4
  590. bl idct_col_armv5te
  591. add a1, a1, #4
  592. bl idct_col_armv5te
  593. add a1, a1, #4
  594. bl idct_col_armv5te
  595. ldmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
  596. .endfunc
  597. .align
  598. .global simple_idct_add_armv5te
  599. .type simple_idct_add_armv5te, %function
  600. .func simple_idct_add_armv5te
  601. simple_idct_add_armv5te:
  602. stmfd sp!, {a1, a2, v1, v2, v3, v4, v5, v6, v7, fp, lr}
  603. mov a1, a3
  604. bl idct_row_armv5te
  605. add a1, a1, #16
  606. bl idct_row_armv5te
  607. add a1, a1, #16
  608. bl idct_row_armv5te
  609. add a1, a1, #16
  610. bl idct_row_armv5te
  611. add a1, a1, #16
  612. bl idct_row_armv5te
  613. add a1, a1, #16
  614. bl idct_row_armv5te
  615. add a1, a1, #16
  616. bl idct_row_armv5te
  617. add a1, a1, #16
  618. bl idct_row_armv5te
  619. sub a1, a1, #(16*7)
  620. bl idct_col_add_armv5te
  621. add a1, a1, #4
  622. bl idct_col_add_armv5te
  623. add a1, a1, #4
  624. bl idct_col_add_armv5te
  625. add a1, a1, #4
  626. bl idct_col_add_armv5te
  627. add sp, sp, #8
  628. ldmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
  629. .endfunc
  630. .align
  631. .global simple_idct_put_armv5te
  632. .type simple_idct_put_armv5te, %function
  633. .func simple_idct_put_armv5te
  634. simple_idct_put_armv5te:
  635. stmfd sp!, {a1, a2, v1, v2, v3, v4, v5, v6, v7, fp, lr}
  636. mov a1, a3
  637. bl idct_row_armv5te
  638. add a1, a1, #16
  639. bl idct_row_armv5te
  640. add a1, a1, #16
  641. bl idct_row_armv5te
  642. add a1, a1, #16
  643. bl idct_row_armv5te
  644. add a1, a1, #16
  645. bl idct_row_armv5te
  646. add a1, a1, #16
  647. bl idct_row_armv5te
  648. add a1, a1, #16
  649. bl idct_row_armv5te
  650. add a1, a1, #16
  651. bl idct_row_armv5te
  652. sub a1, a1, #(16*7)
  653. bl idct_col_put_armv5te
  654. add a1, a1, #4
  655. bl idct_col_put_armv5te
  656. add a1, a1, #4
  657. bl idct_col_put_armv5te
  658. add a1, a1, #4
  659. bl idct_col_put_armv5te
  660. add sp, sp, #8
  661. ldmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
  662. .endfunc