postprocess_altivec_template.c

/*
 * AltiVec optimizations (C) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * based on code Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/avutil.h"
#include "libavutil/mem_internal.h"
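
/* Transpose an 8x8 block of 16-bit elements spread across eight vector
 * registers.  Three rounds of vec_mergeh/vec_mergel interleaving do the
 * transpose in place: src_a..src_h serve as both inputs and outputs. */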
#define ALTIVEC_TRANSPOSE_8x8_SHORT(src_a,src_b,src_c,src_d,src_e,src_f,src_g,src_h) \
    do {                                                  \
        __typeof__(src_a) tempA1, tempB1, tempC1, tempD1; \
        __typeof__(src_a) tempE1, tempF1, tempG1, tempH1; \
        __typeof__(src_a) tempA2, tempB2, tempC2, tempD2; \
        __typeof__(src_a) tempE2, tempF2, tempG2, tempH2; \
        tempA1 = vec_mergeh(src_a, src_e);                \
        tempB1 = vec_mergel(src_a, src_e);                \
        tempC1 = vec_mergeh(src_b, src_f);                \
        tempD1 = vec_mergel(src_b, src_f);                \
        tempE1 = vec_mergeh(src_c, src_g);                \
        tempF1 = vec_mergel(src_c, src_g);                \
        tempG1 = vec_mergeh(src_d, src_h);                \
        tempH1 = vec_mergel(src_d, src_h);                \
        tempA2 = vec_mergeh(tempA1, tempE1);              \
        tempB2 = vec_mergel(tempA1, tempE1);              \
        tempC2 = vec_mergeh(tempB1, tempF1);              \
        tempD2 = vec_mergel(tempB1, tempF1);              \
        tempE2 = vec_mergeh(tempC1, tempG1);              \
        tempF2 = vec_mergel(tempC1, tempG1);              \
        tempG2 = vec_mergeh(tempD1, tempH1);              \
        tempH2 = vec_mergel(tempD1, tempH1);              \
        src_a = vec_mergeh(tempA2, tempE2);               \
        src_b = vec_mergel(tempA2, tempE2);               \
        src_c = vec_mergeh(tempB2, tempF2);               \
        src_d = vec_mergel(tempB2, tempF2);               \
        src_e = vec_mergeh(tempC2, tempG2);               \
        src_f = vec_mergel(tempC2, tempG2);               \
        src_g = vec_mergeh(tempD2, tempH2);               \
        src_h = vec_mergel(tempD2, tempH2);               \
    } while (0)
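
/* Classify a vertical 8-line span for deblocking.  numEq counts how many
 * of the seven line-to-line differences are small (the unsigned-compare
 * trick in ITER below tests |diff| <= dcOffset).  Returns 2 when the span
 * is not flat enough for the strong filter; for flat spans, returns 1 when
 * the remaining steps fit within 2*QP (safe to low-pass) and 0 otherwise
 * (leave untouched). */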
static inline int vertClassify_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
      this code makes no assumption on src or stride.
      One could remove the recomputation of the perm
      vector by assuming (stride % 16) == 0, unfortunately
      this is not always true.
    */
    short data_0 = ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
    DECLARE_ALIGNED(16, short, data)[8] =
        {
            data_0,
            data_0 * 2 + 1,
            c->QP * 2,
            c->QP * 4
        };
    int numEq;
    uint8_t *src2 = src;
    vector signed short v_dcOffset;
    vector signed short v2QP;
    vector unsigned short v4QP;
    vector unsigned short v_dcThreshold;
    const int properStride = (stride % 16);
    const int srcAlign = ((unsigned long)src2 % 16);
    const int two_vectors = ((srcAlign > 8) || properStride) ? 1 : 0;
    const vector signed int zero = vec_splat_s32(0);
    const vector signed short mask = vec_splat_s16(1);
    vector signed int v_numEq = vec_splat_s32(0);
    vector signed short v_data = vec_ld(0, data);
    vector signed short v_srcAss0, v_srcAss1, v_srcAss2, v_srcAss3,
                        v_srcAss4, v_srcAss5, v_srcAss6, v_srcAss7;
    //FIXME avoid this mess if possible
    register int j0 = 0,
                 j1 = stride,
                 j2 = 2 * stride,
                 j3 = 3 * stride,
                 j4 = 4 * stride,
                 j5 = 5 * stride,
                 j6 = 6 * stride,
                 j7 = 7 * stride;
    vector unsigned char v_srcA0, v_srcA1, v_srcA2, v_srcA3,
                         v_srcA4, v_srcA5, v_srcA6, v_srcA7;

    v_dcOffset = vec_splat(v_data, 0);
    v_dcThreshold = (vector unsigned short)vec_splat(v_data, 1);
    v2QP = vec_splat(v_data, 2);
    v4QP = (vector unsigned short)vec_splat(v_data, 3);

    src2 += stride * 4;

#define LOAD_LINE(i) \
    { \
    vector unsigned char perm##i = vec_lvsl(j##i, src2); \
    vector unsigned char v_srcA2##i; \
    vector unsigned char v_srcA1##i = vec_ld(j##i, src2); \
    if (two_vectors) \
        v_srcA2##i = vec_ld(j##i + 16, src2); \
    v_srcA##i = \
        vec_perm(v_srcA1##i, v_srcA2##i, perm##i); \
    v_srcAss##i = \
        (vector signed short)vec_mergeh((vector signed char)zero, \
                                        (vector signed char)v_srcA##i); }

#define LOAD_LINE_ALIGNED(i) \
    v_srcA##i = vec_ld(j##i, src2); \
    v_srcAss##i = \
        (vector signed short)vec_mergeh((vector signed char)zero, \
                                        (vector signed char)v_srcA##i)
    /* Special-casing the aligned case is worthwhile, as all calls from
     * the (transposed) horizontal deblocks will be aligned, in addition
     * to the naturally aligned vertical deblocks. */
    if (properStride && srcAlign) {
        LOAD_LINE_ALIGNED(0);
        LOAD_LINE_ALIGNED(1);
        LOAD_LINE_ALIGNED(2);
        LOAD_LINE_ALIGNED(3);
        LOAD_LINE_ALIGNED(4);
        LOAD_LINE_ALIGNED(5);
        LOAD_LINE_ALIGNED(6);
        LOAD_LINE_ALIGNED(7);
    } else {
        LOAD_LINE(0);
        LOAD_LINE(1);
        LOAD_LINE(2);
        LOAD_LINE(3);
        LOAD_LINE(4);
        LOAD_LINE(5);
        LOAD_LINE(6);
        LOAD_LINE(7);
    }
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED
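
    /* |a - b| <= dcOffset without a separate abs(): adding dcOffset and
     * doing an unsigned compare against dcThreshold (= 2*dcOffset + 1)
     * accepts exactly the differences in [-dcOffset, +dcOffset]; each
     * accepted lane then contributes 1 via the splatted mask. */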
#define ITER(i, j) \
    const vector signed short v_diff##i = \
        vec_sub(v_srcAss##i, v_srcAss##j); \
    const vector signed short v_sum##i = \
        vec_add(v_diff##i, v_dcOffset); \
    const vector signed short v_comp##i = \
        (vector signed short)vec_cmplt((vector unsigned short)v_sum##i, \
                                       v_dcThreshold); \
    const vector signed short v_part##i = vec_and(mask, v_comp##i);

    {
        ITER(0, 1)
        ITER(1, 2)
        ITER(2, 3)
        ITER(3, 4)
        ITER(4, 5)
        ITER(5, 6)
        ITER(6, 7)

        v_numEq = vec_sum4s(v_part0, v_numEq);
        v_numEq = vec_sum4s(v_part1, v_numEq);
        v_numEq = vec_sum4s(v_part2, v_numEq);
        v_numEq = vec_sum4s(v_part3, v_numEq);
        v_numEq = vec_sum4s(v_part4, v_numEq);
        v_numEq = vec_sum4s(v_part5, v_numEq);
        v_numEq = vec_sum4s(v_part6, v_numEq);
    }
#undef ITER

    v_numEq = vec_sums(v_numEq, zero);
    v_numEq = vec_splat(v_numEq, 3);
    vec_ste(v_numEq, 0, &numEq);

    if (numEq > c->ppMode.flatnessThreshold) {
        const vector unsigned char mmoP1 = (const vector unsigned char)
            {0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
             0x00, 0x01, 0x12, 0x13, 0x08, 0x09, 0x1A, 0x1B};
        const vector unsigned char mmoP2 = (const vector unsigned char)
            {0x04, 0x05, 0x16, 0x17, 0x0C, 0x0D, 0x1E, 0x1F,
             0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f};
        const vector unsigned char mmoP = (const vector unsigned char)
            vec_lvsl(8, (unsigned char*)0);
        vector signed short mmoL1 = vec_perm(v_srcAss0, v_srcAss2, mmoP1);
        vector signed short mmoL2 = vec_perm(v_srcAss4, v_srcAss6, mmoP2);
        vector signed short mmoL = vec_perm(mmoL1, mmoL2, mmoP);
        vector signed short mmoR1 = vec_perm(v_srcAss5, v_srcAss7, mmoP1);
        vector signed short mmoR2 = vec_perm(v_srcAss1, v_srcAss3, mmoP2);
        vector signed short mmoR = vec_perm(mmoR1, mmoR2, mmoP);
        vector signed short mmoDiff = vec_sub(mmoL, mmoR);
        vector unsigned short mmoSum = (vector unsigned short)vec_add(mmoDiff, v2QP);
        if (vec_any_gt(mmoSum, v4QP))
            return 0;
        else
            return 1;
    } else
        return 2;
}
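
/* Strong vertical low-pass over a 10-line window (starting 3 lines above
 * the block edge).  v_first/v_last substitute the outermost lines with
 * their neighbours when the outer step reaches QP, and each output line j
 * becomes (sumsB[j-1] + 2*line[j] + sumsB[j+1]) >> 4; see the note on the
 * incremental sums below. */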
static inline void doVertLowPass_altivec(uint8_t *src, int stride, PPContext *c) {
    /*
      this code makes no assumption on src or stride.
      One could remove the recomputation of the perm
      vector by assuming (stride % 16) == 0, unfortunately
      this is not always true. Quite a lot of load/stores
      can be removed by assuming proper alignment of
      src & stride :-(
    */
    uint8_t *src2 = src;
    const vector signed int zero = vec_splat_s32(0);
    const int properStride = (stride % 16);
    const int srcAlign = ((unsigned long)src2 % 16);
    DECLARE_ALIGNED(16, short, qp)[8] = {c->QP};
    vector signed short vqp = vec_ld(0, qp);
    vector signed short vb0, vb1, vb2, vb3, vb4, vb5, vb6, vb7, vb8, vb9;
    vector unsigned char vbA0, av_uninit(vbA1), av_uninit(vbA2), av_uninit(vbA3),
                         av_uninit(vbA4), av_uninit(vbA5), av_uninit(vbA6),
                         av_uninit(vbA7), av_uninit(vbA8), vbA9;
    vector unsigned char vbB0, av_uninit(vbB1), av_uninit(vbB2), av_uninit(vbB3),
                         av_uninit(vbB4), av_uninit(vbB5), av_uninit(vbB6),
                         av_uninit(vbB7), av_uninit(vbB8), vbB9;
    vector unsigned char vbT0, vbT1, vbT2, vbT3, vbT4, vbT5, vbT6, vbT7, vbT8, vbT9;
    vector unsigned char perml0, perml1, perml2, perml3, perml4,
                         perml5, perml6, perml7, perml8, perml9;
    register int j0 = 0,
                 j1 = stride,
                 j2 = 2 * stride,
                 j3 = 3 * stride,
                 j4 = 4 * stride,
                 j5 = 5 * stride,
                 j6 = 6 * stride,
                 j7 = 7 * stride,
                 j8 = 8 * stride,
                 j9 = 9 * stride;

    vqp = vec_splat(vqp, 0);

    src2 += stride * 3;

#define LOAD_LINE(i) \
    perml##i = vec_lvsl(i * stride, src2); \
    vbA##i = vec_ld(i * stride, src2); \
    vbB##i = vec_ld(i * stride + 16, src2); \
    vbT##i = vec_perm(vbA##i, vbB##i, perml##i); \
    vb##i = \
        (vector signed short)vec_mergeh((vector unsigned char)zero, \
                                        (vector unsigned char)vbT##i)

#define LOAD_LINE_ALIGNED(i) \
    vbT##i = vec_ld(j##i, src2); \
    vb##i = \
        (vector signed short)vec_mergeh((vector signed char)zero, \
                                        (vector signed char)vbT##i)
    /* Special-casing the aligned case is worthwhile, as all calls from
     * the (transposed) horizontal deblocks will be aligned, in addition
     * to the naturally aligned vertical deblocks. */
    if (properStride && srcAlign) {
        LOAD_LINE_ALIGNED(0);
        LOAD_LINE_ALIGNED(1);
        LOAD_LINE_ALIGNED(2);
        LOAD_LINE_ALIGNED(3);
        LOAD_LINE_ALIGNED(4);
        LOAD_LINE_ALIGNED(5);
        LOAD_LINE_ALIGNED(6);
        LOAD_LINE_ALIGNED(7);
        LOAD_LINE_ALIGNED(8);
        LOAD_LINE_ALIGNED(9);
    } else {
        LOAD_LINE(0);
        LOAD_LINE(1);
        LOAD_LINE(2);
        LOAD_LINE(3);
        LOAD_LINE(4);
        LOAD_LINE(5);
        LOAD_LINE(6);
        LOAD_LINE(7);
        LOAD_LINE(8);
        LOAD_LINE(9);
    }
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED
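
    /* The window sums v_sumsB0..v_sumsB9 are built incrementally: each one
     * takes the previous sum, subtracts the sample leaving the window and
     * adds the one entering it, so the whole set costs two operations per
     * line instead of a full re-summation. */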
    {
        const vector unsigned short v_2 = vec_splat_u16(2);
        const vector unsigned short v_4 = vec_splat_u16(4);

        const vector signed short v_diff01 = vec_sub(vb0, vb1);
        const vector unsigned short v_cmp01 =
            (const vector unsigned short) vec_cmplt(vec_abs(v_diff01), vqp);
        const vector signed short v_first = vec_sel(vb1, vb0, v_cmp01);
        const vector signed short v_diff89 = vec_sub(vb8, vb9);
        const vector unsigned short v_cmp89 =
            (const vector unsigned short) vec_cmplt(vec_abs(v_diff89), vqp);
        const vector signed short v_last = vec_sel(vb8, vb9, v_cmp89);

        const vector signed short temp01 = vec_mladd(v_first, (vector signed short)v_4, vb1);
        const vector signed short temp02 = vec_add(vb2, vb3);
        const vector signed short temp03 = vec_add(temp01, (vector signed short)v_4);
        const vector signed short v_sumsB0 = vec_add(temp02, temp03);
        const vector signed short temp11 = vec_sub(v_sumsB0, v_first);
        const vector signed short v_sumsB1 = vec_add(temp11, vb4);
        const vector signed short temp21 = vec_sub(v_sumsB1, v_first);
        const vector signed short v_sumsB2 = vec_add(temp21, vb5);
        const vector signed short temp31 = vec_sub(v_sumsB2, v_first);
        const vector signed short v_sumsB3 = vec_add(temp31, vb6);
        const vector signed short temp41 = vec_sub(v_sumsB3, v_first);
        const vector signed short v_sumsB4 = vec_add(temp41, vb7);
        const vector signed short temp51 = vec_sub(v_sumsB4, vb1);
        const vector signed short v_sumsB5 = vec_add(temp51, vb8);
        const vector signed short temp61 = vec_sub(v_sumsB5, vb2);
        const vector signed short v_sumsB6 = vec_add(temp61, v_last);
        const vector signed short temp71 = vec_sub(v_sumsB6, vb3);
        const vector signed short v_sumsB7 = vec_add(temp71, v_last);
        const vector signed short temp81 = vec_sub(v_sumsB7, vb4);
        const vector signed short v_sumsB8 = vec_add(temp81, v_last);
        const vector signed short temp91 = vec_sub(v_sumsB8, vb5);
        const vector signed short v_sumsB9 = vec_add(temp91, v_last);

#define COMPUTE_VR(i, j, k) \
        const vector signed short temps1##i = \
            vec_add(v_sumsB##i, v_sumsB##k); \
        const vector signed short temps2##i = \
            vec_mladd(vb##j, (vector signed short)v_2, temps1##i); \
        const vector signed short vr##j = vec_sra(temps2##i, v_4)

        COMPUTE_VR(0, 1, 2);
        COMPUTE_VR(1, 2, 3);
        COMPUTE_VR(2, 3, 4);
        COMPUTE_VR(3, 4, 5);
        COMPUTE_VR(4, 5, 6);
        COMPUTE_VR(5, 6, 7);
        COMPUTE_VR(6, 7, 8);
        COMPUTE_VR(7, 8, 9);

        const vector signed char neg1 = vec_splat_s8(-1);
        const vector unsigned char permHH = (const vector unsigned char)
            {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
             0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};
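
        /* Misaligned store pattern: vec_lvsr yields the rotation needed
         * for this address, the zero/neg1 permute turns it into a
         * byte-select mask, and vec_sel splices the rotated data into the
         * two 16-byte blocks it straddles, so surrounding bytes are
         * written back unchanged. */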
#define PACK_AND_STORE(i) \
        { const vector unsigned char perms##i = \
              vec_lvsr(i * stride, src2); \
          const vector unsigned char vf##i = \
              vec_packsu(vr##i, (vector signed short)zero); \
          const vector unsigned char vg##i = \
              vec_perm(vf##i, vbT##i, permHH); \
          const vector unsigned char mask##i = \
              vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
          const vector unsigned char vg2##i = \
              vec_perm(vg##i, vg##i, perms##i); \
          const vector unsigned char svA##i = \
              vec_sel(vbA##i, vg2##i, mask##i); \
          const vector unsigned char svB##i = \
              vec_sel(vg2##i, vbB##i, mask##i); \
          vec_st(svA##i, i * stride, src2); \
          vec_st(svB##i, i * stride + 16, src2); }

#define PACK_AND_STORE_ALIGNED(i) \
        { const vector unsigned char vf##i = \
              vec_packsu(vr##i, (vector signed short)zero); \
          const vector unsigned char vg##i = \
              vec_perm(vf##i, vbT##i, permHH); \
          vec_st(vg##i, i * stride, src2); }
        /* Special-casing the aligned case is worthwhile, as all calls from
         * the (transposed) horizontal deblocks will be aligned, in addition
         * to the naturally aligned vertical deblocks. */
        if (properStride && srcAlign) {
            PACK_AND_STORE_ALIGNED(1)
            PACK_AND_STORE_ALIGNED(2)
            PACK_AND_STORE_ALIGNED(3)
            PACK_AND_STORE_ALIGNED(4)
            PACK_AND_STORE_ALIGNED(5)
            PACK_AND_STORE_ALIGNED(6)
            PACK_AND_STORE_ALIGNED(7)
            PACK_AND_STORE_ALIGNED(8)
        } else {
            PACK_AND_STORE(1)
            PACK_AND_STORE(2)
            PACK_AND_STORE(3)
            PACK_AND_STORE(4)
            PACK_AND_STORE(5)
            PACK_AND_STORE(6)
            PACK_AND_STORE(7)
            PACK_AND_STORE(8)
        }
#undef PACK_AND_STORE
#undef PACK_AND_STORE_ALIGNED
    }
}
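
/* Default (weaker) vertical deblock.  Computes the middle, left and right
 * "energies" around the edge between lines 4 and 5 and, when the middle
 * energy stays below 8*QP, nudges l4 and l5 towards each other by a
 * correction d, clamped against (l4-l5)/2 so the two lines cannot cross. */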
static inline void doVertDefFilter_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
      this code makes no assumption on src or stride.
      One could remove the recomputation of the perm
      vector by assuming (stride % 16) == 0, unfortunately
      this is not always true. Quite a lot of load/stores
      can be removed by assuming proper alignment of
      src & stride :-(
    */
    uint8_t *src2 = src + stride * 3;
    const vector signed int zero = vec_splat_s32(0);
    DECLARE_ALIGNED(16, short, qp)[8] = {8*c->QP};
    vector signed short vqp = vec_splat(
        (vector signed short)vec_ld(0, qp), 0);

#define LOAD_LINE(i) \
    const vector unsigned char perm##i = \
        vec_lvsl(i * stride, src2); \
    const vector unsigned char vbA##i = \
        vec_ld(i * stride, src2); \
    const vector unsigned char vbB##i = \
        vec_ld(i * stride + 16, src2); \
    const vector unsigned char vbT##i = \
        vec_perm(vbA##i, vbB##i, perm##i); \
    const vector signed short vb##i = \
        (vector signed short)vec_mergeh((vector unsigned char)zero, \
                                        (vector unsigned char)vbT##i)

    LOAD_LINE(1);
    LOAD_LINE(2);
    LOAD_LINE(3);
    LOAD_LINE(4);
    LOAD_LINE(5);
    LOAD_LINE(6);
    LOAD_LINE(7);
    LOAD_LINE(8);
#undef LOAD_LINE

    const vector signed short v_1 = vec_splat_s16(1);
    const vector signed short v_2 = vec_splat_s16(2);
    const vector signed short v_5 = vec_splat_s16(5);
    const vector signed short v_32 = vec_sl(v_1,
                                            (vector unsigned short)v_5);
    /* middle energy */
    const vector signed short l3minusl6 = vec_sub(vb3, vb6);
    const vector signed short l5minusl4 = vec_sub(vb5, vb4);
    const vector signed short twotimes_l3minusl6 = vec_mladd(v_2, l3minusl6, (vector signed short)zero);
    const vector signed short mE = vec_mladd(v_5, l5minusl4, twotimes_l3minusl6);
    const vector signed short absmE = vec_abs(mE);
    /* left & right energy */
    const vector signed short l1minusl4 = vec_sub(vb1, vb4);
    const vector signed short l3minusl2 = vec_sub(vb3, vb2);
    const vector signed short l5minusl8 = vec_sub(vb5, vb8);
    const vector signed short l7minusl6 = vec_sub(vb7, vb6);
    const vector signed short twotimes_l1minusl4 = vec_mladd(v_2, l1minusl4, (vector signed short)zero);
    const vector signed short twotimes_l5minusl8 = vec_mladd(v_2, l5minusl8, (vector signed short)zero);
    const vector signed short lE = vec_mladd(v_5, l3minusl2, twotimes_l1minusl4);
    const vector signed short rE = vec_mladd(v_5, l7minusl6, twotimes_l5minusl8);
    /* d */
    const vector signed short ddiff = vec_sub(absmE,
                                              vec_min(vec_abs(lE),
                                                      vec_abs(rE)));
    const vector signed short ddiffclamp = vec_max(ddiff, (vector signed short)zero);
    const vector signed short dtimes64 = vec_mladd(v_5, ddiffclamp, v_32);
    const vector signed short d = vec_sra(dtimes64, vec_splat_u16(6));
    const vector signed short minusd = vec_sub((vector signed short)zero, d);
    const vector signed short finald = vec_sel(minusd,
                                               d,
                                               vec_cmpgt(vec_sub((vector signed short)zero, mE),
                                                         (vector signed short)zero));
    /* q */
    const vector signed short qtimes2 = vec_sub(vb4, vb5);
    /* for a shift right to behave like /2, we need to add one
       to all negative integers */
    const vector signed short rounddown = vec_sel((vector signed short)zero,
                                                  v_1,
                                                  vec_cmplt(qtimes2, (vector signed short)zero));
    const vector signed short q = vec_sra(vec_add(qtimes2, rounddown), vec_splat_u16(1));
    /* clamp */
    const vector signed short dclamp_P1 = vec_max((vector signed short)zero, finald);
    const vector signed short dclamp_P = vec_min(dclamp_P1, q);
    const vector signed short dclamp_N1 = vec_min((vector signed short)zero, finald);
    const vector signed short dclamp_N = vec_max(dclamp_N1, q);
    const vector signed short dclampedfinal = vec_sel(dclamp_N,
                                                      dclamp_P,
                                                      vec_cmpgt(q, (vector signed short)zero));
    const vector signed short dornotd = vec_sel((vector signed short)zero,
                                                dclampedfinal,
                                                vec_cmplt(absmE, vqp));
    /* add/subtract to l4 and l5 */
    const vector signed short vb4minusd = vec_sub(vb4, dornotd);
    const vector signed short vb5plusd = vec_add(vb5, dornotd);
    /* finally, stores */
    const vector unsigned char st4 = vec_packsu(vb4minusd, (vector signed short)zero);
    const vector unsigned char st5 = vec_packsu(vb5plusd, (vector signed short)zero);
    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char)
        {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
         0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};

#define STORE(i) \
    { const vector unsigned char perms##i = \
          vec_lvsr(i * stride, src2); \
      const vector unsigned char vg##i = \
          vec_perm(st##i, vbT##i, permHH); \
      const vector unsigned char mask##i = \
          vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
      const vector unsigned char vg2##i = \
          vec_perm(vg##i, vg##i, perms##i); \
      const vector unsigned char svA##i = \
          vec_sel(vbA##i, vg2##i, mask##i); \
      const vector unsigned char svB##i = \
          vec_sel(vg2##i, vbB##i, mask##i); \
      vec_st(svA##i, i * stride, src2); \
      vec_st(svB##i, i * stride + 16, src2); }

    STORE(4)
    STORE(5)
}
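
/* Dering: smooths ringing inside a 10x10 neighbourhood while keeping hard
 * edges.  First the min/max of the 8x8 core is found; if the contrast is
 * below deringThreshold, nothing is done.  Otherwise each pixel whose 3x3
 * neighbourhood lies entirely on one side of the min/max average is
 * replaced by a 1-2-1 / 2-4-2 / 1-2-1 weighted blur (divided by 16),
 * clamped to within QP/2 + 1 of the original value.  This should match
 * what the C dering filter does. */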
static inline void dering_altivec(uint8_t src[], int stride, PPContext *c) {
    const vector signed int vsint32_8 = vec_splat_s32(8);
    const vector unsigned int vuint32_4 = vec_splat_u32(4);

    const vector signed char neg1 = vec_splat_s8(-1);

    const vector unsigned char permA1 = (vector unsigned char)
        {0x00, 0x01, 0x02, 0x10, 0x11, 0x12, 0x1F, 0x1F,
         0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F};
    const vector unsigned char permA2 = (vector unsigned char)
        {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x10, 0x11,
         0x12, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F};
    const vector unsigned char permA1inc = (vector unsigned char)
        {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00,
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    const vector unsigned char permA2inc = (vector unsigned char)
        {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01,
         0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    const vector unsigned char magic = (vector unsigned char)
        {0x01, 0x02, 0x01, 0x02, 0x04, 0x02, 0x01, 0x02,
         0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    const vector unsigned char extractPerm = (vector unsigned char)
        {0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01,
         0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01};
    const vector unsigned char extractPermInc = (vector unsigned char)
        {0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
         0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01};
    const vector unsigned char identity = vec_lvsl(0, (unsigned char *)0);
    const vector unsigned char tenRight = (vector unsigned char)
        {0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    const vector unsigned char eightLeft = (vector unsigned char)
        {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08};

    /*
      this code makes no assumption on src or stride.
      One could remove the recomputation of the perm
      vector by assuming (stride % 16) == 0, unfortunately
      this is not always true. Quite a lot of load/stores
      can be removed by assuming proper alignment of
      src & stride :-(
    */
    uint8_t *srcCopy = src;
    DECLARE_ALIGNED(16, uint8_t, dt)[16] = { deringThreshold };
    const vector signed int zero = vec_splat_s32(0);
    vector unsigned char v_dt = vec_splat(vec_ld(0, dt), 0);

#define LOAD_LINE(i) \
    const vector unsigned char perm##i = \
        vec_lvsl(i * stride, srcCopy); \
    vector unsigned char sA##i = vec_ld(i * stride, srcCopy); \
    vector unsigned char sB##i = vec_ld(i * stride + 16, srcCopy); \
    vector unsigned char src##i = vec_perm(sA##i, sB##i, perm##i)

    LOAD_LINE(0);
    LOAD_LINE(1);
    LOAD_LINE(2);
    LOAD_LINE(3);
    LOAD_LINE(4);
    LOAD_LINE(5);
    LOAD_LINE(6);
    LOAD_LINE(7);
    LOAD_LINE(8);
    LOAD_LINE(9);
#undef LOAD_LINE

    vector unsigned char v_avg;
    DECLARE_ALIGNED(16, signed int, S)[8];
    DECLARE_ALIGNED(16, int, tQP2)[4] = { c->QP/2 + 1 };
    vector signed int vQP2 = vec_ld(0, tQP2);
    vQP2 = vec_splat(vQP2, 0);
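
    /* Tree reduction of the 8x8 core to a single byte: every EXTRACT round
     * halves the candidate count with mergeh/mergel plus vec_min/vec_max,
     * so v_min and v_max come out splatted across all 16 lanes. */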
    {
        const vector unsigned char trunc_perm = (vector unsigned char)
            {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
             0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18};
        const vector unsigned char trunc_src12 = vec_perm(src1, src2, trunc_perm);
        const vector unsigned char trunc_src34 = vec_perm(src3, src4, trunc_perm);
        const vector unsigned char trunc_src56 = vec_perm(src5, src6, trunc_perm);
        const vector unsigned char trunc_src78 = vec_perm(src7, src8, trunc_perm);

#define EXTRACT(op) do { \
        const vector unsigned char s_1   = vec_##op(trunc_src12, trunc_src34); \
        const vector unsigned char s_2   = vec_##op(trunc_src56, trunc_src78); \
        const vector unsigned char s_6   = vec_##op(s_1, s_2); \
        const vector unsigned char s_8h  = vec_mergeh(s_6, s_6); \
        const vector unsigned char s_8l  = vec_mergel(s_6, s_6); \
        const vector unsigned char s_9   = vec_##op(s_8h, s_8l); \
        const vector unsigned char s_9h  = vec_mergeh(s_9, s_9); \
        const vector unsigned char s_9l  = vec_mergel(s_9, s_9); \
        const vector unsigned char s_10  = vec_##op(s_9h, s_9l); \
        const vector unsigned char s_10h = vec_mergeh(s_10, s_10); \
        const vector unsigned char s_10l = vec_mergel(s_10, s_10); \
        const vector unsigned char s_11  = vec_##op(s_10h, s_10l); \
        const vector unsigned char s_11h = vec_mergeh(s_11, s_11); \
        const vector unsigned char s_11l = vec_mergel(s_11, s_11); \
        v_##op = vec_##op(s_11h, s_11l); \
    } while (0)

        vector unsigned char v_min;
        vector unsigned char v_max;

        EXTRACT(min);
        EXTRACT(max);
#undef EXTRACT

        if (vec_all_lt(vec_sub(v_max, v_min), v_dt))
            return;
        v_avg = vec_avg(v_min, v_max);
    }
    {
        const vector unsigned short mask1 = (vector unsigned short)
            {0x0001, 0x0002, 0x0004, 0x0008,
             0x0010, 0x0020, 0x0040, 0x0080};
        const vector unsigned short mask2 = (vector unsigned short)
            {0x0100, 0x0200, 0x0000, 0x0000,
             0x0000, 0x0000, 0x0000, 0x0000};
        const vector unsigned int vuint32_16 = vec_sl(vec_splat_u32(1), vec_splat_u32(4));
        const vector unsigned int vuint32_1 = vec_splat_u32(1);

        vector signed int sumA2;
        vector signed int sumB2;
        vector signed int sum0, sum1, sum2, sum3, sum4;
        vector signed int sum5, sum6, sum7, sum8, sum9;
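
        /* For each line, build a 10-bit mask whose bit l is set when pixel
         * l is brighter than v_avg: the byte-wise compare result is widened
         * to shorts, ANDed with the one-bit-per-lane masks above, and
         * summed down to a single integer per line. */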
#define COMPARE(i) \
        do { \
            const vector unsigned char cmp = \
                (vector unsigned char)vec_cmpgt(src##i, v_avg); \
            const vector unsigned short cmpHi = \
                (vector unsigned short)vec_mergeh(cmp, cmp); \
            const vector unsigned short cmpLi = \
                (vector unsigned short)vec_mergel(cmp, cmp); \
            const vector signed short cmpHf = \
                (vector signed short)vec_and(cmpHi, mask1); \
            const vector signed short cmpLf = \
                (vector signed short)vec_and(cmpLi, mask2); \
            const vector signed int sump = vec_sum4s(cmpHf, zero); \
            const vector signed int sumq = vec_sum4s(cmpLf, sump); \
            sum##i = vec_sums(sumq, zero); \
        } while (0)

        COMPARE(0);
        COMPARE(1);
        COMPARE(2);
        COMPARE(3);
        COMPARE(4);
        COMPARE(5);
        COMPARE(6);
        COMPARE(7);
        COMPARE(8);
        COMPARE(9);
#undef COMPARE
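
        /* Condense the per-line masks into the per-pixel "filterable"
         * flags stored in S[]: the low halfword tracks pixels above the
         * average and the high halfword (built via nor/shift) pixels below
         * it; the shift-left/shift-right AND requires the two horizontal
         * neighbours to agree, and the 4-/8-byte permutes AND in the lines
         * above and below.  Net effect (AFAICT): bit l+1 of S[i] is set
         * when the 3x3 neighbourhood of pixel (i+1, l+1) lies entirely on
         * one side of the average. */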
        {
            const vector signed int sump02 = vec_mergel(sum0, sum2);
            const vector signed int sump13 = vec_mergel(sum1, sum3);
            const vector signed int sumA = vec_mergel(sump02, sump13);

            const vector signed int sump46 = vec_mergel(sum4, sum6);
            const vector signed int sump57 = vec_mergel(sum5, sum7);
            const vector signed int sumB = vec_mergel(sump46, sump57);

            const vector signed int sump8A = vec_mergel(sum8, zero);
            const vector signed int sump9B = vec_mergel(sum9, zero);
            const vector signed int sumC = vec_mergel(sump8A, sump9B);

            const vector signed int tA = vec_sl(vec_nor(zero, sumA), vuint32_16);
            const vector signed int tB = vec_sl(vec_nor(zero, sumB), vuint32_16);
            const vector signed int tC = vec_sl(vec_nor(zero, sumC), vuint32_16);

            const vector signed int t2A = vec_or(sumA, tA);
            const vector signed int t2B = vec_or(sumB, tB);
            const vector signed int t2C = vec_or(sumC, tC);

            const vector signed int t3A = vec_and(vec_sra(t2A, vuint32_1),
                                                  vec_sl(t2A, vuint32_1));
            const vector signed int t3B = vec_and(vec_sra(t2B, vuint32_1),
                                                  vec_sl(t2B, vuint32_1));
            const vector signed int t3C = vec_and(vec_sra(t2C, vuint32_1),
                                                  vec_sl(t2C, vuint32_1));

            const vector signed int yA = vec_and(t2A, t3A);
            const vector signed int yB = vec_and(t2B, t3B);
            const vector signed int yC = vec_and(t2C, t3C);

            const vector unsigned char strangeperm1 = vec_lvsl(4, (unsigned char*)0);
            const vector unsigned char strangeperm2 = vec_lvsl(8, (unsigned char*)0);
            const vector signed int sumAd4 = vec_perm(yA, yB, strangeperm1);
            const vector signed int sumAd8 = vec_perm(yA, yB, strangeperm2);
            const vector signed int sumBd4 = vec_perm(yB, yC, strangeperm1);
            const vector signed int sumBd8 = vec_perm(yB, yC, strangeperm2);
            const vector signed int sumAp = vec_and(yA,
                                                    vec_and(sumAd4, sumAd8));
            const vector signed int sumBp = vec_and(yB,
                                                    vec_and(sumBd4, sumBd8));
            sumA2 = vec_or(sumAp,
                           vec_sra(sumAp,
                                   vuint32_16));
            sumB2 = vec_or(sumBp,
                           vec_sra(sumBp,
                                   vuint32_16));
        }
        vec_st(sumA2, 0, S);
        vec_st(sumB2, 16, S);
    }

    /* I'm not sure the following is actually faster
       than straight, unvectorized C code :-( */
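
    /* Each F2 filters one pixel of line j: when its flag bit in S[i] is
     * set, vec_msum against the "magic" kernel produces the 3x3 weighted
     * sum, (sum + 8) >> 4 the blurred value, which is clamped to
     * [p - QP/2 - 1, p + QP/2 + 1] and permuted back into place.  The
     * perm/extract/insert vectors are then stepped one pixel to the right
     * for the next call. */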
#define F_INIT() \
    vector unsigned char tenRightM = tenRight; \
    vector unsigned char permA1M = permA1; \
    vector unsigned char permA2M = permA2; \
    vector unsigned char extractPermM = extractPerm

#define F2(i, j, k, l) \
    if (S[i] & (1 << (l+1))) { \
        const vector unsigned char a_A = vec_perm(src##i, src##j, permA1M); \
        const vector unsigned char a_B = vec_perm(a_A, src##k, permA2M); \
        const vector signed int a_sump = \
            (vector signed int)vec_msum(a_B, magic, (vector unsigned int)zero); \
        vector signed int F = vec_sr(vec_sums(a_sump, vsint32_8), vuint32_4); \
        const vector signed int p = \
            (vector signed int)vec_perm(src##j, (vector unsigned char)zero, \
                                        extractPermM); \
        const vector signed int sum = vec_add(p, vQP2); \
        const vector signed int diff = vec_sub(p, vQP2); \
        vector signed int newpm; \
        vector unsigned char newpm2, mask; \
        F = vec_splat(F, 3); \
        if (vec_all_lt(sum, F)) \
            newpm = sum; \
        else if (vec_all_gt(diff, F)) \
            newpm = diff; \
        else \
            newpm = F; \
        newpm2 = vec_splat((vector unsigned char)newpm, 15); \
        mask = vec_add(identity, tenRightM); \
        src##j = vec_perm(src##j, newpm2, mask); \
    } \
    permA1M = vec_add(permA1M, permA1inc); \
    permA2M = vec_add(permA2M, permA2inc); \
    tenRightM = vec_sro(tenRightM, eightLeft); \
    extractPermM = vec_add(extractPermM, extractPermInc)

#define ITER(i, j, k) do { \
    F_INIT(); \
    F2(i, j, k, 0); \
    F2(i, j, k, 1); \
    F2(i, j, k, 2); \
    F2(i, j, k, 3); \
    F2(i, j, k, 4); \
    F2(i, j, k, 5); \
    F2(i, j, k, 6); \
    F2(i, j, k, 7); \
} while (0)

    ITER(0, 1, 2);
    ITER(1, 2, 3);
    ITER(2, 3, 4);
    ITER(3, 4, 5);
    ITER(4, 5, 6);
    ITER(5, 6, 7);
    ITER(6, 7, 8);
    ITER(7, 8, 9);

#define STORE_LINE(i) do { \
    const vector unsigned char permST = \
        vec_lvsr(i * stride, srcCopy); \
    const vector unsigned char maskST = \
        vec_perm((vector unsigned char)zero, \
                 (vector unsigned char)neg1, permST); \
    src##i = vec_perm(src##i, src##i, permST); \
    sA##i = vec_sel(sA##i, src##i, maskST); \
    sB##i = vec_sel(src##i, sB##i, maskST); \
    vec_st(sA##i, i * stride, srcCopy); \
    vec_st(sB##i, i * stride + 16, srcCopy); \
} while (0)

    STORE_LINE(1);
    STORE_LINE(2);
    STORE_LINE(3);
    STORE_LINE(4);
    STORE_LINE(5);
    STORE_LINE(6);
    STORE_LINE(7);
    STORE_LINE(8);
#undef STORE_LINE
#undef ITER
#undef F2
}
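
/* No AltiVec versions of the horizontal filters; fall back to the C
 * implementations (the horizontal cases are otherwise handled by
 * transposing and reusing the vertical code, see the transposes below). */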
#define doHorizLowPass_altivec(a...) doHorizLowPass_C(a)
#define doHorizDefFilter_altivec(a...) doHorizDefFilter_C(a)
#define do_a_deblock_altivec(a...) do_a_deblock_C(a)
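
/* Temporal noise reducer: measures the SSD between the current 8x8 block
 * and its blurred history, smooths that score with the four neighbouring
 * past scores, then either keeps blending towards the history (7:1 or 3:1
 * in its favour), averages 1:1, or gives up and copies the source,
 * depending on where d lands relative to maxNoise[0..2]. */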
static inline void tempNoiseReducer_altivec(uint8_t *src, int stride,
                                            uint8_t *tempBlurred, uint32_t *tempBlurredPast, int *maxNoise)
{
    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char)
        {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
         0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};

    const vector signed int zero = vec_splat_s32(0);
    const vector signed short vsint16_1 = vec_splat_s16(1);
    vector signed int v_dp = zero;
    vector signed int v_sysdp = zero;
    int d, sysd, i;

#define LOAD_LINE(src, i) \
    register int j##src##i = i * stride; \
    vector unsigned char perm##src##i = vec_lvsl(j##src##i, src); \
    const vector unsigned char v_##src##A1##i = vec_ld(j##src##i, src); \
    const vector unsigned char v_##src##A2##i = vec_ld(j##src##i + 16, src); \
    const vector unsigned char v_##src##A##i = \
        vec_perm(v_##src##A1##i, v_##src##A2##i, perm##src##i); \
    vector signed short v_##src##Ass##i = \
        (vector signed short)vec_mergeh((vector signed char)zero, \
                                        (vector signed char)v_##src##A##i)

    LOAD_LINE(src, 0);
    LOAD_LINE(src, 1);
    LOAD_LINE(src, 2);
    LOAD_LINE(src, 3);
    LOAD_LINE(src, 4);
    LOAD_LINE(src, 5);
    LOAD_LINE(src, 6);
    LOAD_LINE(src, 7);

    LOAD_LINE(tempBlurred, 0);
    LOAD_LINE(tempBlurred, 1);
    LOAD_LINE(tempBlurred, 2);
    LOAD_LINE(tempBlurred, 3);
    LOAD_LINE(tempBlurred, 4);
    LOAD_LINE(tempBlurred, 5);
    LOAD_LINE(tempBlurred, 6);
    LOAD_LINE(tempBlurred, 7);
#undef LOAD_LINE

#define ACCUMULATE_DIFFS(i) do { \
        vector signed short v_d = vec_sub(v_tempBlurredAss##i, \
                                          v_srcAss##i); \
        v_dp = vec_msums(v_d, v_d, v_dp); \
        v_sysdp = vec_msums(v_d, vsint16_1, v_sysdp); \
    } while (0)

    ACCUMULATE_DIFFS(0);
    ACCUMULATE_DIFFS(1);
    ACCUMULATE_DIFFS(2);
    ACCUMULATE_DIFFS(3);
    ACCUMULATE_DIFFS(4);
    ACCUMULATE_DIFFS(5);
    ACCUMULATE_DIFFS(6);
    ACCUMULATE_DIFFS(7);
#undef ACCUMULATE_DIFFS

    tempBlurredPast[127] = maxNoise[0];
    tempBlurredPast[128] = maxNoise[1];
    tempBlurredPast[129] = maxNoise[2];

    v_dp = vec_sums(v_dp, zero);
    v_sysdp = vec_sums(v_sysdp, zero);
    v_dp = vec_splat(v_dp, 3);
    v_sysdp = vec_splat(v_sysdp, 3);
    vec_ste(v_dp, 0, &d);
    vec_ste(v_sysdp, 0, &sysd);

    i = d;
    d = (4*d
         + (*(tempBlurredPast-256))
         + (*(tempBlurredPast-1)) + (*(tempBlurredPast+1))
         + (*(tempBlurredPast+256))
         + 4) >> 3;
    *tempBlurredPast = i;

    if (d > maxNoise[1]) {
        if (d < maxNoise[2]) {
#define OP(i) v_tempBlurredAss##i = vec_avg(v_tempBlurredAss##i, v_srcAss##i);
            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        } else {
#define OP(i) v_tempBlurredAss##i = v_srcAss##i;
            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        }
    } else {
        if (d < maxNoise[0]) {
            const vector signed short vsint16_7 = vec_splat_s16(7);
            const vector signed short vsint16_4 = vec_splat_s16(4);
            const vector unsigned short vuint16_3 = vec_splat_u16(3);
#define OP(i) do { \
            const vector signed short v_temp = \
                vec_mladd(v_tempBlurredAss##i, vsint16_7, v_srcAss##i); \
            const vector signed short v_temp2 = vec_add(v_temp, vsint16_4); \
            v_tempBlurredAss##i = vec_sr(v_temp2, vuint16_3); \
        } while (0)
            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        } else {
            const vector signed short vsint16_3 = vec_splat_s16(3);
            const vector signed short vsint16_2 = vec_splat_s16(2);
#define OP(i) do { \
            const vector signed short v_temp = \
                vec_mladd(v_tempBlurredAss##i, vsint16_3, v_srcAss##i); \
            const vector signed short v_temp2 = vec_add(v_temp, vsint16_2); \
            v_tempBlurredAss##i = \
                vec_sr(v_temp2, (vector unsigned short)vsint16_2); \
        } while (0)
            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        }
    }
/* Note: the pack must use line i's own vector; the original had a stray
 * "##1" here, which stored line 1's data for every line. */
#define PACK_AND_STORE(src, i) do { \
    const vector unsigned char perms = vec_lvsr(i * stride, src); \
    const vector unsigned char vf = \
        vec_packsu(v_tempBlurredAss##i, (vector signed short)zero); \
    const vector unsigned char vg = vec_perm(vf, v_##src##A##i, permHH); \
    const vector unsigned char mask = \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms); \
    const vector unsigned char vg2 = vec_perm(vg, vg, perms); \
    const vector unsigned char svA = vec_sel(v_##src##A1##i, vg2, mask); \
    const vector unsigned char svB = vec_sel(vg2, v_##src##A2##i, mask); \
    vec_st(svA, i * stride, src); \
    vec_st(svB, i * stride + 16, src); \
} while (0)
    PACK_AND_STORE(src, 0);
    PACK_AND_STORE(src, 1);
    PACK_AND_STORE(src, 2);
    PACK_AND_STORE(src, 3);
    PACK_AND_STORE(src, 4);
    PACK_AND_STORE(src, 5);
    PACK_AND_STORE(src, 6);
    PACK_AND_STORE(src, 7);

    PACK_AND_STORE(tempBlurred, 0);
    PACK_AND_STORE(tempBlurred, 1);
    PACK_AND_STORE(tempBlurred, 2);
    PACK_AND_STORE(tempBlurred, 3);
    PACK_AND_STORE(tempBlurred, 4);
    PACK_AND_STORE(tempBlurred, 5);
    PACK_AND_STORE(tempBlurred, 6);
    PACK_AND_STORE(tempBlurred, 7);
#undef PACK_AND_STORE
}
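
/* Transpose an 8-line x 16-column block of bytes into a packed,
 * 16-byte-aligned buffer: after the three merge rounds each output row
 * holds one source column's eight bytes followed by zero padding.  This
 * is what lets the horizontal filters reuse the vertical code. */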
static inline void transpose_16x8_char_toPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
    const vector unsigned char zero = vec_splat_u8(0);

#define LOAD_DOUBLE_LINE(i, j) \
    vector unsigned char perm1##i = vec_lvsl(i * stride, src); \
    vector unsigned char perm2##i = vec_lvsl(j * stride, src); \
    vector unsigned char srcA##i = vec_ld(i * stride, src); \
    vector unsigned char srcB##i = vec_ld(i * stride + 16, src); \
    vector unsigned char srcC##i = vec_ld(j * stride, src); \
    vector unsigned char srcD##i = vec_ld(j * stride + 16, src); \
    vector unsigned char src##i = vec_perm(srcA##i, srcB##i, perm1##i); \
    vector unsigned char src##j = vec_perm(srcC##i, srcD##i, perm2##i)

    LOAD_DOUBLE_LINE(0, 1);
    LOAD_DOUBLE_LINE(2, 3);
    LOAD_DOUBLE_LINE(4, 5);
    LOAD_DOUBLE_LINE(6, 7);
#undef LOAD_DOUBLE_LINE

    vector unsigned char tempA = vec_mergeh(src0, zero);
    vector unsigned char tempB = vec_mergel(src0, zero);
    vector unsigned char tempC = vec_mergeh(src1, zero);
    vector unsigned char tempD = vec_mergel(src1, zero);
    vector unsigned char tempE = vec_mergeh(src2, zero);
    vector unsigned char tempF = vec_mergel(src2, zero);
    vector unsigned char tempG = vec_mergeh(src3, zero);
    vector unsigned char tempH = vec_mergel(src3, zero);
    vector unsigned char tempI = vec_mergeh(src4, zero);
    vector unsigned char tempJ = vec_mergel(src4, zero);
    vector unsigned char tempK = vec_mergeh(src5, zero);
    vector unsigned char tempL = vec_mergel(src5, zero);
    vector unsigned char tempM = vec_mergeh(src6, zero);
    vector unsigned char tempN = vec_mergel(src6, zero);
    vector unsigned char tempO = vec_mergeh(src7, zero);
    vector unsigned char tempP = vec_mergel(src7, zero);

    vector unsigned char temp0  = vec_mergeh(tempA, tempI);
    vector unsigned char temp1  = vec_mergel(tempA, tempI);
    vector unsigned char temp2  = vec_mergeh(tempB, tempJ);
    vector unsigned char temp3  = vec_mergel(tempB, tempJ);
    vector unsigned char temp4  = vec_mergeh(tempC, tempK);
    vector unsigned char temp5  = vec_mergel(tempC, tempK);
    vector unsigned char temp6  = vec_mergeh(tempD, tempL);
    vector unsigned char temp7  = vec_mergel(tempD, tempL);
    vector unsigned char temp8  = vec_mergeh(tempE, tempM);
    vector unsigned char temp9  = vec_mergel(tempE, tempM);
    vector unsigned char temp10 = vec_mergeh(tempF, tempN);
    vector unsigned char temp11 = vec_mergel(tempF, tempN);
    vector unsigned char temp12 = vec_mergeh(tempG, tempO);
    vector unsigned char temp13 = vec_mergel(tempG, tempO);
    vector unsigned char temp14 = vec_mergeh(tempH, tempP);
    vector unsigned char temp15 = vec_mergel(tempH, tempP);

    tempA = vec_mergeh(temp0, temp8);
    tempB = vec_mergel(temp0, temp8);
    tempC = vec_mergeh(temp1, temp9);
    tempD = vec_mergel(temp1, temp9);
    tempE = vec_mergeh(temp2, temp10);
    tempF = vec_mergel(temp2, temp10);
    tempG = vec_mergeh(temp3, temp11);
    tempH = vec_mergel(temp3, temp11);
    tempI = vec_mergeh(temp4, temp12);
    tempJ = vec_mergel(temp4, temp12);
    tempK = vec_mergeh(temp5, temp13);
    tempL = vec_mergel(temp5, temp13);
    tempM = vec_mergeh(temp6, temp14);
    tempN = vec_mergel(temp6, temp14);
    tempO = vec_mergeh(temp7, temp15);
    tempP = vec_mergel(temp7, temp15);

    temp0  = vec_mergeh(tempA, tempI);
    temp1  = vec_mergel(tempA, tempI);
    temp2  = vec_mergeh(tempB, tempJ);
    temp3  = vec_mergel(tempB, tempJ);
    temp4  = vec_mergeh(tempC, tempK);
    temp5  = vec_mergel(tempC, tempK);
    temp6  = vec_mergeh(tempD, tempL);
    temp7  = vec_mergel(tempD, tempL);
    temp8  = vec_mergeh(tempE, tempM);
    temp9  = vec_mergel(tempE, tempM);
    temp10 = vec_mergeh(tempF, tempN);
    temp11 = vec_mergel(tempF, tempN);
    temp12 = vec_mergeh(tempG, tempO);
    temp13 = vec_mergel(tempG, tempO);
    temp14 = vec_mergeh(tempH, tempP);
    temp15 = vec_mergel(tempH, tempP);

    vec_st(temp0,    0, dst);
    vec_st(temp1,   16, dst);
    vec_st(temp2,   32, dst);
    vec_st(temp3,   48, dst);
    vec_st(temp4,   64, dst);
    vec_st(temp5,   80, dst);
    vec_st(temp6,   96, dst);
    vec_st(temp7,  112, dst);
    vec_st(temp8,  128, dst);
    vec_st(temp9,  144, dst);
    vec_st(temp10, 160, dst);
    vec_st(temp11, 176, dst);
    vec_st(temp12, 192, dst);
    vec_st(temp13, 208, dst);
    vec_st(temp14, 224, dst);
    vec_st(temp15, 240, dst);
}
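
/* The inverse transpose: reads 16 packed, aligned rows from src, rebuilds
 * the 8x16 byte block and writes it back through the usual lvsr/mask/sel
 * splicing so bytes outside the block survive.  Only the merge paths that
 * feed the eight output rows are computed, which is why fewer temporaries
 * are initialized up front than in the forward transpose. */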
static inline void transpose_8x16_char_fromPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
    const vector unsigned char zero = vec_splat_u8(0);
    const vector signed char neg1 = vec_splat_s8(-1);

#define LOAD_DOUBLE_LINE(i, j) \
    vector unsigned char src##i = vec_ld(i * 16, src); \
    vector unsigned char src##j = vec_ld(j * 16, src)

    LOAD_DOUBLE_LINE(0, 1);
    LOAD_DOUBLE_LINE(2, 3);
    LOAD_DOUBLE_LINE(4, 5);
    LOAD_DOUBLE_LINE(6, 7);
    LOAD_DOUBLE_LINE(8, 9);
    LOAD_DOUBLE_LINE(10, 11);
    LOAD_DOUBLE_LINE(12, 13);
    LOAD_DOUBLE_LINE(14, 15);
#undef LOAD_DOUBLE_LINE

    vector unsigned char tempA = vec_mergeh(src0, src8);
    vector unsigned char tempB;
    vector unsigned char tempC = vec_mergeh(src1, src9);
    vector unsigned char tempD;
    vector unsigned char tempE = vec_mergeh(src2, src10);
    vector unsigned char tempG = vec_mergeh(src3, src11);
    vector unsigned char tempI = vec_mergeh(src4, src12);
    vector unsigned char tempJ;
    vector unsigned char tempK = vec_mergeh(src5, src13);
    vector unsigned char tempL;
    vector unsigned char tempM = vec_mergeh(src6, src14);
    vector unsigned char tempO = vec_mergeh(src7, src15);

    vector unsigned char temp0 = vec_mergeh(tempA, tempI);
    vector unsigned char temp1 = vec_mergel(tempA, tempI);
    vector unsigned char temp2;
    vector unsigned char temp3;
    vector unsigned char temp4 = vec_mergeh(tempC, tempK);
    vector unsigned char temp5 = vec_mergel(tempC, tempK);
    vector unsigned char temp6;
    vector unsigned char temp7;
    vector unsigned char temp8 = vec_mergeh(tempE, tempM);
    vector unsigned char temp9 = vec_mergel(tempE, tempM);
    vector unsigned char temp12 = vec_mergeh(tempG, tempO);
    vector unsigned char temp13 = vec_mergel(tempG, tempO);

    tempA = vec_mergeh(temp0, temp8);
    tempB = vec_mergel(temp0, temp8);
    tempC = vec_mergeh(temp1, temp9);
    tempD = vec_mergel(temp1, temp9);
    tempI = vec_mergeh(temp4, temp12);
    tempJ = vec_mergel(temp4, temp12);
    tempK = vec_mergeh(temp5, temp13);
    tempL = vec_mergel(temp5, temp13);

    temp0 = vec_mergeh(tempA, tempI);
    temp1 = vec_mergel(tempA, tempI);
    temp2 = vec_mergeh(tempB, tempJ);
    temp3 = vec_mergel(tempB, tempJ);
    temp4 = vec_mergeh(tempC, tempK);
    temp5 = vec_mergel(tempC, tempK);
    temp6 = vec_mergeh(tempD, tempL);
    temp7 = vec_mergel(tempD, tempL);

#define STORE_DOUBLE_LINE(i, j) do { \
    vector unsigned char dstAi = vec_ld(i * stride, dst); \
    vector unsigned char dstBi = vec_ld(i * stride + 16, dst); \
    vector unsigned char dstAj = vec_ld(j * stride, dst); \
    vector unsigned char dstBj = vec_ld(j * stride + 16, dst); \
    vector unsigned char aligni = vec_lvsr(i * stride, dst); \
    vector unsigned char alignj = vec_lvsr(j * stride, dst); \
    vector unsigned char maski = \
        vec_perm(zero, (vector unsigned char)neg1, aligni); \
    vector unsigned char maskj = \
        vec_perm(zero, (vector unsigned char)neg1, alignj); \
    vector unsigned char dstRi = vec_perm(temp##i, temp##i, aligni); \
    vector unsigned char dstRj = vec_perm(temp##j, temp##j, alignj); \
    vector unsigned char dstAFi = vec_sel(dstAi, dstRi, maski); \
    vector unsigned char dstBFi = vec_sel(dstRi, dstBi, maski); \
    vector unsigned char dstAFj = vec_sel(dstAj, dstRj, maskj); \
    vector unsigned char dstBFj = vec_sel(dstRj, dstBj, maskj); \
    vec_st(dstAFi, i * stride, dst); \
    vec_st(dstBFi, i * stride + 16, dst); \
    vec_st(dstAFj, j * stride, dst); \
    vec_st(dstBFj, j * stride + 16, dst); \
} while (0)

    STORE_DOUBLE_LINE(0, 1);
    STORE_DOUBLE_LINE(2, 3);
    STORE_DOUBLE_LINE(4, 5);
    STORE_DOUBLE_LINE(6, 7);
}