postprocess_altivec_template.c

/*
    AltiVec optimizations (C) 2004 Romain Dolbeau <romain@dolbeau.org>
    based on code by Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

#ifdef CONFIG_DARWIN
#define AVV(x...) (x)
#else
#define AVV(x...) {x}
#endif
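
/* AVV wraps AltiVec vector literals: Apple's Darwin compilers expect the
   parenthesized form (vector type)(a, b, ...), while FSF GCC expects the
   braced initializer form {a, b, ...}. */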

#define ALTIVEC_TRANSPOSE_8x8_SHORT(src_a,src_b,src_c,src_d,src_e,src_f,src_g,src_h) \
    do { \
        __typeof__(src_a) tempA1, tempB1, tempC1, tempD1; \
        __typeof__(src_a) tempE1, tempF1, tempG1, tempH1; \
        __typeof__(src_a) tempA2, tempB2, tempC2, tempD2; \
        __typeof__(src_a) tempE2, tempF2, tempG2, tempH2; \
        tempA1 = vec_mergeh (src_a, src_e); \
        tempB1 = vec_mergel (src_a, src_e); \
        tempC1 = vec_mergeh (src_b, src_f); \
        tempD1 = vec_mergel (src_b, src_f); \
        tempE1 = vec_mergeh (src_c, src_g); \
        tempF1 = vec_mergel (src_c, src_g); \
        tempG1 = vec_mergeh (src_d, src_h); \
        tempH1 = vec_mergel (src_d, src_h); \
        tempA2 = vec_mergeh (tempA1, tempE1); \
        tempB2 = vec_mergel (tempA1, tempE1); \
        tempC2 = vec_mergeh (tempB1, tempF1); \
        tempD2 = vec_mergel (tempB1, tempF1); \
        tempE2 = vec_mergeh (tempC1, tempG1); \
        tempF2 = vec_mergel (tempC1, tempG1); \
        tempG2 = vec_mergeh (tempD1, tempH1); \
        tempH2 = vec_mergel (tempD1, tempH1); \
        src_a = vec_mergeh (tempA2, tempE2); \
        src_b = vec_mergel (tempA2, tempE2); \
        src_c = vec_mergeh (tempB2, tempF2); \
        src_d = vec_mergel (tempB2, tempF2); \
        src_e = vec_mergeh (tempC2, tempG2); \
        src_f = vec_mergel (tempC2, tempG2); \
        src_g = vec_mergeh (tempD2, tempH2); \
        src_h = vec_mergel (tempD2, tempH2); \
    } while (0)
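
/* The macro above transposes eight vectors of eight shorts entirely in
   registers: three rounds of vec_mergeh/vec_mergel, each interleaving rows
   four apart, move element (i,j) to (j,i) after log2(8) = 3 passes, with no
   loads or stores. */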

static inline int vertClassify_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
      this code makes no assumption on src or stride.
      One could remove the recomputation of the perm
      vector by assuming (stride % 16) == 0, unfortunately
      this is not always true.
    */
    register int y;
    short __attribute__ ((aligned(16))) data[8];
    int numEq;
    uint8_t *src2 = src;
    vector signed short v_dcOffset;
    vector signed short v2QP;
    vector unsigned short v4QP;
    vector unsigned short v_dcThreshold;
    const int properStride = (stride % 16);
    const int srcAlign = ((unsigned long)src2 % 16);
    const int two_vectors = ((srcAlign > 8) || properStride) ? 1 : 0;
    const vector signed int zero = vec_splat_s32(0);
    const vector signed short mask = vec_splat_s16(1);
    vector signed int v_numEq = vec_splat_s32(0);

    data[0] = ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
    data[1] = data[0] * 2 + 1;
    data[2] = c->QP * 2;
    data[3] = c->QP * 4;
    vector signed short v_data = vec_ld(0, data);
    v_dcOffset = vec_splat(v_data, 0);
    v_dcThreshold = (vector unsigned short)vec_splat(v_data, 1);
    v2QP = vec_splat(v_data, 2);
    v4QP = (vector unsigned short)vec_splat(v_data, 3);

    src2 += stride * 4;
    vector signed short v_srcAss0, v_srcAss1, v_srcAss2, v_srcAss3, v_srcAss4, v_srcAss5, v_srcAss6, v_srcAss7;

#define LOAD_LINE(i) \
    register int j##i = i * stride; \
    vector unsigned char perm##i = vec_lvsl(j##i, src2); \
    const vector unsigned char v_srcA1##i = vec_ld(j##i, src2); \
    vector unsigned char v_srcA2##i; \
    if (two_vectors) \
        v_srcA2##i = vec_ld(j##i + 16, src2); \
    const vector unsigned char v_srcA##i = \
        vec_perm(v_srcA1##i, v_srcA2##i, perm##i); \
    v_srcAss##i = \
        (vector signed short)vec_mergeh((vector signed char)zero, \
                                        (vector signed char)v_srcA##i)

#define LOAD_LINE_ALIGNED(i) \
    register int j##i = i * stride; \
    const vector unsigned char v_srcA##i = vec_ld(j##i, src2); \
    v_srcAss##i = \
        (vector signed short)vec_mergeh((vector signed char)zero, \
                                        (vector signed char)v_srcA##i)
    // special-casing the aligned case is worthwhile, as all calls from
    // the (transposed) horizontal deblocks will be aligned, in addition
    // to the naturally aligned vertical deblocks.
    if (properStride && srcAlign) {
        LOAD_LINE_ALIGNED(0);
        LOAD_LINE_ALIGNED(1);
        LOAD_LINE_ALIGNED(2);
        LOAD_LINE_ALIGNED(3);
        LOAD_LINE_ALIGNED(4);
        LOAD_LINE_ALIGNED(5);
        LOAD_LINE_ALIGNED(6);
        LOAD_LINE_ALIGNED(7);
    } else {
        LOAD_LINE(0);
        LOAD_LINE(1);
        LOAD_LINE(2);
        LOAD_LINE(3);
        LOAD_LINE(4);
        LOAD_LINE(5);
        LOAD_LINE(6);
        LOAD_LINE(7);
    }
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED
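
/* ITER below mirrors the scalar flatness test: adding dcOffset re-centres
   the signed difference so that a single unsigned compare against
   dcThreshold implements an absolute-difference check. A rough scalar
   sketch of the per-column test:
       if (((unsigned)(src[x] - src[x + stride] + dcOffset)) < dcThreshold)
           numEq++;
   vec_sum4s then accumulates the per-column 0/1 results into v_numEq. */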
#define ITER(i, j) \
    const vector signed short v_diff##i = \
        vec_sub(v_srcAss##i, v_srcAss##j); \
    const vector signed short v_sum##i = \
        vec_add(v_diff##i, v_dcOffset); \
    const vector signed short v_comp##i = \
        (vector signed short)vec_cmplt((vector unsigned short)v_sum##i, \
                                       v_dcThreshold); \
    const vector signed short v_part##i = vec_and(mask, v_comp##i); \
    v_numEq = vec_sum4s(v_part##i, v_numEq);

    ITER(0, 1);
    ITER(1, 2);
    ITER(2, 3);
    ITER(3, 4);
    ITER(4, 5);
    ITER(5, 6);
    ITER(6, 7);
#undef ITER

    v_numEq = vec_sums(v_numEq, zero);
    v_numEq = vec_splat(v_numEq, 3);
    vec_ste(v_numEq, 0, &numEq);

    if (numEq > c->ppMode.flatnessThreshold) {
        const vector unsigned char mmoP1 = (const vector unsigned char)
            AVV(0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
                0x00, 0x01, 0x12, 0x13, 0x08, 0x09, 0x1A, 0x1B);
        const vector unsigned char mmoP2 = (const vector unsigned char)
            AVV(0x04, 0x05, 0x16, 0x17, 0x0C, 0x0D, 0x1E, 0x1F,
                0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
        const vector unsigned char mmoP = (const vector unsigned char)
            vec_lvsl(8, (unsigned char*)0);
        vector signed short mmoL1 = vec_perm(v_srcAss0, v_srcAss2, mmoP1);
        vector signed short mmoL2 = vec_perm(v_srcAss4, v_srcAss6, mmoP2);
        vector signed short mmoL = vec_perm(mmoL1, mmoL2, mmoP);
        vector signed short mmoR1 = vec_perm(v_srcAss5, v_srcAss7, mmoP1);
        vector signed short mmoR2 = vec_perm(v_srcAss1, v_srcAss3, mmoP2);
        vector signed short mmoR = vec_perm(mmoR1, mmoR2, mmoP);
        vector signed short mmoDiff = vec_sub(mmoL, mmoR);
        vector unsigned short mmoSum = (vector unsigned short)vec_add(mmoDiff, v2QP);
        if (vec_any_gt(mmoSum, v4QP))
            return 0;
        else
            return 1;
    }
    else return 2;
}

static inline void doVertLowPass_altivec(uint8_t *src, int stride, PPContext *c) {
    /*
      this code makes no assumption on src or stride.
      One could remove the recomputation of the perm
      vector by assuming (stride % 16) == 0, unfortunately
      this is not always true. Quite a lot of load/stores
      can be removed by assuming proper alignment of
      src & stride :-(
    */
    uint8_t *src2 = src;
    const vector signed int zero = vec_splat_s32(0);
    const int properStride = (stride % 16);
    const int srcAlign = ((unsigned long)src2 % 16);
    short __attribute__ ((aligned(16))) qp[8];
    qp[0] = c->QP;
    vector signed short vqp = vec_ld(0, qp);
    vqp = vec_splat(vqp, 0);

    src2 += stride*3;

    vector signed short vb0, vb1, vb2, vb3, vb4, vb5, vb6, vb7, vb8, vb9;
    vector unsigned char vbA0, vbA1, vbA2, vbA3, vbA4, vbA5, vbA6, vbA7, vbA8, vbA9;
    vector unsigned char vbB0, vbB1, vbB2, vbB3, vbB4, vbB5, vbB6, vbB7, vbB8, vbB9;
    vector unsigned char vbT0, vbT1, vbT2, vbT3, vbT4, vbT5, vbT6, vbT7, vbT8, vbT9;

#define LOAD_LINE(i) \
    const vector unsigned char perml##i = \
        vec_lvsl(i * stride, src2); \
    vbA##i = vec_ld(i * stride, src2); \
    vbB##i = vec_ld(i * stride + 16, src2); \
    vbT##i = vec_perm(vbA##i, vbB##i, perml##i); \
    vb##i = \
        (vector signed short)vec_mergeh((vector unsigned char)zero, \
                                        (vector unsigned char)vbT##i)

#define LOAD_LINE_ALIGNED(i) \
    register int j##i = i * stride; \
    vbT##i = vec_ld(j##i, src2); \
    vb##i = \
        (vector signed short)vec_mergeh((vector signed char)zero, \
                                        (vector signed char)vbT##i)

    // special-casing the aligned case is worthwhile, as all calls from
    // the (transposed) horizontal deblocks will be aligned, in addition
    // to the naturally aligned vertical deblocks.
    if (properStride && srcAlign) {
        LOAD_LINE_ALIGNED(0);
        LOAD_LINE_ALIGNED(1);
        LOAD_LINE_ALIGNED(2);
        LOAD_LINE_ALIGNED(3);
        LOAD_LINE_ALIGNED(4);
        LOAD_LINE_ALIGNED(5);
        LOAD_LINE_ALIGNED(6);
        LOAD_LINE_ALIGNED(7);
        LOAD_LINE_ALIGNED(8);
        LOAD_LINE_ALIGNED(9);
    } else {
        LOAD_LINE(0);
        LOAD_LINE(1);
        LOAD_LINE(2);
        LOAD_LINE(3);
        LOAD_LINE(4);
        LOAD_LINE(5);
        LOAD_LINE(6);
        LOAD_LINE(7);
        LOAD_LINE(8);
        LOAD_LINE(9);
    }
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED
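
/* The v_sumsB chain below keeps a running window sum: v_sumsB0 starts as
   4*v_first + vb1 + vb2 + vb3 + 4 (the +4 is the rounding bias), and each
   step drops one sample and adds the next, so every line's sum costs one
   subtract and one add. COMPUTE_VR then finishes the low-pass, roughly:
       vr[j] = (sums[i] + sums[k] + 2*vb[j]) >> 4; */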
    const vector unsigned short v_1 = vec_splat_u16(1);
    const vector unsigned short v_2 = vec_splat_u16(2);
    const vector unsigned short v_4 = vec_splat_u16(4);

    const vector signed short v_diff01 = vec_sub(vb0, vb1);
    const vector unsigned short v_cmp01 =
        (const vector unsigned short) vec_cmplt(vec_abs(v_diff01), vqp);
    const vector signed short v_first = vec_sel(vb1, vb0, v_cmp01);
    const vector signed short v_diff89 = vec_sub(vb8, vb9);
    const vector unsigned short v_cmp89 =
        (const vector unsigned short) vec_cmplt(vec_abs(v_diff89), vqp);
    const vector signed short v_last = vec_sel(vb8, vb9, v_cmp89);

    const vector signed short temp01 = vec_mladd(v_first, (vector signed short)v_4, vb1);
    const vector signed short temp02 = vec_add(vb2, vb3);
    const vector signed short temp03 = vec_add(temp01, (vector signed short)v_4);
    const vector signed short v_sumsB0 = vec_add(temp02, temp03);
    const vector signed short temp11 = vec_sub(v_sumsB0, v_first);
    const vector signed short v_sumsB1 = vec_add(temp11, vb4);
    const vector signed short temp21 = vec_sub(v_sumsB1, v_first);
    const vector signed short v_sumsB2 = vec_add(temp21, vb5);
    const vector signed short temp31 = vec_sub(v_sumsB2, v_first);
    const vector signed short v_sumsB3 = vec_add(temp31, vb6);
    const vector signed short temp41 = vec_sub(v_sumsB3, v_first);
    const vector signed short v_sumsB4 = vec_add(temp41, vb7);
    const vector signed short temp51 = vec_sub(v_sumsB4, vb1);
    const vector signed short v_sumsB5 = vec_add(temp51, vb8);
    const vector signed short temp61 = vec_sub(v_sumsB5, vb2);
    const vector signed short v_sumsB6 = vec_add(temp61, v_last);
    const vector signed short temp71 = vec_sub(v_sumsB6, vb3);
    const vector signed short v_sumsB7 = vec_add(temp71, v_last);
    const vector signed short temp81 = vec_sub(v_sumsB7, vb4);
    const vector signed short v_sumsB8 = vec_add(temp81, v_last);
    const vector signed short temp91 = vec_sub(v_sumsB8, vb5);
    const vector signed short v_sumsB9 = vec_add(temp91, v_last);

#define COMPUTE_VR(i, j, k) \
    const vector signed short temps1##i = \
        vec_add(v_sumsB##i, v_sumsB##k); \
    const vector signed short temps2##i = \
        vec_mladd(vb##j, (vector signed short)v_2, temps1##i); \
    const vector signed short vr##j = vec_sra(temps2##i, v_4)

    COMPUTE_VR(0, 1, 2);
    COMPUTE_VR(1, 2, 3);
    COMPUTE_VR(2, 3, 4);
    COMPUTE_VR(3, 4, 5);
    COMPUTE_VR(4, 5, 6);
    COMPUTE_VR(5, 6, 7);
    COMPUTE_VR(6, 7, 8);
    COMPUTE_VR(7, 8, 9);
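
/* PACK_AND_STORE handles the unaligned case with the classic AltiVec
   read-modify-write: vec_lvsr gives the store-alignment permute, permuting
   zero/neg1 through it yields a byte-select mask, and vec_sel merges the
   new bytes into the two previously loaded vectors before storing both. */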
    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                        0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);

#define PACK_AND_STORE(i) \
    const vector unsigned char perms##i = \
        vec_lvsr(i * stride, src2); \
    const vector unsigned char vf##i = \
        vec_packsu(vr##i, (vector signed short)zero); \
    const vector unsigned char vg##i = \
        vec_perm(vf##i, vbT##i, permHH); \
    const vector unsigned char mask##i = \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
    const vector unsigned char vg2##i = \
        vec_perm(vg##i, vg##i, perms##i); \
    const vector unsigned char svA##i = \
        vec_sel(vbA##i, vg2##i, mask##i); \
    const vector unsigned char svB##i = \
        vec_sel(vg2##i, vbB##i, mask##i); \
    vec_st(svA##i, i * stride, src2); \
    vec_st(svB##i, i * stride + 16, src2)

#define PACK_AND_STORE_ALIGNED(i) \
    const vector unsigned char vf##i = \
        vec_packsu(vr##i, (vector signed short)zero); \
    const vector unsigned char vg##i = \
        vec_perm(vf##i, vbT##i, permHH); \
    vec_st(vg##i, i * stride, src2)
    // special-casing the aligned case is worthwhile, as all calls from
    // the (transposed) horizontal deblocks will be aligned, in addition
    // to the naturally aligned vertical deblocks.
    if (properStride && srcAlign) {
        PACK_AND_STORE_ALIGNED(1);
        PACK_AND_STORE_ALIGNED(2);
        PACK_AND_STORE_ALIGNED(3);
        PACK_AND_STORE_ALIGNED(4);
        PACK_AND_STORE_ALIGNED(5);
        PACK_AND_STORE_ALIGNED(6);
        PACK_AND_STORE_ALIGNED(7);
        PACK_AND_STORE_ALIGNED(8);
    } else {
        PACK_AND_STORE(1);
        PACK_AND_STORE(2);
        PACK_AND_STORE(3);
        PACK_AND_STORE(4);
        PACK_AND_STORE(5);
        PACK_AND_STORE(6);
        PACK_AND_STORE(7);
        PACK_AND_STORE(8);
    }
#undef PACK_AND_STORE
#undef PACK_AND_STORE_ALIGNED
}

static inline void doVertDefFilter_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
      this code makes no assumption on src or stride.
      One could remove the recomputation of the perm
      vector by assuming (stride % 16) == 0, unfortunately
      this is not always true. Quite a lot of load/stores
      can be removed by assuming proper alignment of
      src & stride :-(
    */
    uint8_t *src2 = src;
    const vector signed int zero = vec_splat_s32(0);
    short __attribute__ ((aligned(16))) qp[8];
    qp[0] = 8*c->QP;
    vector signed short vqp = vec_ld(0, qp);
    vqp = vec_splat(vqp, 0);

#define LOAD_LINE(i) \
    const vector unsigned char perm##i = \
        vec_lvsl(i * stride, src2); \
    const vector unsigned char vbA##i = \
        vec_ld(i * stride, src2); \
    const vector unsigned char vbB##i = \
        vec_ld(i * stride + 16, src2); \
    const vector unsigned char vbT##i = \
        vec_perm(vbA##i, vbB##i, perm##i); \
    const vector signed short vb##i = \
        (vector signed short)vec_mergeh((vector unsigned char)zero, \
                                        (vector unsigned char)vbT##i)

    src2 += stride*3;

    LOAD_LINE(1);
    LOAD_LINE(2);
    LOAD_LINE(3);
    LOAD_LINE(4);
    LOAD_LINE(5);
    LOAD_LINE(6);
    LOAD_LINE(7);
    LOAD_LINE(8);
#undef LOAD_LINE
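
/* The "energies" below roughly follow the scalar default deblock filter:
   mE = 2*(l3-l6) + 5*(l5-l4) for the middle, with analogous expressions
   for the left/right neighbourhoods; the correction
       d = (5 * max(|mE| - min(|lE|, |rE|), 0) + 32) >> 6
   is sign-matched to mE, clamped against (l4-l5)/2, and applied only when
   |mE| < 8*QP. */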
    const vector signed short v_1 = vec_splat_s16(1);
    const vector signed short v_2 = vec_splat_s16(2);
    const vector signed short v_5 = vec_splat_s16(5);
    const vector signed short v_32 = vec_sl(v_1,
                                            (vector unsigned short)v_5);
    /* middle energy */
    const vector signed short l3minusl6 = vec_sub(vb3, vb6);
    const vector signed short l5minusl4 = vec_sub(vb5, vb4);
    const vector signed short twotimes_l3minusl6 = vec_mladd(v_2, l3minusl6, (vector signed short)zero);
    const vector signed short mE = vec_mladd(v_5, l5minusl4, twotimes_l3minusl6);
    const vector signed short absmE = vec_abs(mE);
    /* left & right energy */
    const vector signed short l1minusl4 = vec_sub(vb1, vb4);
    const vector signed short l3minusl2 = vec_sub(vb3, vb2);
    const vector signed short l5minusl8 = vec_sub(vb5, vb8);
    const vector signed short l7minusl6 = vec_sub(vb7, vb6);
    const vector signed short twotimes_l1minusl4 = vec_mladd(v_2, l1minusl4, (vector signed short)zero);
    const vector signed short twotimes_l5minusl8 = vec_mladd(v_2, l5minusl8, (vector signed short)zero);
    const vector signed short lE = vec_mladd(v_5, l3minusl2, twotimes_l1minusl4);
    const vector signed short rE = vec_mladd(v_5, l7minusl6, twotimes_l5minusl8);
    /* d */
    const vector signed short ddiff = vec_sub(absmE,
                                              vec_min(vec_abs(lE),
                                                      vec_abs(rE)));
    const vector signed short ddiffclamp = vec_max(ddiff, (vector signed short)zero);
    const vector signed short dtimes64 = vec_mladd(v_5, ddiffclamp, v_32);
    const vector signed short d = vec_sra(dtimes64, vec_splat_u16(6));
    const vector signed short minusd = vec_sub((vector signed short)zero, d);
    const vector signed short finald = vec_sel(minusd,
                                               d,
                                               vec_cmpgt(vec_sub((vector signed short)zero, mE),
                                                         (vector signed short)zero));
    /* q */
    const vector signed short qtimes2 = vec_sub(vb4, vb5);
    /* for a shift right to behave like /2, we need to add one
       to all negative integers */
    const vector signed short rounddown = vec_sel((vector signed short)zero,
                                                  v_1,
                                                  vec_cmplt(qtimes2, (vector signed short)zero));
    const vector signed short q = vec_sra(vec_add(qtimes2, rounddown), vec_splat_u16(1));
    /* clamp */
    const vector signed short dclamp_P1 = vec_max((vector signed short)zero, finald);
    const vector signed short dclamp_P = vec_min(dclamp_P1, q);
    const vector signed short dclamp_N1 = vec_min((vector signed short)zero, finald);
    const vector signed short dclamp_N = vec_max(dclamp_N1, q);
    const vector signed short dclampedfinal = vec_sel(dclamp_N,
                                                      dclamp_P,
                                                      vec_cmpgt(q, (vector signed short)zero));
    const vector signed short dornotd = vec_sel((vector signed short)zero,
                                                dclampedfinal,
                                                vec_cmplt(absmE, vqp));
    /* add/subtract to l4 and l5 */
    const vector signed short vb4minusd = vec_sub(vb4, dornotd);
    const vector signed short vb5plusd = vec_add(vb5, dornotd);
    /* finally, stores */
    const vector unsigned char st4 = vec_packsu(vb4minusd, (vector signed short)zero);
    const vector unsigned char st5 = vec_packsu(vb5plusd, (vector signed short)zero);
    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                        0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);

#define STORE(i) \
    const vector unsigned char perms##i = \
        vec_lvsr(i * stride, src2); \
    const vector unsigned char vg##i = \
        vec_perm(st##i, vbT##i, permHH); \
    const vector unsigned char mask##i = \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
    const vector unsigned char vg2##i = \
        vec_perm(vg##i, vg##i, perms##i); \
    const vector unsigned char svA##i = \
        vec_sel(vbA##i, vg2##i, mask##i); \
    const vector unsigned char svB##i = \
        vec_sel(vg2##i, vbB##i, mask##i); \
    vec_st(svA##i, i * stride, src2); \
    vec_st(svB##i, i * stride + 16, src2)

    /* only lines 4 and 5 are modified */
    STORE(4);
    STORE(5);
}

static inline void dering_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
      this code makes no assumption on src or stride.
      One could remove the recomputation of the perm
      vector by assuming (stride % 16) == 0, unfortunately
      this is not always true. Quite a lot of load/stores
      can be removed by assuming proper alignment of
      src & stride :-(
    */
    uint8_t *srcCopy = src;
    uint8_t __attribute__((aligned(16))) dt[16];
    const vector unsigned char vuint8_1 = vec_splat_u8(1);
    const vector signed int zero = vec_splat_s32(0);
    vector unsigned char v_dt;
    dt[0] = deringThreshold;
    v_dt = vec_splat(vec_ld(0, dt), 0);

#define LOAD_LINE(i) \
    const vector unsigned char perm##i = \
        vec_lvsl(i * stride, srcCopy); \
    vector unsigned char sA##i = vec_ld(i * stride, srcCopy); \
    vector unsigned char sB##i = vec_ld(i * stride + 16, srcCopy); \
    vector unsigned char src##i = vec_perm(sA##i, sB##i, perm##i)

    LOAD_LINE(0);
    LOAD_LINE(1);
    LOAD_LINE(2);
    LOAD_LINE(3);
    LOAD_LINE(4);
    LOAD_LINE(5);
    LOAD_LINE(6);
    LOAD_LINE(7);
    LOAD_LINE(8);
    LOAD_LINE(9);
#undef LOAD_LINE
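
/* First pass: find the min and max of the 8x8 interior (trunc_perm drops
   the border columns of each pair of rows) with a log2 reduction of
   vec_min/vec_max plus merges. If max - min stays below deringThreshold
   there is nothing to dering and the function returns early. */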
    vector unsigned char v_avg;
    {
        const vector unsigned char trunc_perm = (vector unsigned char)
            AVV(0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
                0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18);
        const vector unsigned char trunc_src12 = vec_perm(src1, src2, trunc_perm);
        const vector unsigned char trunc_src34 = vec_perm(src3, src4, trunc_perm);
        const vector unsigned char trunc_src56 = vec_perm(src5, src6, trunc_perm);
        const vector unsigned char trunc_src78 = vec_perm(src7, src8, trunc_perm);

#define EXTRACT(op) do { \
    const vector unsigned char s##op##_1 = vec_##op(trunc_src12, trunc_src34); \
    const vector unsigned char s##op##_2 = vec_##op(trunc_src56, trunc_src78); \
    const vector unsigned char s##op##_6 = vec_##op(s##op##_1, s##op##_2); \
    const vector unsigned char s##op##_8h = vec_mergeh(s##op##_6, s##op##_6); \
    const vector unsigned char s##op##_8l = vec_mergel(s##op##_6, s##op##_6); \
    const vector unsigned char s##op##_9 = vec_##op(s##op##_8h, s##op##_8l); \
    const vector unsigned char s##op##_9h = vec_mergeh(s##op##_9, s##op##_9); \
    const vector unsigned char s##op##_9l = vec_mergel(s##op##_9, s##op##_9); \
    const vector unsigned char s##op##_10 = vec_##op(s##op##_9h, s##op##_9l); \
    const vector unsigned char s##op##_10h = vec_mergeh(s##op##_10, s##op##_10); \
    const vector unsigned char s##op##_10l = vec_mergel(s##op##_10, s##op##_10); \
    const vector unsigned char s##op##_11 = vec_##op(s##op##_10h, s##op##_10l); \
    const vector unsigned char s##op##_11h = vec_mergeh(s##op##_11, s##op##_11); \
    const vector unsigned char s##op##_11l = vec_mergel(s##op##_11, s##op##_11); \
    v_##op = vec_##op(s##op##_11h, s##op##_11l); } while (0)

        vector unsigned char v_min;
        vector unsigned char v_max;
        EXTRACT(min);
        EXTRACT(max);
#undef EXTRACT

        if (vec_all_lt(vec_sub(v_max, v_min), v_dt))
            return;
        v_avg = vec_avg(v_min, v_max);
    }
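
/* Second pass: COMPARE turns each line into a bitmask, bit x set iff pixel
   x is brighter than v_avg. mask1/mask2 give each column its bit weight,
   and vec_sum4s/vec_sums collapse the per-lane values into one sum, which
   is exactly the bitmask. */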
    signed int __attribute__((aligned(16))) S[8];
    {
        const vector unsigned short mask1 = (vector unsigned short)
            AVV(0x0001, 0x0002, 0x0004, 0x0008,
                0x0010, 0x0020, 0x0040, 0x0080);
        const vector unsigned short mask2 = (vector unsigned short)
            AVV(0x0100, 0x0200, 0x0000, 0x0000,
                0x0000, 0x0000, 0x0000, 0x0000);
        const vector unsigned int vuint32_16 = vec_sl(vec_splat_u32(1), vec_splat_u32(4));
        const vector unsigned int vuint32_1 = vec_splat_u32(1);

#define COMPARE(i) \
    vector signed int sum##i; \
    do { \
        const vector unsigned char cmp##i = \
            (vector unsigned char)vec_cmpgt(src##i, v_avg); \
        const vector unsigned short cmpHi##i = \
            (vector unsigned short)vec_mergeh(cmp##i, cmp##i); \
        const vector unsigned short cmpLi##i = \
            (vector unsigned short)vec_mergel(cmp##i, cmp##i); \
        const vector signed short cmpHf##i = \
            (vector signed short)vec_and(cmpHi##i, mask1); \
        const vector signed short cmpLf##i = \
            (vector signed short)vec_and(cmpLi##i, mask2); \
        const vector signed int sump##i = vec_sum4s(cmpHf##i, zero); \
        const vector signed int sumq##i = vec_sum4s(cmpLf##i, sump##i); \
        sum##i = vec_sums(sumq##i, zero); } while (0)

        COMPARE(0);
        COMPARE(1);
        COMPARE(2);
        COMPARE(3);
        COMPARE(4);
        COMPARE(5);
        COMPARE(6);
        COMPARE(7);
        COMPARE(8);
        COMPARE(9);
#undef COMPARE

        vector signed int sumA2;
        vector signed int sumB2;
        {
            const vector signed int sump02 = vec_mergel(sum0, sum2);
            const vector signed int sump13 = vec_mergel(sum1, sum3);
            const vector signed int sumA = vec_mergel(sump02, sump13);
            const vector signed int sump46 = vec_mergel(sum4, sum6);
            const vector signed int sump57 = vec_mergel(sum5, sum7);
            const vector signed int sumB = vec_mergel(sump46, sump57);
            const vector signed int sump8A = vec_mergel(sum8, zero);
            const vector signed int sump9B = vec_mergel(sum9, zero);
            const vector signed int sumC = vec_mergel(sump8A, sump9B);
            const vector signed int tA = vec_sl(vec_nor(zero, sumA), vuint32_16);
            const vector signed int tB = vec_sl(vec_nor(zero, sumB), vuint32_16);
            const vector signed int tC = vec_sl(vec_nor(zero, sumC), vuint32_16);
            const vector signed int t2A = vec_or(sumA, tA);
            const vector signed int t2B = vec_or(sumB, tB);
            const vector signed int t2C = vec_or(sumC, tC);
            const vector signed int t3A = vec_and(vec_sra(t2A, vuint32_1),
                                                  vec_sl(t2A, vuint32_1));
            const vector signed int t3B = vec_and(vec_sra(t2B, vuint32_1),
                                                  vec_sl(t2B, vuint32_1));
            const vector signed int t3C = vec_and(vec_sra(t2C, vuint32_1),
                                                  vec_sl(t2C, vuint32_1));
            const vector signed int yA = vec_and(t2A, t3A);
            const vector signed int yB = vec_and(t2B, t3B);
            const vector signed int yC = vec_and(t2C, t3C);
            const vector unsigned char strangeperm1 = vec_lvsl(4, (unsigned char*)0);
            const vector unsigned char strangeperm2 = vec_lvsl(8, (unsigned char*)0);
            const vector signed int sumAd4 = vec_perm(yA, yB, strangeperm1);
            const vector signed int sumAd8 = vec_perm(yA, yB, strangeperm2);
            const vector signed int sumBd4 = vec_perm(yB, yC, strangeperm1);
            const vector signed int sumBd8 = vec_perm(yB, yC, strangeperm2);
            const vector signed int sumAp = vec_and(yA,
                                                    vec_and(sumAd4, sumAd8));
            const vector signed int sumBp = vec_and(yB,
                                                    vec_and(sumBd4, sumBd8));
            sumA2 = vec_or(sumAp,
                           vec_sra(sumAp,
                                   vuint32_16));
            sumB2 = vec_or(sumBp,
                           vec_sra(sumBp,
                                   vuint32_16));
        }
        vec_st(sumA2, 0, S);
        vec_st(sumB2, 16, S);
    }
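
/* The bit-twiddling above in effect records, for each pixel, whether its
   whole neighbourhood lies on the same side of the average: the shift/AND
   pairs test the two horizontal neighbours (against both the set and the
   complemented mask), and the byte-shifted permutes AND in the lines below,
   leaving in S[i] one bit per pixel that F2 may filter. */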
    /* I'm not sure the following is actually faster
       than straight, unvectorized C code :-( */
    int __attribute__((aligned(16))) tQP2[4];
    tQP2[0] = c->QP/2 + 1;
    vector signed int vQP2 = vec_ld(0, tQP2);
    vQP2 = vec_splat(vQP2, 0);
    const vector unsigned char vuint8_2 = vec_splat_u8(2);
    const vector signed int vsint32_8 = vec_splat_s32(8);
    const vector unsigned int vuint32_4 = vec_splat_u32(4);

    const vector unsigned char permA1 = (vector unsigned char)
        AVV(0x00, 0x01, 0x02, 0x10, 0x11, 0x12, 0x1F, 0x1F,
            0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F);
    const vector unsigned char permA2 = (vector unsigned char)
        AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x10, 0x11,
            0x12, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F);
    const vector unsigned char permA1inc = (vector unsigned char)
        AVV(0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
    const vector unsigned char permA2inc = (vector unsigned char)
        AVV(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01,
            0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
    const vector unsigned char magic = (vector unsigned char)
        AVV(0x01, 0x02, 0x01, 0x02, 0x04, 0x02, 0x01, 0x02,
            0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
    const vector unsigned char extractPerm = (vector unsigned char)
        AVV(0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01,
            0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01);
    const vector unsigned char extractPermInc = (vector unsigned char)
        AVV(0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
            0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01);
    const vector unsigned char identity = vec_lvsl(0,(unsigned char *)0);
    const vector unsigned char tenRight = (vector unsigned char)
        AVV(0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
    const vector unsigned char eightLeft = (vector unsigned char)
        AVV(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08);
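
/* F2 filters one pixel at a time (line j, column l+1): the permutes gather
   its 3x3 neighbourhood from lines i/j/k, vec_msum with `magic` (weights
   1,2,1 / 2,4,2 / 1,2,1) forms the weighted sum, F = (sum + 8) >> 4, and
   the result is clamped to within QP/2 + 1 of the original pixel before
   being re-inserted into line j. The *inc vectors slide every permute one
   column to the right on each call. */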
#define F_INIT(i) \
    vector unsigned char tenRightM##i = tenRight; \
    vector unsigned char permA1M##i = permA1; \
    vector unsigned char permA2M##i = permA2; \
    vector unsigned char extractPermM##i = extractPerm

#define F2(i, j, k, l) \
    if (S[i] & (1 << (l+1))) { \
        const vector unsigned char a_##j##_A##l = \
            vec_perm(src##i, src##j, permA1M##i); \
        const vector unsigned char a_##j##_B##l = \
            vec_perm(a_##j##_A##l, src##k, permA2M##i); \
        const vector signed int a_##j##_sump##l = \
            (vector signed int)vec_msum(a_##j##_B##l, magic, \
                                        (vector unsigned int)zero); \
        vector signed int F_##j##_##l = \
            vec_sr(vec_sums(a_##j##_sump##l, vsint32_8), vuint32_4); \
        F_##j##_##l = vec_splat(F_##j##_##l, 3); \
        const vector signed int p_##j##_##l = \
            (vector signed int)vec_perm(src##j, \
                                        (vector unsigned char)zero, \
                                        extractPermM##i); \
        const vector signed int sum_##j##_##l = vec_add(p_##j##_##l, vQP2); \
        const vector signed int diff_##j##_##l = vec_sub(p_##j##_##l, vQP2); \
        vector signed int newpm_##j##_##l; \
        if (vec_all_lt(sum_##j##_##l, F_##j##_##l)) \
            newpm_##j##_##l = sum_##j##_##l; \
        else if (vec_all_gt(diff_##j##_##l, F_##j##_##l)) \
            newpm_##j##_##l = diff_##j##_##l; \
        else newpm_##j##_##l = F_##j##_##l; \
        const vector unsigned char newpm2_##j##_##l = \
            vec_splat((vector unsigned char)newpm_##j##_##l, 15); \
        const vector unsigned char mask##j##l = vec_add(identity, \
                                                        tenRightM##i); \
        src##j = vec_perm(src##j, newpm2_##j##_##l, mask##j##l); \
    } \
    permA1M##i = vec_add(permA1M##i, permA1inc); \
    permA2M##i = vec_add(permA2M##i, permA2inc); \
    tenRightM##i = vec_sro(tenRightM##i, eightLeft); \
    extractPermM##i = vec_add(extractPermM##i, extractPermInc)

#define ITER(i, j, k) \
    F_INIT(i); \
    F2(i, j, k, 0); \
    F2(i, j, k, 1); \
    F2(i, j, k, 2); \
    F2(i, j, k, 3); \
    F2(i, j, k, 4); \
    F2(i, j, k, 5); \
    F2(i, j, k, 6); \
    F2(i, j, k, 7)

    ITER(0, 1, 2);
    ITER(1, 2, 3);
    ITER(2, 3, 4);
    ITER(3, 4, 5);
    ITER(4, 5, 6);
    ITER(5, 6, 7);
    ITER(6, 7, 8);
    ITER(7, 8, 9);

    const vector signed char neg1 = vec_splat_s8(-1);

#define STORE_LINE(i) \
    const vector unsigned char permST##i = \
        vec_lvsr(i * stride, srcCopy); \
    const vector unsigned char maskST##i = \
        vec_perm((vector unsigned char)zero, \
                 (vector unsigned char)neg1, permST##i); \
    src##i = vec_perm(src##i, src##i, permST##i); \
    sA##i = vec_sel(sA##i, src##i, maskST##i); \
    sB##i = vec_sel(src##i, sB##i, maskST##i); \
    vec_st(sA##i, i * stride, srcCopy); \
    vec_st(sB##i, i * stride + 16, srcCopy)

    STORE_LINE(1);
    STORE_LINE(2);
    STORE_LINE(3);
    STORE_LINE(4);
    STORE_LINE(5);
    STORE_LINE(6);
    STORE_LINE(7);
    STORE_LINE(8);
#undef STORE_LINE
#undef ITER
#undef F2
}
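
/* No AltiVec versions of the horizontal filters or of do_a_deblock yet;
   route them to the plain C implementations. */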
#define doHorizLowPass_altivec(a...) doHorizLowPass_C(a)
#define doHorizDefFilter_altivec(a...) doHorizDefFilter_C(a)
#define do_a_deblock_altivec(a...) do_a_deblock_C(a)

static inline void RENAME(tempNoiseReducer)(uint8_t *src, int stride,
                                            uint8_t *tempBlured, uint32_t *tempBluredPast, int *maxNoise)
{
    const vector signed int zero = vec_splat_s32(0);
    const vector signed short vsint16_1 = vec_splat_s16(1);
    vector signed int v_dp = zero;
    vector signed int v_sysdp = zero;
    int d, sysd, i;

    tempBluredPast[127] = maxNoise[0];
    tempBluredPast[128] = maxNoise[1];
    tempBluredPast[129] = maxNoise[2];

#define LOAD_LINE(src, i) \
    register int j##src##i = i * stride; \
    vector unsigned char perm##src##i = vec_lvsl(j##src##i, src); \
    const vector unsigned char v_##src##A1##i = vec_ld(j##src##i, src); \
    const vector unsigned char v_##src##A2##i = vec_ld(j##src##i + 16, src); \
    const vector unsigned char v_##src##A##i = \
        vec_perm(v_##src##A1##i, v_##src##A2##i, perm##src##i); \
    vector signed short v_##src##Ass##i = \
        (vector signed short)vec_mergeh((vector signed char)zero, \
                                        (vector signed char)v_##src##A##i)

    LOAD_LINE(src, 0);
    LOAD_LINE(src, 1);
    LOAD_LINE(src, 2);
    LOAD_LINE(src, 3);
    LOAD_LINE(src, 4);
    LOAD_LINE(src, 5);
    LOAD_LINE(src, 6);
    LOAD_LINE(src, 7);

    LOAD_LINE(tempBlured, 0);
    LOAD_LINE(tempBlured, 1);
    LOAD_LINE(tempBlured, 2);
    LOAD_LINE(tempBlured, 3);
    LOAD_LINE(tempBlured, 4);
    LOAD_LINE(tempBlured, 5);
    LOAD_LINE(tempBlured, 6);
    LOAD_LINE(tempBlured, 7);
#undef LOAD_LINE
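
/* ACCUMULATE_DIFFS builds both block statistics in one pass:
   vec_msums(d, d, v_dp) accumulates the sum of squared differences between
   the current block and the temporally blurred one, while multiplying the
   same differences by vsint16_1 yields the plain signed sum in v_sysdp. */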
#define ACCUMULATE_DIFFS(i) \
    vector signed short v_d##i = vec_sub(v_tempBluredAss##i, \
                                         v_srcAss##i); \
    v_dp = vec_msums(v_d##i, v_d##i, v_dp); \
    v_sysdp = vec_msums(v_d##i, vsint16_1, v_sysdp)

    ACCUMULATE_DIFFS(0);
    ACCUMULATE_DIFFS(1);
    ACCUMULATE_DIFFS(2);
    ACCUMULATE_DIFFS(3);
    ACCUMULATE_DIFFS(4);
    ACCUMULATE_DIFFS(5);
    ACCUMULATE_DIFFS(6);
    ACCUMULATE_DIFFS(7);
#undef ACCUMULATE_DIFFS

    v_dp = vec_sums(v_dp, zero);
    v_sysdp = vec_sums(v_sysdp, zero);
    v_dp = vec_splat(v_dp, 3);
    v_sysdp = vec_splat(v_sysdp, 3);
    vec_ste(v_dp, 0, &d);
    vec_ste(v_sysdp, 0, &sysd);

    i = d;
    d = (4*d
         + (*(tempBluredPast-256))
         + (*(tempBluredPast-1)) + (*(tempBluredPast+1))
         + (*(tempBluredPast+256))
         + 4) >> 3;
    *tempBluredPast = i;
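
/* The raw difference is smoothed with the past values of the four
   neighbouring blocks (+/-1 and +/-256 entries away) before the threshold
   test: above maxNoise[1] the blurred block is averaged with the source
   (if still below maxNoise[2]) or replaced by it outright; below, the new
   frame is blended in as (7*blur + src + 4) >> 3 for very quiet blocks,
   or (3*blur + src + 2) >> 2 otherwise. */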
    if (d > maxNoise[1]) {
        if (d < maxNoise[2]) {
#define OP(i) v_tempBluredAss##i = vec_avg(v_tempBluredAss##i, v_srcAss##i);
            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        } else {
#define OP(i) v_tempBluredAss##i = v_srcAss##i;
            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        }
    } else {
        if (d < maxNoise[0]) {
            const vector signed short vsint16_7 = vec_splat_s16(7);
            const vector signed short vsint16_4 = vec_splat_s16(4);
            const vector unsigned short vuint16_3 = vec_splat_u16(3);

#define OP(i) \
            const vector signed short v_temp##i = \
                vec_mladd(v_tempBluredAss##i, \
                          vsint16_7, v_srcAss##i); \
            const vector signed short v_temp2##i = \
                vec_add(v_temp##i, vsint16_4); \
            v_tempBluredAss##i = vec_sr(v_temp2##i, vuint16_3)

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        } else {
            const vector signed short vsint16_3 = vec_splat_s16(3);
            const vector signed short vsint16_2 = vec_splat_s16(2);

#define OP(i) \
            const vector signed short v_temp##i = \
                vec_mladd(v_tempBluredAss##i, \
                          vsint16_3, v_srcAss##i); \
            const vector signed short v_temp2##i = \
                vec_add(v_temp##i, vsint16_2); \
            v_tempBluredAss##i = vec_sr(v_temp2##i, (vector unsigned short)vsint16_2)

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        }
    }

    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                        0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);

#define PACK_AND_STORE(src, i) \
    const vector unsigned char perms##src##i = \
        vec_lvsr(i * stride, src); \
    const vector unsigned char vf##src##i = \
        vec_packsu(v_tempBluredAss##i, (vector signed short)zero); \
    const vector unsigned char vg##src##i = \
        vec_perm(vf##src##i, v_##src##A##i, permHH); \
    const vector unsigned char mask##src##i = \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##src##i); \
    const vector unsigned char vg2##src##i = \
        vec_perm(vg##src##i, vg##src##i, perms##src##i); \
    const vector unsigned char svA##src##i = \
        vec_sel(v_##src##A1##i, vg2##src##i, mask##src##i); \
    const vector unsigned char svB##src##i = \
        vec_sel(vg2##src##i, v_##src##A2##i, mask##src##i); \
    vec_st(svA##src##i, i * stride, src); \
    vec_st(svB##src##i, i * stride + 16, src)

    PACK_AND_STORE(src, 0);
    PACK_AND_STORE(src, 1);
    PACK_AND_STORE(src, 2);
    PACK_AND_STORE(src, 3);
    PACK_AND_STORE(src, 4);
    PACK_AND_STORE(src, 5);
    PACK_AND_STORE(src, 6);
    PACK_AND_STORE(src, 7);

    PACK_AND_STORE(tempBlured, 0);
    PACK_AND_STORE(tempBlured, 1);
    PACK_AND_STORE(tempBlured, 2);
    PACK_AND_STORE(tempBlured, 3);
    PACK_AND_STORE(tempBlured, 4);
    PACK_AND_STORE(tempBlured, 5);
    PACK_AND_STORE(tempBlured, 6);
    PACK_AND_STORE(tempBlured, 7);
#undef PACK_AND_STORE
}

static inline void transpose_16x8_char_toPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
    const vector unsigned char zero = vec_splat_u8(0);

#define LOAD_DOUBLE_LINE(i, j) \
    vector unsigned char perm1##i = vec_lvsl(i * stride, src); \
    vector unsigned char perm2##i = vec_lvsl(j * stride, src); \
    vector unsigned char srcA##i = vec_ld(i * stride, src); \
    vector unsigned char srcB##i = vec_ld(i * stride + 16, src); \
    vector unsigned char srcC##i = vec_ld(j * stride, src); \
    vector unsigned char srcD##i = vec_ld(j * stride + 16, src); \
    vector unsigned char src##i = vec_perm(srcA##i, srcB##i, perm1##i); \
    vector unsigned char src##j = vec_perm(srcC##i, srcD##i, perm2##i)

    LOAD_DOUBLE_LINE(0, 1);
    LOAD_DOUBLE_LINE(2, 3);
    LOAD_DOUBLE_LINE(4, 5);
    LOAD_DOUBLE_LINE(6, 7);
#undef LOAD_DOUBLE_LINE
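
/* Interleaving each line with zero widens every byte to 16 bits, and the
   usual three rounds of vec_mergeh/vec_mergel then rotate the 16x8 block:
   dst ends up holding 16 consecutive, aligned vectors, one per source
   column, with the padding zeros riding along through every merge. */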
    vector unsigned char tempA = vec_mergeh(src0, zero);
    vector unsigned char tempB = vec_mergel(src0, zero);
    vector unsigned char tempC = vec_mergeh(src1, zero);
    vector unsigned char tempD = vec_mergel(src1, zero);
    vector unsigned char tempE = vec_mergeh(src2, zero);
    vector unsigned char tempF = vec_mergel(src2, zero);
    vector unsigned char tempG = vec_mergeh(src3, zero);
    vector unsigned char tempH = vec_mergel(src3, zero);
    vector unsigned char tempI = vec_mergeh(src4, zero);
    vector unsigned char tempJ = vec_mergel(src4, zero);
    vector unsigned char tempK = vec_mergeh(src5, zero);
    vector unsigned char tempL = vec_mergel(src5, zero);
    vector unsigned char tempM = vec_mergeh(src6, zero);
    vector unsigned char tempN = vec_mergel(src6, zero);
    vector unsigned char tempO = vec_mergeh(src7, zero);
    vector unsigned char tempP = vec_mergel(src7, zero);

    vector unsigned char temp0 = vec_mergeh(tempA, tempI);
    vector unsigned char temp1 = vec_mergel(tempA, tempI);
    vector unsigned char temp2 = vec_mergeh(tempB, tempJ);
    vector unsigned char temp3 = vec_mergel(tempB, tempJ);
    vector unsigned char temp4 = vec_mergeh(tempC, tempK);
    vector unsigned char temp5 = vec_mergel(tempC, tempK);
    vector unsigned char temp6 = vec_mergeh(tempD, tempL);
    vector unsigned char temp7 = vec_mergel(tempD, tempL);
    vector unsigned char temp8 = vec_mergeh(tempE, tempM);
    vector unsigned char temp9 = vec_mergel(tempE, tempM);
    vector unsigned char temp10 = vec_mergeh(tempF, tempN);
    vector unsigned char temp11 = vec_mergel(tempF, tempN);
    vector unsigned char temp12 = vec_mergeh(tempG, tempO);
    vector unsigned char temp13 = vec_mergel(tempG, tempO);
    vector unsigned char temp14 = vec_mergeh(tempH, tempP);
    vector unsigned char temp15 = vec_mergel(tempH, tempP);

    tempA = vec_mergeh(temp0, temp8);
    tempB = vec_mergel(temp0, temp8);
    tempC = vec_mergeh(temp1, temp9);
    tempD = vec_mergel(temp1, temp9);
    tempE = vec_mergeh(temp2, temp10);
    tempF = vec_mergel(temp2, temp10);
    tempG = vec_mergeh(temp3, temp11);
    tempH = vec_mergel(temp3, temp11);
    tempI = vec_mergeh(temp4, temp12);
    tempJ = vec_mergel(temp4, temp12);
    tempK = vec_mergeh(temp5, temp13);
    tempL = vec_mergel(temp5, temp13);
    tempM = vec_mergeh(temp6, temp14);
    tempN = vec_mergel(temp6, temp14);
    tempO = vec_mergeh(temp7, temp15);
    tempP = vec_mergel(temp7, temp15);

    temp0 = vec_mergeh(tempA, tempI);
    temp1 = vec_mergel(tempA, tempI);
    temp2 = vec_mergeh(tempB, tempJ);
    temp3 = vec_mergel(tempB, tempJ);
    temp4 = vec_mergeh(tempC, tempK);
    temp5 = vec_mergel(tempC, tempK);
    temp6 = vec_mergeh(tempD, tempL);
    temp7 = vec_mergel(tempD, tempL);
    temp8 = vec_mergeh(tempE, tempM);
    temp9 = vec_mergel(tempE, tempM);
    temp10 = vec_mergeh(tempF, tempN);
    temp11 = vec_mergel(tempF, tempN);
    temp12 = vec_mergeh(tempG, tempO);
    temp13 = vec_mergel(tempG, tempO);
    temp14 = vec_mergeh(tempH, tempP);
    temp15 = vec_mergel(tempH, tempP);

    vec_st(temp0, 0, dst);
    vec_st(temp1, 16, dst);
    vec_st(temp2, 32, dst);
    vec_st(temp3, 48, dst);
    vec_st(temp4, 64, dst);
    vec_st(temp5, 80, dst);
    vec_st(temp6, 96, dst);
    vec_st(temp7, 112, dst);
    vec_st(temp8, 128, dst);
    vec_st(temp9, 144, dst);
    vec_st(temp10, 160, dst);
    vec_st(temp11, 176, dst);
    vec_st(temp12, 192, dst);
    vec_st(temp13, 208, dst);
    vec_st(temp14, 224, dst);
    vec_st(temp15, 240, dst);
}

static inline void transpose_8x16_char_fromPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
    const vector unsigned char zero = vec_splat_u8(0);
    const vector unsigned char magic_perm = (const vector unsigned char)
        AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
            0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);

#define LOAD_DOUBLE_LINE(i, j) \
    vector unsigned char src##i = vec_ld(i * 16, src); \
    vector unsigned char src##j = vec_ld(j * 16, src)

    LOAD_DOUBLE_LINE(0, 1);
    LOAD_DOUBLE_LINE(2, 3);
    LOAD_DOUBLE_LINE(4, 5);
    LOAD_DOUBLE_LINE(6, 7);
    LOAD_DOUBLE_LINE(8, 9);
    LOAD_DOUBLE_LINE(10, 11);
    LOAD_DOUBLE_LINE(12, 13);
    LOAD_DOUBLE_LINE(14, 15);
#undef LOAD_DOUBLE_LINE
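
/* Inverse of the transpose above: the packed-align buffer stores each
   source column widened to a full 16-byte vector, so only the vec_mergeh
   halves carry pixel data and several mergel results can be skipped; three
   merge rounds rebuild eight rows, which are then written back unaligned
   through the vec_lvsr/vec_sel read-modify-write pattern. */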
    vector unsigned char tempA = vec_mergeh(src0, src8);
    vector unsigned char tempB;
    vector unsigned char tempC = vec_mergeh(src1, src9);
    vector unsigned char tempD;
    vector unsigned char tempE = vec_mergeh(src2, src10);
    vector unsigned char tempG = vec_mergeh(src3, src11);
    vector unsigned char tempI = vec_mergeh(src4, src12);
    vector unsigned char tempJ;
    vector unsigned char tempK = vec_mergeh(src5, src13);
    vector unsigned char tempL;
    vector unsigned char tempM = vec_mergeh(src6, src14);
    vector unsigned char tempO = vec_mergeh(src7, src15);

    vector unsigned char temp0 = vec_mergeh(tempA, tempI);
    vector unsigned char temp1 = vec_mergel(tempA, tempI);
    vector unsigned char temp2;
    vector unsigned char temp3;
    vector unsigned char temp4 = vec_mergeh(tempC, tempK);
    vector unsigned char temp5 = vec_mergel(tempC, tempK);
    vector unsigned char temp6;
    vector unsigned char temp7;
    vector unsigned char temp8 = vec_mergeh(tempE, tempM);
    vector unsigned char temp9 = vec_mergel(tempE, tempM);
    vector unsigned char temp12 = vec_mergeh(tempG, tempO);
    vector unsigned char temp13 = vec_mergel(tempG, tempO);

    tempA = vec_mergeh(temp0, temp8);
    tempB = vec_mergel(temp0, temp8);
    tempC = vec_mergeh(temp1, temp9);
    tempD = vec_mergel(temp1, temp9);
    tempI = vec_mergeh(temp4, temp12);
    tempJ = vec_mergel(temp4, temp12);
    tempK = vec_mergeh(temp5, temp13);
    tempL = vec_mergel(temp5, temp13);

    temp0 = vec_mergeh(tempA, tempI);
    temp1 = vec_mergel(tempA, tempI);
    temp2 = vec_mergeh(tempB, tempJ);
    temp3 = vec_mergel(tempB, tempJ);
    temp4 = vec_mergeh(tempC, tempK);
    temp5 = vec_mergel(tempC, tempK);
    temp6 = vec_mergeh(tempD, tempL);
    temp7 = vec_mergel(tempD, tempL);

    const vector signed char neg1 = vec_splat_s8(-1);

#define STORE_DOUBLE_LINE(i, j) \
    vector unsigned char dstA##i = vec_ld(i * stride, dst); \
    vector unsigned char dstB##i = vec_ld(i * stride + 16, dst); \
    vector unsigned char dstA##j = vec_ld(j * stride, dst); \
    vector unsigned char dstB##j = vec_ld(j * stride + 16, dst); \
    vector unsigned char align##i = vec_lvsr(i * stride, dst); \
    vector unsigned char align##j = vec_lvsr(j * stride, dst); \
    vector unsigned char mask##i = vec_perm(zero, (vector unsigned char)neg1, align##i); \
    vector unsigned char mask##j = vec_perm(zero, (vector unsigned char)neg1, align##j); \
    vector unsigned char dstR##i = vec_perm(temp##i, temp##i, align##i); \
    vector unsigned char dstR##j = vec_perm(temp##j, temp##j, align##j); \
    vector unsigned char dstAF##i = vec_sel(dstA##i, dstR##i, mask##i); \
    vector unsigned char dstBF##i = vec_sel(dstR##i, dstB##i, mask##i); \
    vector unsigned char dstAF##j = vec_sel(dstA##j, dstR##j, mask##j); \
    vector unsigned char dstBF##j = vec_sel(dstR##j, dstB##j, mask##j); \
    vec_st(dstAF##i, i * stride, dst); \
    vec_st(dstBF##i, i * stride + 16, dst); \
    vec_st(dstAF##j, j * stride, dst); \
    vec_st(dstBF##j, j * stride + 16, dst)

    STORE_DOUBLE_LINE(0, 1);
    STORE_DOUBLE_LINE(2, 3);
    STORE_DOUBLE_LINE(4, 5);
    STORE_DOUBLE_LINE(6, 7);
}