alpha_processing_sse2.c

// Copyright 2014 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Utilities for processing transparent channel.
//
// Author: Skal (pascal.massimino@gmail.com)

#include "./dsp.h"

#if defined(WEBP_USE_SSE2)
#include <emmintrin.h>

//------------------------------------------------------------------------------
static int DispatchAlpha_SSE2(const uint8_t* WEBP_RESTRICT alpha,
                              int alpha_stride, int width, int height,
                              uint8_t* WEBP_RESTRICT dst, int dst_stride) {
  // alpha_and stores an 'and' operation of all the alpha[] values. The final
  // value is not 0xff if any of the alpha[] is not equal to 0xff.
  uint32_t alpha_and = 0xff;
  int i, j;
  const __m128i zero = _mm_setzero_si128();
  const __m128i rgb_mask = _mm_set1_epi32(0xffffff00u);  // to preserve RGB
  const __m128i all_0xff = _mm_set_epi32(0, 0, ~0u, ~0u);
  __m128i all_alphas = all_0xff;

  // We must be able to access 3 extra bytes after the last written byte
  // 'dst[4 * width - 4]', because we don't know if alpha is the first or the
  // last byte of the quadruplet.
  const int limit = (width - 1) & ~7;

  for (j = 0; j < height; ++j) {
    __m128i* out = (__m128i*)dst;
    for (i = 0; i < limit; i += 8) {
      // load 8 alpha bytes
      const __m128i a0 = _mm_loadl_epi64((const __m128i*)&alpha[i]);
      const __m128i a1 = _mm_unpacklo_epi8(a0, zero);
      const __m128i a2_lo = _mm_unpacklo_epi16(a1, zero);
      const __m128i a2_hi = _mm_unpackhi_epi16(a1, zero);
      // load 8 dst pixels (32 bytes)
      const __m128i b0_lo = _mm_loadu_si128(out + 0);
      const __m128i b0_hi = _mm_loadu_si128(out + 1);
      // mask dst alpha values
      const __m128i b1_lo = _mm_and_si128(b0_lo, rgb_mask);
      const __m128i b1_hi = _mm_and_si128(b0_hi, rgb_mask);
      // combine
      const __m128i b2_lo = _mm_or_si128(b1_lo, a2_lo);
      const __m128i b2_hi = _mm_or_si128(b1_hi, a2_hi);
      // store
      _mm_storeu_si128(out + 0, b2_lo);
      _mm_storeu_si128(out + 1, b2_hi);
      // accumulate eight alpha 'and' in parallel
      all_alphas = _mm_and_si128(all_alphas, a0);
      out += 2;
    }
    for (; i < width; ++i) {
      const uint32_t alpha_value = alpha[i];
      dst[4 * i] = alpha_value;
      alpha_and &= alpha_value;
    }
    alpha += alpha_stride;
    dst += dst_stride;
  }
  // Combine the eight alpha 'and' into an 8-bit mask.
  alpha_and &= _mm_movemask_epi8(_mm_cmpeq_epi8(all_alphas, all_0xff));
  return (alpha_and != 0xff);
}
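
// For reference, a rough scalar sketch of what DispatchAlpha_SSE2 computes
// (the portable fallback lives in alpha_processing.c; names here are
// illustrative only):
//
//   static int DispatchAlpha(const uint8_t* alpha, int alpha_stride,
//                            int width, int height,
//                            uint8_t* dst, int dst_stride) {
//     uint32_t alpha_mask = 0xff;
//     int i, j;
//     for (j = 0; j < height; ++j) {
//       for (i = 0; i < width; ++i) {
//         const uint32_t alpha_value = alpha[i];
//         dst[4 * i] = alpha_value;    // write into the A channel
//         alpha_mask &= alpha_value;   // accumulate the 'and'
//       }
//       alpha += alpha_stride;
//       dst += dst_stride;
//     }
//     return (alpha_mask != 0xff);     // 1 if any alpha differs from 0xff
//   }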

static void DispatchAlphaToGreen_SSE2(const uint8_t* WEBP_RESTRICT alpha,
                                      int alpha_stride, int width, int height,
                                      uint32_t* WEBP_RESTRICT dst,
                                      int dst_stride) {
  int i, j;
  const __m128i zero = _mm_setzero_si128();
  const int limit = width & ~15;
  for (j = 0; j < height; ++j) {
    for (i = 0; i < limit; i += 16) {   // process 16 alpha bytes
      const __m128i a0 = _mm_loadu_si128((const __m128i*)&alpha[i]);
      const __m128i a1 = _mm_unpacklo_epi8(zero, a0);  // note the 'zero' first!
      const __m128i b1 = _mm_unpackhi_epi8(zero, a0);
      const __m128i a2_lo = _mm_unpacklo_epi16(a1, zero);
      const __m128i b2_lo = _mm_unpacklo_epi16(b1, zero);
      const __m128i a2_hi = _mm_unpackhi_epi16(a1, zero);
      const __m128i b2_hi = _mm_unpackhi_epi16(b1, zero);
      _mm_storeu_si128((__m128i*)&dst[i +  0], a2_lo);
      _mm_storeu_si128((__m128i*)&dst[i +  4], a2_hi);
      _mm_storeu_si128((__m128i*)&dst[i +  8], b2_lo);
      _mm_storeu_si128((__m128i*)&dst[i + 12], b2_hi);
    }
    for (; i < width; ++i) dst[i] = alpha[i] << 8;
    alpha += alpha_stride;
    dst += dst_stride;
  }
}
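
// Illustration of the 'zero first' unpack used above: with alpha bytes
// a0..a15 in a0, _mm_unpacklo_epi8(zero, a0) interleaves to
// [00 a0 00 a1 ... 00 a7] (low byte first), i.e. eight 16-bit lanes holding
// a0 << 8 .. a7 << 8. Widening once more against zero yields 32-bit pixels
// with the alpha value already sitting in the green (second) byte, matching
// the scalar 'dst[i] = alpha[i] << 8' leftover loop.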

static int ExtractAlpha_SSE2(const uint8_t* WEBP_RESTRICT argb, int argb_stride,
                             int width, int height,
                             uint8_t* WEBP_RESTRICT alpha, int alpha_stride) {
  // alpha_and stores an 'and' operation of all the alpha[] values. The final
  // value is not 0xff if any of the alpha[] is not equal to 0xff.
  uint32_t alpha_and = 0xff;
  int i, j;
  const __m128i a_mask = _mm_set1_epi32(0xffu);  // to preserve alpha
  const __m128i all_0xff = _mm_set_epi32(0, 0, ~0u, ~0u);
  __m128i all_alphas = all_0xff;

  // We must be able to access 3 extra bytes after the last loaded byte
  // 'src[4 * width - 4]', because we don't know if alpha is the first or the
  // last byte of the quadruplet.
  const int limit = (width - 1) & ~7;

  for (j = 0; j < height; ++j) {
    const __m128i* src = (const __m128i*)argb;
    for (i = 0; i < limit; i += 8) {
      // load 32 argb bytes
      const __m128i a0 = _mm_loadu_si128(src + 0);
      const __m128i a1 = _mm_loadu_si128(src + 1);
      const __m128i b0 = _mm_and_si128(a0, a_mask);
      const __m128i b1 = _mm_and_si128(a1, a_mask);
      const __m128i c0 = _mm_packs_epi32(b0, b1);
      const __m128i d0 = _mm_packus_epi16(c0, c0);
      // store
      _mm_storel_epi64((__m128i*)&alpha[i], d0);
      // accumulate eight alpha 'and' in parallel
      all_alphas = _mm_and_si128(all_alphas, d0);
      src += 2;
    }
    for (; i < width; ++i) {
      const uint32_t alpha_value = argb[4 * i];
      alpha[i] = alpha_value;
      alpha_and &= alpha_value;
    }
    argb += argb_stride;
    alpha += alpha_stride;
  }
  // Combine the eight alpha 'and' into an 8-bit mask.
  alpha_and &= _mm_movemask_epi8(_mm_cmpeq_epi8(all_alphas, all_0xff));
  return (alpha_and == 0xff);
}

//------------------------------------------------------------------------------
// Non-dither premultiplied modes

#define MULTIPLIER(a)     ((a) * 0x8081)
#define PREMULTIPLY(x, m) (((x) * (m)) >> 23)

// We can't use a 'const int' for the SHUFFLE value, because it has to be an
// immediate in the _mm_shufflexx_epi16() instruction. We really need a macro.
// We use: v / 255 = (v * 0x8081) >> 23, where v = alpha * {r, g, b} is a
// 16-bit value.
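// Why 0x8081 works: 0x8081 = 32897 = ceil(2^23 / 255), so for any v in
// [0, 255 * 255] the expression (v * 0x8081) >> 23 equals v / 255 exactly
// (integer division). E.g. v = 0xff * 0xff = 65025 gives
// (65025 * 32897) >> 23 = 2139127425 >> 23 = 255.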
#define APPLY_ALPHA(RGBX, SHUFFLE) do {                               \
  const __m128i argb0 = _mm_loadu_si128((const __m128i*)&(RGBX));     \
  const __m128i argb1_lo = _mm_unpacklo_epi8(argb0, zero);            \
  const __m128i argb1_hi = _mm_unpackhi_epi8(argb0, zero);            \
  const __m128i alpha0_lo = _mm_or_si128(argb1_lo, kMask);            \
  const __m128i alpha0_hi = _mm_or_si128(argb1_hi, kMask);            \
  const __m128i alpha1_lo = _mm_shufflelo_epi16(alpha0_lo, SHUFFLE);  \
  const __m128i alpha1_hi = _mm_shufflelo_epi16(alpha0_hi, SHUFFLE);  \
  const __m128i alpha2_lo = _mm_shufflehi_epi16(alpha1_lo, SHUFFLE);  \
  const __m128i alpha2_hi = _mm_shufflehi_epi16(alpha1_hi, SHUFFLE);  \
  /* alpha2 = [ff a0 a0 a0][ff a1 a1 a1] */                           \
  const __m128i A0_lo = _mm_mullo_epi16(alpha2_lo, argb1_lo);         \
  const __m128i A0_hi = _mm_mullo_epi16(alpha2_hi, argb1_hi);         \
  const __m128i A1_lo = _mm_mulhi_epu16(A0_lo, kMult);                \
  const __m128i A1_hi = _mm_mulhi_epu16(A0_hi, kMult);                \
  const __m128i A2_lo = _mm_srli_epi16(A1_lo, 7);                     \
  const __m128i A2_hi = _mm_srli_epi16(A1_hi, 7);                     \
  const __m128i A3 = _mm_packus_epi16(A2_lo, A2_hi);                  \
  _mm_storeu_si128((__m128i*)&(RGBX), A3);                            \
} while (0)

static void ApplyAlphaMultiply_SSE2(uint8_t* rgba, int alpha_first,
                                    int w, int h, int stride) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i kMult = _mm_set1_epi16(0x8081u);
  const __m128i kMask = _mm_set_epi16(0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0);
  const int kSpan = 4;
  while (h-- > 0) {
    uint32_t* const rgbx = (uint32_t*)rgba;
    int i;
    if (!alpha_first) {
      for (i = 0; i + kSpan <= w; i += kSpan) {
        APPLY_ALPHA(rgbx[i], _MM_SHUFFLE(2, 3, 3, 3));
      }
    } else {
      for (i = 0; i + kSpan <= w; i += kSpan) {
        APPLY_ALPHA(rgbx[i], _MM_SHUFFLE(0, 0, 0, 1));
      }
    }
    // Finish with left-overs.
    for (; i < w; ++i) {
      uint8_t* const rgb = rgba + (alpha_first ? 1 : 0);
      const uint8_t* const alpha = rgba + (alpha_first ? 0 : 3);
      const uint32_t a = alpha[4 * i];
      if (a != 0xff) {
        const uint32_t mult = MULTIPLIER(a);
        rgb[4 * i + 0] = PREMULTIPLY(rgb[4 * i + 0], mult);
        rgb[4 * i + 1] = PREMULTIPLY(rgb[4 * i + 1], mult);
        rgb[4 * i + 2] = PREMULTIPLY(rgb[4 * i + 2], mult);
      }
    }
    rgba += stride;
  }
}
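
// Design note: ORing kMask into the widened pixel before the shuffle plants
// 0xff in the lanes that the shuffle then moves into the alpha position, so
// the alpha channel itself is multiplied by 0xff and survives unchanged
// (a * 255 / 255 == a), while r, g and b are scaled by the pixel's alpha.
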
#undef MULTIPLIER
#undef PREMULTIPLY

//------------------------------------------------------------------------------
// Alpha detection

static int HasAlpha8b_SSE2(const uint8_t* src, int length) {
  const __m128i all_0xff = _mm_set1_epi8((char)0xff);
  int i = 0;
  for (; i + 16 <= length; i += 16) {
    const __m128i v = _mm_loadu_si128((const __m128i*)(src + i));
    const __m128i bits = _mm_cmpeq_epi8(v, all_0xff);
    const int mask = _mm_movemask_epi8(bits);
    if (mask != 0xffff) return 1;
  }
  for (; i < length; ++i) if (src[i] != 0xff) return 1;
  return 0;
}
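
// The compare-and-movemask idiom above tests 16 bytes per iteration:
// _mm_cmpeq_epi8() yields 0xff in every byte equal to 0xff, and
// _mm_movemask_epi8() gathers the 16 byte sign bits, so any result other
// than 0xffff means at least one alpha value differs from 0xff.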

static int HasAlpha32b_SSE2(const uint8_t* src, int length) {
  const __m128i alpha_mask = _mm_set1_epi32(0xff);
  const __m128i all_0xff = _mm_set1_epi8((char)0xff);
  int i = 0;
  // We don't know if we can access the last 3 bytes after the last alpha
  // value 'src[4 * length - 4]' (because we don't know if alpha is the first
  // or the last byte of the quadruplet). Hence the '-3' protection below.
  length = length * 4 - 3;   // size in bytes
  for (; i + 64 <= length; i += 64) {
    const __m128i a0 = _mm_loadu_si128((const __m128i*)(src + i +  0));
    const __m128i a1 = _mm_loadu_si128((const __m128i*)(src + i + 16));
    const __m128i a2 = _mm_loadu_si128((const __m128i*)(src + i + 32));
    const __m128i a3 = _mm_loadu_si128((const __m128i*)(src + i + 48));
    const __m128i b0 = _mm_and_si128(a0, alpha_mask);
    const __m128i b1 = _mm_and_si128(a1, alpha_mask);
    const __m128i b2 = _mm_and_si128(a2, alpha_mask);
    const __m128i b3 = _mm_and_si128(a3, alpha_mask);
    const __m128i c0 = _mm_packs_epi32(b0, b1);
    const __m128i c1 = _mm_packs_epi32(b2, b3);
    const __m128i d  = _mm_packus_epi16(c0, c1);
    const __m128i bits = _mm_cmpeq_epi8(d, all_0xff);
    const int mask = _mm_movemask_epi8(bits);
    if (mask != 0xffff) return 1;
  }
  for (; i + 32 <= length; i += 32) {
    const __m128i a0 = _mm_loadu_si128((const __m128i*)(src + i +  0));
    const __m128i a1 = _mm_loadu_si128((const __m128i*)(src + i + 16));
    const __m128i b0 = _mm_and_si128(a0, alpha_mask);
    const __m128i b1 = _mm_and_si128(a1, alpha_mask);
    const __m128i c = _mm_packs_epi32(b0, b1);
    const __m128i d = _mm_packus_epi16(c, c);
    const __m128i bits = _mm_cmpeq_epi8(d, all_0xff);
    const int mask = _mm_movemask_epi8(bits);
    if (mask != 0xffff) return 1;
  }
  for (; i <= length; i += 4) if (src[i] != 0xff) return 1;
  return 0;
}

static void AlphaReplace_SSE2(uint32_t* src, int length, uint32_t color) {
  const __m128i m_color = _mm_set1_epi32(color);
  const __m128i zero = _mm_setzero_si128();
  int i = 0;
  for (; i + 8 <= length; i += 8) {
    const __m128i a0 = _mm_loadu_si128((const __m128i*)(src + i + 0));
    const __m128i a1 = _mm_loadu_si128((const __m128i*)(src + i + 4));
    const __m128i b0 = _mm_srai_epi32(a0, 24);
    const __m128i b1 = _mm_srai_epi32(a1, 24);
    const __m128i c0 = _mm_cmpeq_epi32(b0, zero);
    const __m128i c1 = _mm_cmpeq_epi32(b1, zero);
    const __m128i d0 = _mm_and_si128(c0, m_color);
    const __m128i d1 = _mm_and_si128(c1, m_color);
    const __m128i e0 = _mm_andnot_si128(c0, a0);
    const __m128i e1 = _mm_andnot_si128(c1, a1);
    _mm_storeu_si128((__m128i*)(src + i + 0), _mm_or_si128(d0, e0));
    _mm_storeu_si128((__m128i*)(src + i + 4), _mm_or_si128(d1, e1));
  }
  for (; i < length; ++i) if ((src[i] >> 24) == 0) src[i] = color;
}

// -----------------------------------------------------------------------------
// Apply alpha value to rows

static void MultARGBRow_SSE2(uint32_t* const ptr, int width, int inverse) {
  int x = 0;
  if (!inverse) {
    const int kSpan = 2;
    const __m128i zero = _mm_setzero_si128();
    const __m128i k128 = _mm_set1_epi16(128);
    const __m128i kMult = _mm_set1_epi16(0x0101);
    const __m128i kMask = _mm_set_epi16(0, 0xff, 0, 0, 0, 0xff, 0, 0);
    for (x = 0; x + kSpan <= width; x += kSpan) {
      // To compute 'result = (int)(a * x / 255. + .5)', we use:
      //   tmp = a * v + 128, result = (tmp * 0x0101u) >> 16
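      // This rounding is exact: tmp * 0x0101 == tmp * 257, and
      // (tmp * 257) >> 16 == (tmp + (tmp >> 8)) >> 8, the classic
      // rounded division by 255 for 16-bit tmp.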
      const __m128i A0 = _mm_loadl_epi64((const __m128i*)&ptr[x]);
      const __m128i A1 = _mm_unpacklo_epi8(A0, zero);
      const __m128i A2 = _mm_or_si128(A1, kMask);
      const __m128i A3 = _mm_shufflelo_epi16(A2, _MM_SHUFFLE(2, 3, 3, 3));
      const __m128i A4 = _mm_shufflehi_epi16(A3, _MM_SHUFFLE(2, 3, 3, 3));
      // here, A4 = [ff a0 a0 a0][ff a1 a1 a1]
      const __m128i A5 = _mm_mullo_epi16(A4, A1);
      const __m128i A6 = _mm_add_epi16(A5, k128);
      const __m128i A7 = _mm_mulhi_epu16(A6, kMult);
      const __m128i A10 = _mm_packus_epi16(A7, zero);
      _mm_storel_epi64((__m128i*)&ptr[x], A10);
    }
  }
  width -= x;
  if (width > 0) WebPMultARGBRow_C(ptr + x, width, inverse);
}
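
// Left-over pixels, and the whole 'inverse' (un-premultiply) case, for which
// there is no SSE2 fast path here, are delegated to the portable
// WebPMultARGBRow_C().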

static void MultRow_SSE2(uint8_t* WEBP_RESTRICT const ptr,
                         const uint8_t* WEBP_RESTRICT const alpha,
                         int width, int inverse) {
  int x = 0;
  if (!inverse) {
    const __m128i zero = _mm_setzero_si128();
    const __m128i k128 = _mm_set1_epi16(128);
    const __m128i kMult = _mm_set1_epi16(0x0101);
    for (x = 0; x + 8 <= width; x += 8) {
      const __m128i v0 = _mm_loadl_epi64((__m128i*)&ptr[x]);
      const __m128i a0 = _mm_loadl_epi64((const __m128i*)&alpha[x]);
      const __m128i v1 = _mm_unpacklo_epi8(v0, zero);
      const __m128i a1 = _mm_unpacklo_epi8(a0, zero);
      const __m128i v2 = _mm_mullo_epi16(v1, a1);
      const __m128i v3 = _mm_add_epi16(v2, k128);
      const __m128i v4 = _mm_mulhi_epu16(v3, kMult);
      const __m128i v5 = _mm_packus_epi16(v4, zero);
      _mm_storel_epi64((__m128i*)&ptr[x], v5);
    }
  }
  width -= x;
  if (width > 0) WebPMultRow_C(ptr + x, alpha + x, width, inverse);
}

//------------------------------------------------------------------------------
// Entry point

extern void WebPInitAlphaProcessingSSE2(void);

WEBP_TSAN_IGNORE_FUNCTION void WebPInitAlphaProcessingSSE2(void) {
  WebPMultARGBRow = MultARGBRow_SSE2;
  WebPMultRow = MultRow_SSE2;
  WebPApplyAlphaMultiply = ApplyAlphaMultiply_SSE2;
  WebPDispatchAlpha = DispatchAlpha_SSE2;
  WebPDispatchAlphaToGreen = DispatchAlphaToGreen_SSE2;
  WebPExtractAlpha = ExtractAlpha_SSE2;
  WebPHasAlpha8b = HasAlpha8b_SSE2;
  WebPHasAlpha32b = HasAlpha32b_SSE2;
  WebPAlphaReplace = AlphaReplace_SSE2;
}
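
// This initializer is expected to be invoked by the generic
// WebPInitAlphaProcessing() dispatcher (alpha_processing.c) once runtime CPU
// detection reports SSE2 support; it simply rebinds the function pointers
// declared in dsp.h to the SSE2 variants above.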

#else  // !WEBP_USE_SSE2

WEBP_DSP_INIT_STUB(WebPInitAlphaProcessingSSE2)

#endif  // WEBP_USE_SSE2