/* indeo3.c */
/*
 * Intel Indeo 3 (IV31, IV32, etc.) video decoder for ffmpeg
 * written, produced, and directed by Alan Smithee
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
  21. #include <stdio.h>
  22. #include <stdlib.h>
  23. #include <string.h>
  24. #include <unistd.h>
  25. #include "avcodec.h"
  26. #include "dsputil.h"
  27. #include "bytestream.h"
  28. #include "indeo3data.h"
  29. typedef struct
  30. {
  31. uint8_t *Ybuf;
  32. uint8_t *Ubuf;
  33. uint8_t *Vbuf;
  34. unsigned short y_w, y_h;
  35. unsigned short uv_w, uv_h;
  36. } YUVBufs;
  37. typedef struct Indeo3DecodeContext {
  38. AVCodecContext *avctx;
  39. int width, height;
  40. AVFrame frame;
  41. uint8_t *buf;
  42. YUVBufs iv_frame[2];
  43. YUVBufs *cur_frame;
  44. YUVBufs *ref_frame;
  45. uint8_t *ModPred;
  46. uint8_t *corrector_type;
  47. } Indeo3DecodeContext;
  48. static const uint8_t corrector_type_0[24] = {
  49. 195, 159, 133, 115, 101, 93, 87, 77,
  50. 195, 159, 133, 115, 101, 93, 87, 77,
  51. 128, 79, 79, 79, 79, 79, 79, 79
  52. };
  53. static const uint8_t corrector_type_2[8] = { 9, 7, 6, 8, 5, 4, 3, 2 };
  54. static av_cold int build_modpred(Indeo3DecodeContext *s)
  55. {
  56. int i, j;
  57. if (!(s->ModPred = av_malloc(8 * 128)))
  58. return AVERROR(ENOMEM);
  59. for (i=0; i < 128; ++i) {
  60. s->ModPred[i+0*128] = i > 126 ? 254 : 2*(i + 1 - ((i + 1) % 2));
  61. s->ModPred[i+1*128] = i == 7 ? 20 :
  62. i == 119 ||
  63. i == 120 ? 236 : 2*(i + 2 - ((i + 1) % 3));
  64. s->ModPred[i+2*128] = i > 125 ? 248 : 2*(i + 2 - ((i + 2) % 4));
  65. s->ModPred[i+3*128] = 2*(i + 1 - ((i - 3) % 5));
  66. s->ModPred[i+4*128] = i == 8 ? 20 : 2*(i + 1 - ((i - 3) % 6));
  67. s->ModPred[i+5*128] = 2*(i + 4 - ((i + 3) % 7));
  68. s->ModPred[i+6*128] = i > 123 ? 240 : 2*(i + 4 - ((i + 4) % 8));
  69. s->ModPred[i+7*128] = 2*(i + 5 - ((i + 4) % 9));
  70. }
  71. if (!(s->corrector_type = av_malloc(24 * 256)))
  72. return AVERROR(ENOMEM);
  73. for (i=0; i < 24; ++i) {
  74. for (j=0; j < 256; ++j) {
  75. s->corrector_type[i*256+j] = j < corrector_type_0[i] ? 1 :
  76. j < 248 || (i == 16 && j == 248) ? 0 :
  77. corrector_type_2[j - 248];
  78. }
  79. }
  80. return 0;
  81. }
  82. static av_cold int iv_alloc_frames(Indeo3DecodeContext *s)
  83. {
  84. int luma_width = (s->width + 3) & ~3,
  85. luma_height = (s->height + 3) & ~3,
  86. chroma_width = ((luma_width >> 2) + 3) & ~3,
  87. chroma_height = ((luma_height >> 2) + 3) & ~3,
  88. luma_pixels = luma_width * luma_height,
  89. chroma_pixels = chroma_width * chroma_height,
  90. i;
  91. unsigned int bufsize = luma_pixels * 2 + luma_width * 3 +
  92. (chroma_pixels + chroma_width) * 4;
  93. if(!(s->buf = av_malloc(bufsize)))
  94. return AVERROR(ENOMEM);
  95. s->iv_frame[0].y_w = s->iv_frame[1].y_w = luma_width;
  96. s->iv_frame[0].y_h = s->iv_frame[1].y_h = luma_height;
  97. s->iv_frame[0].uv_w = s->iv_frame[1].uv_w = chroma_width;
  98. s->iv_frame[0].uv_h = s->iv_frame[1].uv_h = chroma_height;
  99. s->iv_frame[0].Ybuf = s->buf + luma_width;
  100. i = luma_pixels + luma_width * 2;
  101. s->iv_frame[1].Ybuf = s->buf + i;
  102. i += (luma_pixels + luma_width);
  103. s->iv_frame[0].Ubuf = s->buf + i;
  104. i += (chroma_pixels + chroma_width);
  105. s->iv_frame[1].Ubuf = s->buf + i;
  106. i += (chroma_pixels + chroma_width);
  107. s->iv_frame[0].Vbuf = s->buf + i;
  108. i += (chroma_pixels + chroma_width);
  109. s->iv_frame[1].Vbuf = s->buf + i;
  110. for(i = 1; i <= luma_width; i++)
  111. s->iv_frame[0].Ybuf[-i] = s->iv_frame[1].Ybuf[-i] =
  112. s->iv_frame[0].Ubuf[-i] = 0x80;
  113. for(i = 1; i <= chroma_width; i++) {
  114. s->iv_frame[1].Ubuf[-i] = 0x80;
  115. s->iv_frame[0].Vbuf[-i] = 0x80;
  116. s->iv_frame[1].Vbuf[-i] = 0x80;
  117. s->iv_frame[1].Vbuf[chroma_pixels+i-1] = 0x80;
  118. }
  119. return 0;
  120. }
  121. static av_cold void iv_free_func(Indeo3DecodeContext *s)
  122. {
  123. av_free(s->buf);
  124. av_free(s->ModPred);
  125. av_free(s->corrector_type);
  126. }
  127. struct ustr {
  128. long xpos;
  129. long ypos;
  130. long width;
  131. long height;
  132. long split_flag;
  133. long split_direction;
  134. long usl7;
  135. };
  136. #define LV1_CHECK(buf1,rle_v3,lv1,lp2) \
  137. if((lv1 & 0x80) != 0) { \
  138. if(rle_v3 != 0) \
  139. rle_v3 = 0; \
  140. else { \
  141. rle_v3 = 1; \
  142. buf1 -= 2; \
  143. } \
  144. } \
  145. lp2 = 4;
  146. #define RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3) \
  147. if(rle_v3 == 0) { \
  148. rle_v2 = *buf1; \
  149. rle_v1 = 1; \
  150. if(rle_v2 > 32) { \
  151. rle_v2 -= 32; \
  152. rle_v1 = 0; \
  153. } \
  154. rle_v3 = 1; \
  155. } \
  156. buf1--;
  157. #define LP2_CHECK(buf1,rle_v3,lp2) \
  158. if(lp2 == 0 && rle_v3 != 0) \
  159. rle_v3 = 0; \
  160. else { \
  161. buf1--; \
  162. rle_v3 = 1; \
  163. }
  164. #define RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2) \
  165. rle_v2--; \
  166. if(rle_v2 == 0) { \
  167. rle_v3 = 0; \
  168. buf1 += 2; \
  169. } \
  170. lp2 = 4;
  171. static void iv_Decode_Chunk(Indeo3DecodeContext *s,
  172. uint8_t *cur, uint8_t *ref, int width, int height,
  173. const uint8_t *buf1, long cb_offset, const uint8_t *hdr,
  174. const uint8_t *buf2, int min_width_160)
  175. {
  176. uint8_t bit_buf;
  177. unsigned long bit_pos, lv, lv1, lv2;
  178. long *width_tbl, width_tbl_arr[10];
  179. const signed char *ref_vectors;
  180. uint8_t *cur_frm_pos, *ref_frm_pos, *cp, *cp2;
  181. uint32_t *cur_lp, *ref_lp;
  182. const uint32_t *correction_lp[2], *correctionloworder_lp[2], *correctionhighorder_lp[2];
  183. uint8_t *correction_type_sp[2];
  184. struct ustr strip_tbl[20], *strip;
  185. int i, j, k, lp1, lp2, flag1, cmd, blks_width, blks_height, region_160_width,
  186. rle_v1, rle_v2, rle_v3;
  187. unsigned short res;
  188. bit_buf = 0;
  189. ref_vectors = NULL;
  190. width_tbl = width_tbl_arr + 1;
  191. i = (width < 0 ? width + 3 : width)/4;
  192. for(j = -1; j < 8; j++)
  193. width_tbl[j] = i * j;
  194. strip = strip_tbl;
  195. for(region_160_width = 0; region_160_width < (width - min_width_160); region_160_width += min_width_160);
  196. strip->ypos = strip->xpos = 0;
  197. for(strip->width = min_width_160; width > strip->width; strip->width *= 2);
  198. strip->height = height;
  199. strip->split_direction = 0;
  200. strip->split_flag = 0;
  201. strip->usl7 = 0;
  202. bit_pos = 0;
  203. rle_v1 = rle_v2 = rle_v3 = 0;
  204. while(strip >= strip_tbl) {
  205. if(bit_pos <= 0) {
  206. bit_pos = 8;
  207. bit_buf = *buf1++;
  208. }
  209. bit_pos -= 2;
  210. cmd = (bit_buf >> bit_pos) & 0x03;
  211. if(cmd == 0) {
  212. strip++;
  213. if(strip >= strip_tbl + FF_ARRAY_ELEMS(strip_tbl)) {
  214. av_log(s->avctx, AV_LOG_WARNING, "out of range strip\n");
  215. break;
  216. }
  217. memcpy(strip, strip-1, sizeof(*strip));
  218. strip->split_flag = 1;
  219. strip->split_direction = 0;
  220. strip->height = (strip->height > 8 ? ((strip->height+8)>>4)<<3 : 4);
  221. continue;
  222. } else if(cmd == 1) {
  223. strip++;
  224. if(strip >= strip_tbl + FF_ARRAY_ELEMS(strip_tbl)) {
  225. av_log(s->avctx, AV_LOG_WARNING, "out of range strip\n");
  226. break;
  227. }
  228. memcpy(strip, strip-1, sizeof(*strip));
  229. strip->split_flag = 1;
  230. strip->split_direction = 1;
  231. strip->width = (strip->width > 8 ? ((strip->width+8)>>4)<<3 : 4);
  232. continue;
  233. } else if(cmd == 2) {
  234. if(strip->usl7 == 0) {
  235. strip->usl7 = 1;
  236. ref_vectors = NULL;
  237. continue;
  238. }
  239. } else if(cmd == 3) {
  240. if(strip->usl7 == 0) {
  241. strip->usl7 = 1;
  242. ref_vectors = (const signed char*)buf2 + (*buf1 * 2);
  243. buf1++;
  244. continue;
  245. }
  246. }
  247. cur_frm_pos = cur + width * strip->ypos + strip->xpos;
  248. if((blks_width = strip->width) < 0)
  249. blks_width += 3;
  250. blks_width >>= 2;
  251. blks_height = strip->height;
  252. if(ref_vectors != NULL) {
  253. ref_frm_pos = ref + (ref_vectors[0] + strip->ypos) * width +
  254. ref_vectors[1] + strip->xpos;
  255. } else
  256. ref_frm_pos = cur_frm_pos - width_tbl[4];
  257. if(cmd == 2) {
  258. if(bit_pos <= 0) {
  259. bit_pos = 8;
  260. bit_buf = *buf1++;
  261. }
  262. bit_pos -= 2;
  263. cmd = (bit_buf >> bit_pos) & 0x03;
  264. if(cmd == 0 || ref_vectors != NULL) {
  265. for(lp1 = 0; lp1 < blks_width; lp1++) {
  266. for(i = 0, j = 0; i < blks_height; i++, j += width_tbl[1])
  267. ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)ref_frm_pos)[j];
  268. cur_frm_pos += 4;
  269. ref_frm_pos += 4;
  270. }
  271. } else if(cmd != 1)
  272. return;
  273. } else {
  274. k = *buf1 >> 4;
  275. j = *buf1 & 0x0f;
  276. buf1++;
  277. lv = j + cb_offset;
  278. if((lv - 8) <= 7 && (k == 0 || k == 3 || k == 10)) {
  279. cp2 = s->ModPred + ((lv - 8) << 7);
  280. cp = ref_frm_pos;
  281. for(i = 0; i < blks_width << 2; i++) {
  282. int v = *cp >> 1;
  283. *(cp++) = cp2[v];
  284. }
  285. }
  286. if(k == 1 || k == 4) {
  287. lv = (hdr[j] & 0xf) + cb_offset;
  288. correction_type_sp[0] = s->corrector_type + (lv << 8);
  289. correction_lp[0] = correction + (lv << 8);
  290. lv = (hdr[j] >> 4) + cb_offset;
  291. correction_lp[1] = correction + (lv << 8);
  292. correction_type_sp[1] = s->corrector_type + (lv << 8);
  293. } else {
  294. correctionloworder_lp[0] = correctionloworder_lp[1] = correctionloworder + (lv << 8);
  295. correctionhighorder_lp[0] = correctionhighorder_lp[1] = correctionhighorder + (lv << 8);
  296. correction_type_sp[0] = correction_type_sp[1] = s->corrector_type + (lv << 8);
  297. correction_lp[0] = correction_lp[1] = correction + (lv << 8);
  298. }
  299. switch(k) {
  300. case 1:
  301. case 0: /********** CASE 0 **********/
  302. for( ; blks_height > 0; blks_height -= 4) {
  303. for(lp1 = 0; lp1 < blks_width; lp1++) {
  304. for(lp2 = 0; lp2 < 4; ) {
  305. k = *buf1++;
  306. cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2];
  307. ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2];
  308. switch(correction_type_sp[0][k]) {
  309. case 0:
  310. *cur_lp = le2me_32(((le2me_32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
  311. lp2++;
  312. break;
  313. case 1:
  314. res = ((le2me_16(((unsigned short *)(ref_lp))[0]) >> 1) + correction_lp[lp2 & 0x01][*buf1]) << 1;
  315. ((unsigned short *)cur_lp)[0] = le2me_16(res);
  316. res = ((le2me_16(((unsigned short *)(ref_lp))[1]) >> 1) + correction_lp[lp2 & 0x01][k]) << 1;
  317. ((unsigned short *)cur_lp)[1] = le2me_16(res);
  318. buf1++;
  319. lp2++;
  320. break;
  321. case 2:
  322. if(lp2 == 0) {
  323. for(i = 0, j = 0; i < 2; i++, j += width_tbl[1])
  324. cur_lp[j] = ref_lp[j];
  325. lp2 += 2;
  326. }
  327. break;
  328. case 3:
  329. if(lp2 < 2) {
  330. for(i = 0, j = 0; i < (3 - lp2); i++, j += width_tbl[1])
  331. cur_lp[j] = ref_lp[j];
  332. lp2 = 3;
  333. }
  334. break;
  335. case 8:
  336. if(lp2 == 0) {
  337. RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
  338. if(rle_v1 == 1 || ref_vectors != NULL) {
  339. for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
  340. cur_lp[j] = ref_lp[j];
  341. }
  342. RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
  343. break;
  344. } else {
  345. rle_v1 = 1;
  346. rle_v2 = *buf1 - 1;
  347. }
  348. case 5:
  349. LP2_CHECK(buf1,rle_v3,lp2)
  350. case 4:
  351. for(i = 0, j = 0; i < (4 - lp2); i++, j += width_tbl[1])
  352. cur_lp[j] = ref_lp[j];
  353. lp2 = 4;
  354. break;
  355. case 7:
  356. if(rle_v3 != 0)
  357. rle_v3 = 0;
  358. else {
  359. buf1--;
  360. rle_v3 = 1;
  361. }
  362. case 6:
  363. if(ref_vectors != NULL) {
  364. for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
  365. cur_lp[j] = ref_lp[j];
  366. }
  367. lp2 = 4;
  368. break;
  369. case 9:
  370. lv1 = *buf1++;
  371. lv = (lv1 & 0x7F) << 1;
  372. lv += (lv << 8);
  373. lv += (lv << 16);
  374. for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
  375. cur_lp[j] = lv;
  376. LV1_CHECK(buf1,rle_v3,lv1,lp2)
  377. break;
  378. default:
  379. return;
  380. }
  381. }
  382. cur_frm_pos += 4;
  383. ref_frm_pos += 4;
  384. }
  385. cur_frm_pos += ((width - blks_width) * 4);
  386. ref_frm_pos += ((width - blks_width) * 4);
  387. }
  388. break;
  389. case 4:
  390. case 3: /********** CASE 3 **********/
  391. if(ref_vectors != NULL)
  392. return;
  393. flag1 = 1;
  394. for( ; blks_height > 0; blks_height -= 8) {
  395. for(lp1 = 0; lp1 < blks_width; lp1++) {
  396. for(lp2 = 0; lp2 < 4; ) {
  397. k = *buf1++;
  398. cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
  399. ref_lp = ((uint32_t *)cur_frm_pos) + width_tbl[(lp2 * 2) - 1];
  400. switch(correction_type_sp[lp2 & 0x01][k]) {
  401. case 0:
  402. cur_lp[width_tbl[1]] = le2me_32(((le2me_32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
  403. if(lp2 > 0 || flag1 == 0 || strip->ypos != 0)
  404. cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
  405. else
  406. cur_lp[0] = le2me_32(((le2me_32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
  407. lp2++;
  408. break;
  409. case 1:
  410. res = ((le2me_16(((unsigned short *)ref_lp)[0]) >> 1) + correction_lp[lp2 & 0x01][*buf1]) << 1;
  411. ((unsigned short *)cur_lp)[width_tbl[2]] = le2me_16(res);
  412. res = ((le2me_16(((unsigned short *)ref_lp)[1]) >> 1) + correction_lp[lp2 & 0x01][k]) << 1;
  413. ((unsigned short *)cur_lp)[width_tbl[2]+1] = le2me_16(res);
  414. if(lp2 > 0 || flag1 == 0 || strip->ypos != 0)
  415. cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
  416. else
  417. cur_lp[0] = cur_lp[width_tbl[1]];
  418. buf1++;
  419. lp2++;
  420. break;
  421. case 2:
  422. if(lp2 == 0) {
  423. for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
  424. cur_lp[j] = *ref_lp;
  425. lp2 += 2;
  426. }
  427. break;
  428. case 3:
  429. if(lp2 < 2) {
  430. for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1])
  431. cur_lp[j] = *ref_lp;
  432. lp2 = 3;
  433. }
  434. break;
  435. case 6:
  436. lp2 = 4;
  437. break;
  438. case 7:
  439. if(rle_v3 != 0)
  440. rle_v3 = 0;
  441. else {
  442. buf1--;
  443. rle_v3 = 1;
  444. }
  445. lp2 = 4;
  446. break;
  447. case 8:
  448. if(lp2 == 0) {
  449. RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
  450. if(rle_v1 == 1) {
  451. for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
  452. cur_lp[j] = ref_lp[j];
  453. }
  454. RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
  455. break;
  456. } else {
  457. rle_v2 = (*buf1) - 1;
  458. rle_v1 = 1;
  459. }
  460. case 5:
  461. LP2_CHECK(buf1,rle_v3,lp2)
  462. case 4:
  463. for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1])
  464. cur_lp[j] = *ref_lp;
  465. lp2 = 4;
  466. break;
  467. case 9:
  468. av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
  469. lv1 = *buf1++;
  470. lv = (lv1 & 0x7F) << 1;
  471. lv += (lv << 8);
  472. lv += (lv << 16);
  473. for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
  474. cur_lp[j] = lv;
  475. LV1_CHECK(buf1,rle_v3,lv1,lp2)
  476. break;
  477. default:
  478. return;
  479. }
  480. }
  481. cur_frm_pos += 4;
  482. }
  483. cur_frm_pos += (((width * 2) - blks_width) * 4);
  484. flag1 = 0;
  485. }
  486. break;
  487. case 10: /********** CASE 10 **********/
  488. if(ref_vectors == NULL) {
  489. flag1 = 1;
  490. for( ; blks_height > 0; blks_height -= 8) {
  491. for(lp1 = 0; lp1 < blks_width; lp1 += 2) {
  492. for(lp2 = 0; lp2 < 4; ) {
  493. k = *buf1++;
  494. cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
  495. ref_lp = ((uint32_t *)cur_frm_pos) + width_tbl[(lp2 * 2) - 1];
  496. lv1 = ref_lp[0];
  497. lv2 = ref_lp[1];
  498. if(lp2 == 0 && flag1 != 0) {
  499. #ifdef WORDS_BIGENDIAN
  500. lv1 = lv1 & 0xFF00FF00;
  501. lv1 = (lv1 >> 8) | lv1;
  502. lv2 = lv2 & 0xFF00FF00;
  503. lv2 = (lv2 >> 8) | lv2;
  504. #else
  505. lv1 = lv1 & 0x00FF00FF;
  506. lv1 = (lv1 << 8) | lv1;
  507. lv2 = lv2 & 0x00FF00FF;
  508. lv2 = (lv2 << 8) | lv2;
  509. #endif
  510. }
  511. switch(correction_type_sp[lp2 & 0x01][k]) {
  512. case 0:
  513. cur_lp[width_tbl[1]] = le2me_32(((le2me_32(lv1) >> 1) + correctionloworder_lp[lp2 & 0x01][k]) << 1);
  514. cur_lp[width_tbl[1]+1] = le2me_32(((le2me_32(lv2) >> 1) + correctionhighorder_lp[lp2 & 0x01][k]) << 1);
  515. if(lp2 > 0 || strip->ypos != 0 || flag1 == 0) {
  516. cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
  517. cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
  518. } else {
  519. cur_lp[0] = cur_lp[width_tbl[1]];
  520. cur_lp[1] = cur_lp[width_tbl[1]+1];
  521. }
  522. lp2++;
  523. break;
  524. case 1:
  525. cur_lp[width_tbl[1]] = le2me_32(((le2me_32(lv1) >> 1) + correctionloworder_lp[lp2 & 0x01][*buf1]) << 1);
  526. cur_lp[width_tbl[1]+1] = le2me_32(((le2me_32(lv2) >> 1) + correctionloworder_lp[lp2 & 0x01][k]) << 1);
  527. if(lp2 > 0 || strip->ypos != 0 || flag1 == 0) {
  528. cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
  529. cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
  530. } else {
  531. cur_lp[0] = cur_lp[width_tbl[1]];
  532. cur_lp[1] = cur_lp[width_tbl[1]+1];
  533. }
  534. buf1++;
  535. lp2++;
  536. break;
  537. case 2:
  538. if(lp2 == 0) {
  539. if(flag1 != 0) {
  540. for(i = 0, j = width_tbl[1]; i < 3; i++, j += width_tbl[1]) {
  541. cur_lp[j] = lv1;
  542. cur_lp[j+1] = lv2;
  543. }
  544. cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
  545. cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
  546. } else {
  547. for(i = 0, j = 0; i < 4; i++, j += width_tbl[1]) {
  548. cur_lp[j] = lv1;
  549. cur_lp[j+1] = lv2;
  550. }
  551. }
  552. lp2 += 2;
  553. }
  554. break;
  555. case 3:
  556. if(lp2 < 2) {
  557. if(lp2 == 0 && flag1 != 0) {
  558. for(i = 0, j = width_tbl[1]; i < 5; i++, j += width_tbl[1]) {
  559. cur_lp[j] = lv1;
  560. cur_lp[j+1] = lv2;
  561. }
  562. cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
  563. cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
  564. } else {
  565. for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1]) {
  566. cur_lp[j] = lv1;
  567. cur_lp[j+1] = lv2;
  568. }
  569. }
  570. lp2 = 3;
  571. }
  572. break;
  573. case 8:
  574. if(lp2 == 0) {
  575. RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
  576. if(rle_v1 == 1) {
  577. if(flag1 != 0) {
  578. for(i = 0, j = width_tbl[1]; i < 7; i++, j += width_tbl[1]) {
  579. cur_lp[j] = lv1;
  580. cur_lp[j+1] = lv2;
  581. }
  582. cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
  583. cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
  584. } else {
  585. for(i = 0, j = 0; i < 8; i++, j += width_tbl[1]) {
  586. cur_lp[j] = lv1;
  587. cur_lp[j+1] = lv2;
  588. }
  589. }
  590. }
  591. RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
  592. break;
  593. } else {
  594. rle_v1 = 1;
  595. rle_v2 = (*buf1) - 1;
  596. }
  597. case 5:
  598. LP2_CHECK(buf1,rle_v3,lp2)
  599. case 4:
  600. if(lp2 == 0 && flag1 != 0) {
  601. for(i = 0, j = width_tbl[1]; i < 7; i++, j += width_tbl[1]) {
  602. cur_lp[j] = lv1;
  603. cur_lp[j+1] = lv2;
  604. }
  605. cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
  606. cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
  607. } else {
  608. for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1]) {
  609. cur_lp[j] = lv1;
  610. cur_lp[j+1] = lv2;
  611. }
  612. }
  613. lp2 = 4;
  614. break;
  615. case 6:
  616. lp2 = 4;
  617. break;
  618. case 7:
  619. if(lp2 == 0) {
  620. if(rle_v3 != 0)
  621. rle_v3 = 0;
  622. else {
  623. buf1--;
  624. rle_v3 = 1;
  625. }
  626. lp2 = 4;
  627. }
  628. break;
  629. case 9:
  630. av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
  631. lv1 = *buf1;
  632. lv = (lv1 & 0x7F) << 1;
  633. lv += (lv << 8);
  634. lv += (lv << 16);
  635. for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
  636. cur_lp[j] = lv;
  637. LV1_CHECK(buf1,rle_v3,lv1,lp2)
  638. break;
  639. default:
  640. return;
  641. }
  642. }
  643. cur_frm_pos += 8;
  644. }
  645. cur_frm_pos += (((width * 2) - blks_width) * 4);
  646. flag1 = 0;
  647. }
  648. } else {
  649. for( ; blks_height > 0; blks_height -= 8) {
  650. for(lp1 = 0; lp1 < blks_width; lp1 += 2) {
  651. for(lp2 = 0; lp2 < 4; ) {
  652. k = *buf1++;
  653. cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
  654. ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2 * 2];
  655. switch(correction_type_sp[lp2 & 0x01][k]) {
  656. case 0:
  657. lv1 = correctionloworder_lp[lp2 & 0x01][k];
  658. lv2 = correctionhighorder_lp[lp2 & 0x01][k];
  659. cur_lp[0] = le2me_32(((le2me_32(ref_lp[0]) >> 1) + lv1) << 1);
  660. cur_lp[1] = le2me_32(((le2me_32(ref_lp[1]) >> 1) + lv2) << 1);
  661. cur_lp[width_tbl[1]] = le2me_32(((le2me_32(ref_lp[width_tbl[1]]) >> 1) + lv1) << 1);
  662. cur_lp[width_tbl[1]+1] = le2me_32(((le2me_32(ref_lp[width_tbl[1]+1]) >> 1) + lv2) << 1);
  663. lp2++;
  664. break;
  665. case 1:
  666. lv1 = correctionloworder_lp[lp2 & 0x01][*buf1++];
  667. lv2 = correctionloworder_lp[lp2 & 0x01][k];
  668. cur_lp[0] = le2me_32(((le2me_32(ref_lp[0]) >> 1) + lv1) << 1);
  669. cur_lp[1] = le2me_32(((le2me_32(ref_lp[1]) >> 1) + lv2) << 1);
  670. cur_lp[width_tbl[1]] = le2me_32(((le2me_32(ref_lp[width_tbl[1]]) >> 1) + lv1) << 1);
  671. cur_lp[width_tbl[1]+1] = le2me_32(((le2me_32(ref_lp[width_tbl[1]+1]) >> 1) + lv2) << 1);
  672. lp2++;
  673. break;
  674. case 2:
  675. if(lp2 == 0) {
  676. for(i = 0, j = 0; i < 4; i++, j += width_tbl[1]) {
  677. cur_lp[j] = ref_lp[j];
  678. cur_lp[j+1] = ref_lp[j+1];
  679. }
  680. lp2 += 2;
  681. }
  682. break;
  683. case 3:
  684. if(lp2 < 2) {
  685. for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1]) {
  686. cur_lp[j] = ref_lp[j];
  687. cur_lp[j+1] = ref_lp[j+1];
  688. }
  689. lp2 = 3;
  690. }
  691. break;
  692. case 8:
  693. if(lp2 == 0) {
  694. RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
  695. for(i = 0, j = 0; i < 8; i++, j += width_tbl[1]) {
  696. ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)ref_frm_pos)[j];
  697. ((uint32_t *)cur_frm_pos)[j+1] = ((uint32_t *)ref_frm_pos)[j+1];
  698. }
  699. RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
  700. break;
  701. } else {
  702. rle_v1 = 1;
  703. rle_v2 = (*buf1) - 1;
  704. }
  705. case 5:
  706. case 7:
  707. LP2_CHECK(buf1,rle_v3,lp2)
  708. case 6:
  709. case 4:
  710. for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1]) {
  711. cur_lp[j] = ref_lp[j];
  712. cur_lp[j+1] = ref_lp[j+1];
  713. }
  714. lp2 = 4;
  715. break;
  716. case 9:
  717. av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
  718. lv1 = *buf1;
  719. lv = (lv1 & 0x7F) << 1;
  720. lv += (lv << 8);
  721. lv += (lv << 16);
  722. for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
  723. ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)cur_frm_pos)[j+1] = lv;
  724. LV1_CHECK(buf1,rle_v3,lv1,lp2)
  725. break;
  726. default:
  727. return;
  728. }
  729. }
  730. cur_frm_pos += 8;
  731. ref_frm_pos += 8;
  732. }
  733. cur_frm_pos += (((width * 2) - blks_width) * 4);
  734. ref_frm_pos += (((width * 2) - blks_width) * 4);
  735. }
  736. }
  737. break;
  738. case 11: /********** CASE 11 **********/
  739. if(ref_vectors == NULL)
  740. return;
  741. for( ; blks_height > 0; blks_height -= 8) {
  742. for(lp1 = 0; lp1 < blks_width; lp1++) {
  743. for(lp2 = 0; lp2 < 4; ) {
  744. k = *buf1++;
  745. cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
  746. ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2 * 2];
  747. switch(correction_type_sp[lp2 & 0x01][k]) {
  748. case 0:
  749. cur_lp[0] = le2me_32(((le2me_32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
  750. cur_lp[width_tbl[1]] = le2me_32(((le2me_32(ref_lp[width_tbl[1]]) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
  751. lp2++;
  752. break;
  753. case 1:
  754. lv1 = (unsigned short)(correction_lp[lp2 & 0x01][*buf1++]);
  755. lv2 = (unsigned short)(correction_lp[lp2 & 0x01][k]);
  756. res = (unsigned short)(((le2me_16(((unsigned short *)ref_lp)[0]) >> 1) + lv1) << 1);
  757. ((unsigned short *)cur_lp)[0] = le2me_16(res);
  758. res = (unsigned short)(((le2me_16(((unsigned short *)ref_lp)[1]) >> 1) + lv2) << 1);
  759. ((unsigned short *)cur_lp)[1] = le2me_16(res);
  760. res = (unsigned short)(((le2me_16(((unsigned short *)ref_lp)[width_tbl[2]]) >> 1) + lv1) << 1);
  761. ((unsigned short *)cur_lp)[width_tbl[2]] = le2me_16(res);
  762. res = (unsigned short)(((le2me_16(((unsigned short *)ref_lp)[width_tbl[2]+1]) >> 1) + lv2) << 1);
  763. ((unsigned short *)cur_lp)[width_tbl[2]+1] = le2me_16(res);
  764. lp2++;
  765. break;
  766. case 2:
  767. if(lp2 == 0) {
  768. for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
  769. cur_lp[j] = ref_lp[j];
  770. lp2 += 2;
  771. }
  772. break;
  773. case 3:
  774. if(lp2 < 2) {
  775. for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1])
  776. cur_lp[j] = ref_lp[j];
  777. lp2 = 3;
  778. }
  779. break;
  780. case 8:
  781. if(lp2 == 0) {
  782. RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
  783. for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
  784. cur_lp[j] = ref_lp[j];
  785. RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
  786. break;
  787. } else {
  788. rle_v1 = 1;
  789. rle_v2 = (*buf1) - 1;
  790. }
  791. case 5:
  792. case 7:
  793. LP2_CHECK(buf1,rle_v3,lp2)
  794. case 4:
  795. case 6:
  796. for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1])
  797. cur_lp[j] = ref_lp[j];
  798. lp2 = 4;
  799. break;
  800. case 9:
  801. av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
  802. lv1 = *buf1++;
  803. lv = (lv1 & 0x7F) << 1;
  804. lv += (lv << 8);
  805. lv += (lv << 16);
  806. for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
  807. cur_lp[j] = lv;
  808. LV1_CHECK(buf1,rle_v3,lv1,lp2)
  809. break;
  810. default:
  811. return;
  812. }
  813. }
  814. cur_frm_pos += 4;
  815. ref_frm_pos += 4;
  816. }
  817. cur_frm_pos += (((width * 2) - blks_width) * 4);
  818. ref_frm_pos += (((width * 2) - blks_width) * 4);
  819. }
  820. break;
  821. default:
  822. return;
  823. }
  824. }
  825. for( ; strip >= strip_tbl; strip--) {
  826. if(strip->split_flag != 0) {
  827. strip->split_flag = 0;
  828. strip->usl7 = (strip-1)->usl7;
  829. if(strip->split_direction) {
  830. strip->xpos += strip->width;
  831. strip->width = (strip-1)->width - strip->width;
  832. if(region_160_width <= strip->xpos && width < strip->width + strip->xpos)
  833. strip->width = width - strip->xpos;
  834. } else {
  835. strip->ypos += strip->height;
  836. strip->height = (strip-1)->height - strip->height;
  837. }
  838. break;
  839. }
  840. }
  841. }
  842. }
  843. static av_cold int indeo3_decode_init(AVCodecContext *avctx)
  844. {
  845. Indeo3DecodeContext *s = avctx->priv_data;
  846. int ret = 0;
  847. s->avctx = avctx;
  848. s->width = avctx->width;
  849. s->height = avctx->height;
  850. avctx->pix_fmt = PIX_FMT_YUV410P;
  851. if (!(ret = build_modpred(s)))
  852. ret = iv_alloc_frames(s);
  853. if (ret)
  854. iv_free_func(s);
  855. return ret;
  856. }
  857. static int iv_decode_frame(Indeo3DecodeContext *s,
  858. const uint8_t *buf, int buf_size)
  859. {
  860. unsigned int image_width, image_height,
  861. chroma_width, chroma_height;
  862. unsigned long flags, cb_offset, data_size,
  863. y_offset, v_offset, u_offset, mc_vector_count;
  864. const uint8_t *hdr_pos, *buf_pos;
  865. buf_pos = buf;
  866. buf_pos += 18; /* skip OS header (16 bytes) and version number */
  867. flags = bytestream_get_le16(&buf_pos);
  868. data_size = bytestream_get_le32(&buf_pos);
  869. cb_offset = *buf_pos++;
  870. buf_pos += 3; /* skip reserved byte and checksum */
  871. image_height = bytestream_get_le16(&buf_pos);
  872. image_width = bytestream_get_le16(&buf_pos);
  873. if(avcodec_check_dimensions(NULL, image_width, image_height))
  874. return -1;
  875. chroma_height = ((image_height >> 2) + 3) & 0x7ffc;
  876. chroma_width = ((image_width >> 2) + 3) & 0x7ffc;
  877. y_offset = bytestream_get_le32(&buf_pos);
  878. v_offset = bytestream_get_le32(&buf_pos);
  879. u_offset = bytestream_get_le32(&buf_pos);
  880. buf_pos += 4; /* reserved */
  881. hdr_pos = buf_pos;
  882. if(data_size == 0x80) return 4;
  883. if(FFMAX3(y_offset, v_offset, u_offset) >= buf_size-16) {
  884. av_log(s->avctx, AV_LOG_ERROR, "y/u/v offset outside buffer\n");
  885. return -1;
  886. }
  887. if(flags & 0x200) {
  888. s->cur_frame = s->iv_frame + 1;
  889. s->ref_frame = s->iv_frame;
  890. } else {
  891. s->cur_frame = s->iv_frame;
  892. s->ref_frame = s->iv_frame + 1;
  893. }
  894. buf_pos = buf + 16 + y_offset;
  895. mc_vector_count = bytestream_get_le32(&buf_pos);
  896. if(2LL*mc_vector_count >= buf_size-16-y_offset) {
  897. av_log(s->avctx, AV_LOG_ERROR, "mc_vector_count too large\n");
  898. return -1;
  899. }
  900. iv_Decode_Chunk(s, s->cur_frame->Ybuf, s->ref_frame->Ybuf, image_width,
  901. image_height, buf_pos + mc_vector_count * 2, cb_offset, hdr_pos, buf_pos,
  902. FFMIN(image_width, 160));
  903. if (!(s->avctx->flags & CODEC_FLAG_GRAY))
  904. {
  905. buf_pos = buf + 16 + v_offset;
  906. mc_vector_count = bytestream_get_le32(&buf_pos);
  907. if(2LL*mc_vector_count >= buf_size-16-v_offset) {
  908. av_log(s->avctx, AV_LOG_ERROR, "mc_vector_count too large\n");
  909. return -1;
  910. }
  911. iv_Decode_Chunk(s, s->cur_frame->Vbuf, s->ref_frame->Vbuf, chroma_width,
  912. chroma_height, buf_pos + mc_vector_count * 2, cb_offset, hdr_pos, buf_pos,
  913. FFMIN(chroma_width, 40));
  914. buf_pos = buf + 16 + u_offset;
  915. mc_vector_count = bytestream_get_le32(&buf_pos);
  916. if(2LL*mc_vector_count >= buf_size-16-u_offset) {
  917. av_log(s->avctx, AV_LOG_ERROR, "mc_vector_count too large\n");
  918. return -1;
  919. }
  920. iv_Decode_Chunk(s, s->cur_frame->Ubuf, s->ref_frame->Ubuf, chroma_width,
  921. chroma_height, buf_pos + mc_vector_count * 2, cb_offset, hdr_pos, buf_pos,
  922. FFMIN(chroma_width, 40));
  923. }
  924. return 8;
  925. }
  926. static int indeo3_decode_frame(AVCodecContext *avctx,
  927. void *data, int *data_size,
  928. const uint8_t *buf, int buf_size)
  929. {
  930. Indeo3DecodeContext *s=avctx->priv_data;
  931. uint8_t *src, *dest;
  932. int y;
  933. if (iv_decode_frame(s, buf, buf_size) < 0)
  934. return -1;
  935. if(s->frame.data[0])
  936. avctx->release_buffer(avctx, &s->frame);
  937. s->frame.reference = 0;
  938. if(avctx->get_buffer(avctx, &s->frame) < 0) {
  939. av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
  940. return -1;
  941. }
  942. src = s->cur_frame->Ybuf;
  943. dest = s->frame.data[0];
  944. for (y = 0; y < s->height; y++) {
  945. memcpy(dest, src, s->cur_frame->y_w);
  946. src += s->cur_frame->y_w;
  947. dest += s->frame.linesize[0];
  948. }
  949. if (!(s->avctx->flags & CODEC_FLAG_GRAY))
  950. {
  951. src = s->cur_frame->Ubuf;
  952. dest = s->frame.data[1];
  953. for (y = 0; y < s->height / 4; y++) {
  954. memcpy(dest, src, s->cur_frame->uv_w);
  955. src += s->cur_frame->uv_w;
  956. dest += s->frame.linesize[1];
  957. }
  958. src = s->cur_frame->Vbuf;
  959. dest = s->frame.data[2];
  960. for (y = 0; y < s->height / 4; y++) {
  961. memcpy(dest, src, s->cur_frame->uv_w);
  962. src += s->cur_frame->uv_w;
  963. dest += s->frame.linesize[2];
  964. }
  965. }
  966. *data_size=sizeof(AVFrame);
  967. *(AVFrame*)data= s->frame;
  968. return buf_size;
  969. }
  970. static av_cold int indeo3_decode_end(AVCodecContext *avctx)
  971. {
  972. Indeo3DecodeContext *s = avctx->priv_data;
  973. iv_free_func(s);
  974. return 0;
  975. }
/* Codec registration entry. Fields are positional; the names below
 * follow the AVCodec layout of this FFmpeg era — NOTE(review): verify
 * against the struct definition in avcodec.h. */
AVCodec indeo3_decoder = {
    "indeo3",                    /* name */
    CODEC_TYPE_VIDEO,            /* type */
    CODEC_ID_INDEO3,             /* id */
    sizeof(Indeo3DecodeContext), /* priv_data_size */
    indeo3_decode_init,          /* init */
    NULL,                        /* encode: decoder only */
    indeo3_decode_end,           /* close */
    indeo3_decode_frame,         /* decode */
    0,                           /* presumably capabilities — confirm */
    NULL,                        /* presumably next — confirm */
    .long_name = NULL_IF_CONFIG_SMALL("Intel Indeo 3"),
};