proresenc_anatoliy.c

/*
 * Apple ProRes encoder
 *
 * Copyright (c) 2011 Anatoliy Wasserman
 * Copyright (c) 2012 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Apple ProRes encoder (Anatoliy Wasserman version)
 * Known FOURCCs: 'ap4h' (444), 'apch' (HQ), 'apcn' (422), 'apcs' (LT), 'apco' (Proxy)
 */

#include "libavutil/mem.h"
#include "libavutil/mem_internal.h"
#include "libavutil/opt.h"
#include "avcodec.h"
#include "codec_internal.h"
#include "encode.h"
#include "profiles.h"
#include "proresdata.h"
#include "put_bits.h"
#include "bytestream.h"
#include "fdctdsp.h"

#define DEFAULT_SLICE_MB_WIDTH 8

static const AVProfile profiles[] = {
    { AV_PROFILE_PRORES_PROXY,    "apco"},
    { AV_PROFILE_PRORES_LT,       "apcs"},
    { AV_PROFILE_PRORES_STANDARD, "apcn"},
    { AV_PROFILE_PRORES_HQ,       "apch"},
    { AV_PROFILE_PRORES_4444,     "ap4h"},
    { AV_PROFILE_PRORES_XQ,       "ap4x"},
    { AV_PROFILE_UNKNOWN }
};

static const int qp_start_table[] = {  8, 3, 2, 1, 1, 1};
static const int qp_end_table[]   = { 13, 9, 6, 6, 5, 4};
static const int bitrate_table[]  = { 1000, 2100, 3500, 5400, 7000, 10000};

static const int valid_primaries[]  = { AVCOL_PRI_RESERVED0, AVCOL_PRI_BT709, AVCOL_PRI_UNSPECIFIED, AVCOL_PRI_BT470BG,
                                        AVCOL_PRI_SMPTE170M, AVCOL_PRI_BT2020, AVCOL_PRI_SMPTE431, AVCOL_PRI_SMPTE432, INT_MAX };
static const int valid_trc[]        = { AVCOL_TRC_RESERVED0, AVCOL_TRC_BT709, AVCOL_TRC_UNSPECIFIED, AVCOL_TRC_SMPTE2084,
                                        AVCOL_TRC_ARIB_STD_B67, INT_MAX };
static const int valid_colorspace[] = { AVCOL_SPC_BT709, AVCOL_SPC_UNSPECIFIED, AVCOL_SPC_SMPTE170M,
                                        AVCOL_SPC_BT2020_NCL, INT_MAX };

static const uint8_t QMAT_LUMA[6][64] = {
    {
         4,  7,  9, 11, 13, 14, 15, 63,
         7,  7, 11, 12, 14, 15, 63, 63,
         9, 11, 13, 14, 15, 63, 63, 63,
        11, 11, 13, 14, 63, 63, 63, 63,
        11, 13, 14, 63, 63, 63, 63, 63,
        13, 14, 63, 63, 63, 63, 63, 63,
        13, 63, 63, 63, 63, 63, 63, 63,
        63, 63, 63, 63, 63, 63, 63, 63
    }, {
         4,  5,  6,  7,  9, 11, 13, 15,
         5,  5,  7,  8, 11, 13, 15, 17,
         6,  7,  9, 11, 13, 15, 15, 17,
         7,  7,  9, 11, 13, 15, 17, 19,
         7,  9, 11, 13, 14, 16, 19, 23,
         9, 11, 13, 14, 16, 19, 23, 29,
         9, 11, 13, 15, 17, 21, 28, 35,
        11, 13, 16, 17, 21, 28, 35, 41
    }, {
         4,  4,  5,  5,  6,  7,  7,  9,
         4,  4,  5,  6,  7,  7,  9,  9,
         5,  5,  6,  7,  7,  9,  9, 10,
         5,  5,  6,  7,  7,  9,  9, 10,
         5,  6,  7,  7,  8,  9, 10, 12,
         6,  7,  7,  8,  9, 10, 12, 15,
         6,  7,  7,  9, 10, 11, 14, 17,
         7,  7,  9, 10, 11, 14, 17, 21
    }, {
         4,  4,  4,  4,  4,  4,  4,  4,
         4,  4,  4,  4,  4,  4,  4,  4,
         4,  4,  4,  4,  4,  4,  4,  4,
         4,  4,  4,  4,  4,  4,  4,  5,
         4,  4,  4,  4,  4,  4,  5,  5,
         4,  4,  4,  4,  4,  5,  5,  6,
         4,  4,  4,  4,  5,  5,  6,  7,
         4,  4,  4,  4,  5,  6,  7,  7
    }, { /* 444 */
         4,  4,  4,  4,  4,  4,  4,  4,
         4,  4,  4,  4,  4,  4,  4,  4,
         4,  4,  4,  4,  4,  4,  4,  4,
         4,  4,  4,  4,  4,  4,  4,  5,
         4,  4,  4,  4,  4,  4,  5,  5,
         4,  4,  4,  4,  4,  5,  5,  6,
         4,  4,  4,  4,  5,  5,  6,  7,
         4,  4,  4,  4,  5,  6,  7,  7
    }, { /* 444 XQ */
         2,  2,  2,  2,  2,  2,  2,  2,
         2,  2,  2,  2,  2,  2,  2,  2,
         2,  2,  2,  2,  2,  2,  2,  2,
         2,  2,  2,  2,  2,  2,  2,  3,
         2,  2,  2,  2,  2,  2,  3,  3,
         2,  2,  2,  2,  2,  3,  3,  3,
         2,  2,  2,  2,  3,  3,  3,  4,
         2,  2,  2,  2,  3,  3,  4,  4,
    }
};

static const uint8_t QMAT_CHROMA[6][64] = {
    {
         4,  7,  9, 11, 13, 14, 63, 63,
         7,  7, 11, 12, 14, 63, 63, 63,
         9, 11, 13, 14, 63, 63, 63, 63,
        11, 11, 13, 14, 63, 63, 63, 63,
        11, 13, 14, 63, 63, 63, 63, 63,
        13, 14, 63, 63, 63, 63, 63, 63,
        13, 63, 63, 63, 63, 63, 63, 63,
        63, 63, 63, 63, 63, 63, 63, 63
    }, {
         4,  5,  6,  7,  9, 11, 13, 15,
         5,  5,  7,  8, 11, 13, 15, 17,
         6,  7,  9, 11, 13, 15, 15, 17,
         7,  7,  9, 11, 13, 15, 17, 19,
         7,  9, 11, 13, 14, 16, 19, 23,
         9, 11, 13, 14, 16, 19, 23, 29,
         9, 11, 13, 15, 17, 21, 28, 35,
        11, 13, 16, 17, 21, 28, 35, 41
    }, {
         4,  4,  5,  5,  6,  7,  7,  9,
         4,  4,  5,  6,  7,  7,  9,  9,
         5,  5,  6,  7,  7,  9,  9, 10,
         5,  5,  6,  7,  7,  9,  9, 10,
         5,  6,  7,  7,  8,  9, 10, 12,
         6,  7,  7,  8,  9, 10, 12, 15,
         6,  7,  7,  9, 10, 11, 14, 17,
         7,  7,  9, 10, 11, 14, 17, 21
    }, {
         4,  4,  4,  4,  4,  4,  4,  4,
         4,  4,  4,  4,  4,  4,  4,  4,
         4,  4,  4,  4,  4,  4,  4,  4,
         4,  4,  4,  4,  4,  4,  4,  5,
         4,  4,  4,  4,  4,  4,  5,  5,
         4,  4,  4,  4,  4,  5,  5,  6,
         4,  4,  4,  4,  5,  5,  6,  7,
         4,  4,  4,  4,  5,  6,  7,  7
    }, { /* 444 */
         4,  4,  4,  4,  4,  4,  4,  4,
         4,  4,  4,  4,  4,  4,  4,  4,
         4,  4,  4,  4,  4,  4,  4,  4,
         4,  4,  4,  4,  4,  4,  4,  5,
         4,  4,  4,  4,  4,  4,  5,  5,
         4,  4,  4,  4,  4,  5,  5,  6,
         4,  4,  4,  4,  5,  5,  6,  7,
         4,  4,  4,  4,  5,  6,  7,  7
    }, { /* 444 XQ */
         4,  4,  4,  4,  4,  4,  4,  4,
         4,  4,  4,  4,  4,  4,  4,  4,
         4,  4,  4,  4,  4,  4,  4,  4,
         4,  4,  4,  4,  4,  4,  4,  5,
         4,  4,  4,  4,  4,  4,  5,  5,
         4,  4,  4,  4,  4,  5,  5,  6,
         4,  4,  4,  4,  5,  5,  6,  7,
         4,  4,  4,  4,  5,  6,  7,  7
    }
};
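
/*
 * One base quantization matrix per profile (Proxy, LT, 422, HQ, 4444, 4444 XQ).
 * At init time each base matrix is multiplied by every quantizer index 1..16
 * into ctx->qmat_luma / ctx->qmat_chroma (see scale_mat() and
 * prores_encode_init() below), so qp selection during encoding is a lookup.
 */
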
typedef struct {
    AVClass *class;
    FDCTDSPContext fdsp;
    uint8_t* fill_y;
    uint8_t* fill_u;
    uint8_t* fill_v;
    uint8_t* fill_a;
    int qmat_luma[16][64];
    int qmat_chroma[16][64];
    const uint8_t *scantable;
    int is_422;
    int need_alpha;
    int is_interlaced;
    char *vendor;
} ProresContext;

/**
 * Check if a value is in the list. If not, return the default value
 *
 * @param ctx                Context for the log msg
 * @param val_name           Name of the checked value, for log msg
 * @param val                Checked value
 * @param array_valid_values Array of valid int, ended with INT_MAX
 * @param default_value      Value returned if the checked value is not in the array
 * @return val if it is in the list, default_value otherwise.
 */
static int int_from_list_or_default(void *ctx, const char *val_name, int val,
                                    const int *array_valid_values, int default_value)
{
    int i = 0;

    while (1) {
        int ref_val = array_valid_values[i];
        if (ref_val == INT_MAX)
            break;
        if (val == ref_val)
            return val;
        i++;
    }
    /* val is not a valid value */
    av_log(ctx, AV_LOG_DEBUG,
           "%s %d is not supported, setting it to the default value: %d\n",
           val_name, val, default_value);
    return default_value;
}
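
/*
 * Entropy coding helper: writes one non-negative value with the hybrid
 * Rice / Exp-Golomb scheme used by ProRes. The 8-bit codebook descriptor
 * packs three fields:
 *   bits 0-1: switch_bits - 1 (controls where Rice switches to Exp-Golomb)
 *   bits 2-4: Exp-Golomb code order
 *   bits 5-7: Rice code order
 */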
static void encode_vlc_codeword(PutBitContext *pb, unsigned codebook, int val)
{
    unsigned int rice_order, exp_order, switch_bits, switch_val;
    int exponent;

    /* number of prefix bits to switch between Rice and expGolomb */
    switch_bits = (codebook & 3) + 1;
    rice_order  = codebook >> 5;       /* rice code order */
    exp_order   = (codebook >> 2) & 7; /* exp golomb code order */

    switch_val  = switch_bits << rice_order;

    if (val >= switch_val) {
        val -= switch_val - (1 << exp_order);
        exponent = av_log2(val);

        put_bits(pb, exponent - exp_order + switch_bits, 0);
        put_bits(pb, exponent + 1, val);
    } else {
        exponent = val >> rice_order;

        if (exponent)
            put_bits(pb, exponent, 0);
        put_bits(pb, 1, 1);
        if (rice_order)
            put_sbits(pb, rice_order, val);
    }
}

#define GET_SIGN(x)  ((x) >> 31)
#define MAKE_CODE(x) (((x) * 2) ^ GET_SIGN(x))
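
/*
 * DC coefficients: the first DC of the slice is coded with its own codebook,
 * the rest as sign-adapted differences from the previous block's DC, with the
 * codebook adapting to the magnitude of the previously coded difference.
 * MAKE_CODE() folds a signed value into the unsigned codeword expected by
 * encode_vlc_codeword() (0, -1, 1, -2, 2, ... -> 0, 1, 2, 3, 4, ...).
 */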
static void encode_dcs(PutBitContext *pb, int16_t *blocks,
                       int blocks_per_slice, int scale)
{
    int i;
    int codebook = 5, code, dc, prev_dc, delta, sign, new_sign;

    prev_dc = (blocks[0] - 0x4000) / scale;
    encode_vlc_codeword(pb, FIRST_DC_CB, MAKE_CODE(prev_dc));
    sign = 0;
    blocks += 64;

    for (i = 1; i < blocks_per_slice; i++, blocks += 64) {
        dc       = (blocks[0] - 0x4000) / scale;
        delta    = dc - prev_dc;
        new_sign = GET_SIGN(delta);
        delta    = (delta ^ sign) - sign;
        code     = MAKE_CODE(delta);
        encode_vlc_codeword(pb, ff_prores_dc_codebook[codebook], code);
        codebook = FFMIN(code, 6);
        sign     = new_sign;
        prev_dc  = dc;
    }
}
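
/*
 * AC coefficients: for each scan position (1..63) the coefficients of all
 * blocks in the slice are visited in turn and coded as (run, level) pairs,
 * where the run counts zero coefficients since the last nonzero one. Run and
 * level each use a codebook selected from the previously coded run/level.
 */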
static void encode_acs(PutBitContext *pb, int16_t *blocks,
                       int blocks_per_slice,
                       int *qmat, const uint8_t *scan)
{
    int idx, i;
    int prev_run = 4;
    int prev_level = 2;
    int run = 0, level;
    int max_coeffs, abs_level;

    max_coeffs = blocks_per_slice << 6;

    for (i = 1; i < 64; i++) {
        for (idx = scan[i]; idx < max_coeffs; idx += 64) {
            level = blocks[idx] / qmat[scan[i]];
            if (level) {
                abs_level = FFABS(level);
                encode_vlc_codeword(pb, ff_prores_run_to_cb[prev_run], run);
                encode_vlc_codeword(pb, ff_prores_level_to_cb[prev_level], abs_level - 1);
                put_sbits(pb, 1, GET_SIGN(level));

                prev_run   = FFMIN(run, 15);
                prev_level = FFMIN(abs_level, 9);

                run = 0;
            } else {
                run++;
            }
        }
    }
}

static void get(const uint8_t *pixels, int stride, int16_t* block)
{
    int i;

    for (i = 0; i < 8; i++) {
        AV_WN64(block,     AV_RN64(pixels));
        AV_WN64(block + 4, AV_RN64(pixels + 8));
        pixels += stride;
        block  += 8;
    }
}

static void fdct_get(FDCTDSPContext *fdsp, const uint8_t *pixels, int stride, int16_t* block)
{
    get(pixels, stride, block);
    fdsp->fdct(block);
}
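
/*
 * Forward DCT of one plane of a slice. A luma macroblock is a 16x16 area
 * split into four 8x8 blocks (top-left, top-right, bottom-left, bottom-right);
 * a 4:2:2 chroma macroblock is 8x16 (two stacked blocks); a 4:4:4 chroma
 * macroblock is again 16x16 but with the two block columns stored one after
 * the other. src is a byte pointer to 10-bit samples, so it advances by
 * 32 bytes (16 pixels) per full-width macroblock.
 */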
static void calc_plane_dct(FDCTDSPContext *fdsp, const uint8_t *src, int16_t *blocks, int src_stride, int mb_count, int chroma, int is_422)
{
    int16_t *block;
    int i;

    block = blocks;

    if (!chroma) { /* Luma plane */
        for (i = 0; i < mb_count; i++) {
            fdct_get(fdsp, src,                       src_stride, block + (0 << 6));
            fdct_get(fdsp, src + 16,                  src_stride, block + (1 << 6));
            fdct_get(fdsp, src +      8 * src_stride, src_stride, block + (2 << 6));
            fdct_get(fdsp, src + 16 + 8 * src_stride, src_stride, block + (3 << 6));

            block += 256;
            src   += 32;
        }
    } else if (chroma && is_422) { /* chroma plane 422 */
        for (i = 0; i < mb_count; i++) {
            fdct_get(fdsp, src,                  src_stride, block + (0 << 6));
            fdct_get(fdsp, src + 8 * src_stride, src_stride, block + (1 << 6));
            block += (256 >> 1);
            src   += (32  >> 1);
        }
    } else { /* chroma plane 444 */
        for (i = 0; i < mb_count; i++) {
            fdct_get(fdsp, src,                       src_stride, block + (0 << 6));
            fdct_get(fdsp, src +      8 * src_stride, src_stride, block + (1 << 6));
            fdct_get(fdsp, src + 16,                  src_stride, block + (2 << 6));
            fdct_get(fdsp, src + 16 + 8 * src_stride, src_stride, block + (3 << 6));

            block += 256;
            src   += 32;
        }
    }
}
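
/*
 * Entropy coding of one plane (encode_slice_plane) and of the three planes of
 * a slice (encode_slice_data). A slice carries 4 luma blocks per macroblock
 * and 2 or 4 chroma blocks per macroblock depending on subsampling; DCs are
 * coded first, then all ACs. encode_slice_data returns the total payload size
 * in bytes and reports the per-plane sizes used in the slice header.
 */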
static int encode_slice_plane(int16_t *blocks, int mb_count, uint8_t *buf, unsigned buf_size, int *qmat, int sub_sample_chroma,
                              const uint8_t *scan)
{
    int blocks_per_slice;
    PutBitContext pb;

    blocks_per_slice = mb_count << (2 - sub_sample_chroma);
    init_put_bits(&pb, buf, buf_size);

    encode_dcs(&pb, blocks, blocks_per_slice, qmat[0]);
    encode_acs(&pb, blocks, blocks_per_slice, qmat, scan);

    flush_put_bits(&pb);
    return put_bytes_output(&pb);
}

static av_always_inline unsigned encode_slice_data(AVCodecContext *avctx,
                                                   int16_t *blocks_y, int16_t *blocks_u, int16_t *blocks_v,
                                                   unsigned mb_count, uint8_t *buf, unsigned data_size,
                                                   unsigned *y_data_size, unsigned *u_data_size, unsigned *v_data_size,
                                                   int qp)
{
    ProresContext* ctx = avctx->priv_data;

    *y_data_size = encode_slice_plane(blocks_y, mb_count,
                                      buf, data_size, ctx->qmat_luma[qp - 1], 0, ctx->scantable);

    if (!(avctx->flags & AV_CODEC_FLAG_GRAY)) {
        *u_data_size = encode_slice_plane(blocks_u, mb_count, buf + *y_data_size, data_size - *y_data_size,
                                          ctx->qmat_chroma[qp - 1], ctx->is_422, ctx->scantable);

        *v_data_size = encode_slice_plane(blocks_v, mb_count, buf + *y_data_size + *u_data_size,
                                          data_size - *y_data_size - *u_data_size,
                                          ctx->qmat_chroma[qp - 1], ctx->is_422, ctx->scantable);
    }

    return *y_data_size + *u_data_size + *v_data_size;
}
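
/*
 * Alpha is coded exactly, on 16-bit samples: a "diff" codeword whenever the
 * value changes (put_alpha_diff: either a raw 16-bit difference or a short
 * form for nonzero differences up to +/-64), followed by a run length of
 * identical samples (put_alpha_run: a single bit for a zero-length run,
 * 4 bits for runs below 16, 15 bits otherwise).
 */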
static void put_alpha_diff(PutBitContext *pb, int cur, int prev)
{
    const int abits = 16;
    const int dbits = 7;
    const int dsize = 1 << (dbits - 1);
    int diff = cur - prev;

    diff = av_zero_extend(diff, abits);
    if (diff >= (1 << abits) - dsize)
        diff -= 1 << abits;
    if (diff < -dsize || diff > dsize || !diff) {
        put_bits(pb, 1, 1);
        put_bits(pb, abits, diff);
    } else {
        put_bits(pb, 1, 0);
        put_bits(pb, dbits - 1, FFABS(diff) - 1);
        put_bits(pb, 1, diff < 0);
    }
}

static inline void put_alpha_run(PutBitContext *pb, int run)
{
    if (run) {
        put_bits(pb, 1, 0);
        if (run < 0x10)
            put_bits(pb, 4, run);
        else
            put_bits(pb, 15, run);
    } else {
        put_bits(pb, 1, 1);
    }
}

static av_always_inline int encode_alpha_slice_data(AVCodecContext *avctx, int8_t *src_a,
                                                    unsigned mb_count, uint8_t *buf, unsigned data_size, unsigned *a_data_size)
{
    const int abits = 16;
    const int mask  = (1 << abits) - 1;
    const int num_coeffs = mb_count * 256;
    int prev = mask, cur;
    int idx = 0;
    int run = 0;
    int16_t *blocks = (int16_t *)src_a;
    PutBitContext pb;
    init_put_bits(&pb, buf, data_size);

    cur = blocks[idx++];
    put_alpha_diff(&pb, cur, prev);
    prev = cur;
    do {
        cur = blocks[idx++];
        if (cur != prev) {
            put_alpha_run (&pb, run);
            put_alpha_diff(&pb, cur, prev);
            prev = cur;
            run  = 0;
        } else {
            run++;
        }
    } while (idx < num_coeffs);

    put_alpha_run(&pb, run);

    flush_put_bits(&pb);
    *a_data_size = put_bytes_output(&pb);

    if (put_bits_left(&pb) < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "Underestimated required buffer size.\n");
        return AVERROR_BUG;
    } else {
        return 0;
    }
}
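
/*
 * Copies a sub-rectangle of a source plane into a packed buffer, padding to
 * the right and bottom by repeating the last column and the last line, so
 * that "unsafe" edge slices can be transformed like full-size ones. For
 * interlaced input it extracts one field (every second line, starting with
 * the second line for the bottom field). The alpha variant also expands
 * 10-bit samples to 16 bits.
 */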
static inline void subimage_with_fill_template(const uint16_t *src, unsigned x, unsigned y,
                                               unsigned stride, unsigned width, unsigned height, uint16_t *dst,
                                               unsigned dst_width, unsigned dst_height, int is_alpha_plane,
                                               int is_interlaced, int is_top_field)
{
    int box_width = FFMIN(width - x, dst_width);
    int i, j, src_stride, box_height;
    uint16_t last_pix, *last_line;

    if (!is_interlaced) {
        src_stride = stride >> 1;
        src += y * src_stride + x;
        box_height = FFMIN(height - y, dst_height);
    } else {
        src_stride = stride; /* 2 lines stride */
        src += y * src_stride + x;
        box_height = FFMIN(height / 2 - y, dst_height);
        if (!is_top_field)
            src += stride >> 1;
    }

    for (i = 0; i < box_height; ++i) {
        for (j = 0; j < box_width; ++j) {
            if (!is_alpha_plane) {
                dst[j] = src[j];
            } else {
                dst[j] = src[j] << 6; /* alpha 10b to 16b */
            }
        }
        if (!is_alpha_plane) {
            last_pix = dst[j - 1];
        } else {
            last_pix = dst[j - 1] << 6; /* alpha 10b to 16b */
        }
        for (; j < dst_width; j++)
            dst[j] = last_pix;
        src += src_stride;
        dst += dst_width;
    }

    last_line = dst - dst_width;
    for (; i < dst_height; i++) {
        for (j = 0; j < dst_width; ++j) {
            dst[j] = last_line[j];
        }
        dst += dst_width;
    }
}

static void subimage_with_fill(const uint16_t *src, unsigned x, unsigned y,
                               unsigned stride, unsigned width, unsigned height, uint16_t *dst,
                               unsigned dst_width, unsigned dst_height, int is_interlaced, int is_top_field)
{
    subimage_with_fill_template(src, x, y, stride, width, height, dst, dst_width, dst_height, 0, is_interlaced, is_top_field);
}

/* reorganize alpha data and convert 10b -> 16b */
static void subimage_alpha_with_fill(const uint16_t *src, unsigned x, unsigned y,
                                     unsigned stride, unsigned width, unsigned height, uint16_t *dst,
                                     unsigned dst_width, unsigned dst_height, int is_interlaced, int is_top_field)
{
    subimage_with_fill_template(src, x, y, stride, width, height, dst, dst_width, dst_height, 1, is_interlaced, is_top_field);
}
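
/*
 * Encodes one slice: 6-byte header (8 with alpha), then Y/U/V entropy data,
 * then optional alpha data. The header stores its own size, the quantizer,
 * and the Y and U (and, with alpha, V) payload sizes. The quantizer is
 * adjusted per slice so that the slice size stays within roughly +/-12% of
 * the per-profile byte budget, except for "unsafe" edge slices, which are
 * padded via subimage_with_fill() and encoded once at the incoming qp.
 */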
static int encode_slice(AVCodecContext *avctx, const AVFrame *pic, int mb_x,
                        int mb_y, unsigned mb_count, uint8_t *buf, unsigned data_size,
                        int unsafe, int *qp, int is_interlaced, int is_top_field)
{
    int luma_stride, chroma_stride, alpha_stride = 0;
    ProresContext* ctx = avctx->priv_data;
    int hdr_size = 6 + (ctx->need_alpha * 2); /* v data size is written only when there is alpha */
    int ret = 0, slice_size;
    const uint8_t *dest_y, *dest_u, *dest_v;
    unsigned y_data_size = 0, u_data_size = 0, v_data_size = 0, a_data_size = 0;
    FDCTDSPContext *fdsp = &ctx->fdsp;
    int tgt_bits   = (mb_count * bitrate_table[avctx->profile]) >> 2;
    int low_bytes  = (tgt_bits - (tgt_bits >> 3)) >> 3; // 12% bitrate fluctuation
    int high_bytes = (tgt_bits + (tgt_bits >> 3)) >> 3;

    LOCAL_ALIGNED(16, int16_t, blocks_y, [DEFAULT_SLICE_MB_WIDTH << 8]);
    LOCAL_ALIGNED(16, int16_t, blocks_u, [DEFAULT_SLICE_MB_WIDTH << 8]);
    LOCAL_ALIGNED(16, int16_t, blocks_v, [DEFAULT_SLICE_MB_WIDTH << 8]);

    luma_stride   = pic->linesize[0];
    chroma_stride = pic->linesize[1];

    if (ctx->need_alpha)
        alpha_stride = pic->linesize[3];

    if (!is_interlaced) {
        dest_y = pic->data[0] + (mb_y << 4) * luma_stride   + (mb_x << 5);
        dest_u = pic->data[1] + (mb_y << 4) * chroma_stride + (mb_x << (5 - ctx->is_422));
        dest_v = pic->data[2] + (mb_y << 4) * chroma_stride + (mb_x << (5 - ctx->is_422));
    } else {
        dest_y = pic->data[0] + (mb_y << 4) * luma_stride   * 2 + (mb_x << 5);
        dest_u = pic->data[1] + (mb_y << 4) * chroma_stride * 2 + (mb_x << (5 - ctx->is_422));
        dest_v = pic->data[2] + (mb_y << 4) * chroma_stride * 2 + (mb_x << (5 - ctx->is_422));
        if (!is_top_field) { /* bottom field, offset dest */
            dest_y += luma_stride;
            dest_u += chroma_stride;
            dest_v += chroma_stride;
        }
    }

    if (unsafe) {
        subimage_with_fill((const uint16_t *) pic->data[0], mb_x << 4, mb_y << 4,
                           luma_stride, avctx->width, avctx->height,
                           (uint16_t *) ctx->fill_y, mb_count << 4, 16, is_interlaced, is_top_field);
        subimage_with_fill((const uint16_t *) pic->data[1], mb_x << (4 - ctx->is_422), mb_y << 4,
                           chroma_stride, avctx->width >> ctx->is_422, avctx->height,
                           (uint16_t *) ctx->fill_u, mb_count << (4 - ctx->is_422), 16, is_interlaced, is_top_field);
        subimage_with_fill((const uint16_t *) pic->data[2], mb_x << (4 - ctx->is_422), mb_y << 4,
                           chroma_stride, avctx->width >> ctx->is_422, avctx->height,
                           (uint16_t *) ctx->fill_v, mb_count << (4 - ctx->is_422), 16, is_interlaced, is_top_field);

        /* no need for interlaced special case, data already reorganized in subimage_with_fill */
        calc_plane_dct(fdsp, ctx->fill_y, blocks_y, mb_count << 5,                 mb_count, 0, 0);
        calc_plane_dct(fdsp, ctx->fill_u, blocks_u, mb_count << (5 - ctx->is_422), mb_count, 1, ctx->is_422);
        calc_plane_dct(fdsp, ctx->fill_v, blocks_v, mb_count << (5 - ctx->is_422), mb_count, 1, ctx->is_422);

        slice_size = encode_slice_data(avctx, blocks_y, blocks_u, blocks_v,
                                       mb_count, buf + hdr_size, data_size - hdr_size,
                                       &y_data_size, &u_data_size, &v_data_size,
                                       *qp);
    } else {
        if (!is_interlaced) {
            calc_plane_dct(fdsp, dest_y, blocks_y, luma_stride,   mb_count, 0, 0);
            calc_plane_dct(fdsp, dest_u, blocks_u, chroma_stride, mb_count, 1, ctx->is_422);
            calc_plane_dct(fdsp, dest_v, blocks_v, chroma_stride, mb_count, 1, ctx->is_422);
        } else {
            calc_plane_dct(fdsp, dest_y, blocks_y, luma_stride   * 2, mb_count, 0, 0);
            calc_plane_dct(fdsp, dest_u, blocks_u, chroma_stride * 2, mb_count, 1, ctx->is_422);
            calc_plane_dct(fdsp, dest_v, blocks_v, chroma_stride * 2, mb_count, 1, ctx->is_422);
        }

        slice_size = encode_slice_data(avctx, blocks_y, blocks_u, blocks_v,
                                       mb_count, buf + hdr_size, data_size - hdr_size,
                                       &y_data_size, &u_data_size, &v_data_size,
                                       *qp);

        if (slice_size > high_bytes && *qp < qp_end_table[avctx->profile]) {
            do {
                *qp += 1;
                slice_size = encode_slice_data(avctx, blocks_y, blocks_u, blocks_v,
                                               mb_count, buf + hdr_size, data_size - hdr_size,
                                               &y_data_size, &u_data_size, &v_data_size,
                                               *qp);
            } while (slice_size > high_bytes && *qp < qp_end_table[avctx->profile]);
        } else if (slice_size < low_bytes && *qp > qp_start_table[avctx->profile]) {
            do {
                *qp -= 1;
                slice_size = encode_slice_data(avctx, blocks_y, blocks_u, blocks_v,
                                               mb_count, buf + hdr_size, data_size - hdr_size,
                                               &y_data_size, &u_data_size, &v_data_size,
                                               *qp);
            } while (slice_size < low_bytes && *qp > qp_start_table[avctx->profile]);
        }
    }

    buf[0] = hdr_size << 3;
    buf[1] = *qp;
    AV_WB16(buf + 2, y_data_size);
    AV_WB16(buf + 4, u_data_size);

    if (ctx->need_alpha) {
        AV_WB16(buf + 6, v_data_size); /* write v data size only if there is alpha */

        subimage_alpha_with_fill((const uint16_t *) pic->data[3], mb_x << 4, mb_y << 4,
                                 alpha_stride, avctx->width, avctx->height,
                                 (uint16_t *) ctx->fill_a, mb_count << 4, 16, is_interlaced, is_top_field);
        ret = encode_alpha_slice_data(avctx, ctx->fill_a, mb_count,
                                      buf + hdr_size + slice_size,
                                      data_size - hdr_size - slice_size, &a_data_size);
    }

    if (ret != 0) {
        return ret;
    }
    return hdr_size + y_data_size + u_data_size + v_data_size + a_data_size;
}
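
/*
 * Encodes one picture (a full progressive frame, or one field when
 * interlaced). Layout: an 8-byte picture header, a table of 16-bit slice
 * sizes, then the slice data. Each macroblock row is split into slices of
 * DEFAULT_SLICE_MB_WIDTH macroblocks, with progressively narrower slices
 * (4, 2, 1 MB) finishing off a row whose width is not a multiple of 8.
 */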
static int prores_encode_picture(AVCodecContext *avctx, const AVFrame *pic,
                                 uint8_t *buf, const int buf_size, const int picture_index, const int is_top_field)
{
    ProresContext *ctx = avctx->priv_data;
    int mb_width = (avctx->width + 15) >> 4;
    int hdr_size, sl_size, i;
    int mb_y, sl_data_size, qp, mb_height, picture_height, unsafe_mb_height_limit;
    int unsafe_bot, unsafe_right;
    uint8_t *sl_data, *sl_data_sizes;
    int slice_per_line = 0, rem = mb_width;

    if (!ctx->is_interlaced) { /* progressive encoding */
        mb_height = (avctx->height + 15) >> 4;
        unsafe_mb_height_limit = mb_height;
    } else {
        if (is_top_field) {
            picture_height = (avctx->height + 1) / 2;
        } else {
            picture_height = avctx->height / 2;
        }
        mb_height = (picture_height + 15) >> 4;
        unsafe_mb_height_limit = mb_height;
    }

    for (i = av_log2(DEFAULT_SLICE_MB_WIDTH); i >= 0; --i) {
        slice_per_line += rem >> i;
        rem &= (1 << i) - 1;
    }

    qp = qp_start_table[avctx->profile];
    hdr_size = 8; sl_data_size = buf_size - hdr_size;
    sl_data_sizes = buf + hdr_size;
    sl_data = sl_data_sizes + (slice_per_line * mb_height * 2);
    for (mb_y = 0; mb_y < mb_height; mb_y++) {
        int mb_x = 0;
        int slice_mb_count = DEFAULT_SLICE_MB_WIDTH;
        while (mb_x < mb_width) {
            while (mb_width - mb_x < slice_mb_count)
                slice_mb_count >>= 1;

            unsafe_bot   = (avctx->height & 0xf) && (mb_y == unsafe_mb_height_limit - 1);
            unsafe_right = (avctx->width  & 0xf) && (mb_x + slice_mb_count == mb_width);

            sl_size = encode_slice(avctx, pic, mb_x, mb_y, slice_mb_count,
                                   sl_data, sl_data_size, unsafe_bot || unsafe_right, &qp, ctx->is_interlaced, is_top_field);
            if (sl_size < 0) {
                return sl_size;
            }

            bytestream_put_be16(&sl_data_sizes, sl_size);
            sl_data      += sl_size;
            sl_data_size -= sl_size;
            mb_x         += slice_mb_count;
        }
    }

    buf[0] = hdr_size << 3;
    AV_WB32(buf + 1, sl_data - buf);               /* picture data size */
    AV_WB16(buf + 5, slice_per_line * mb_height);  /* number of slices */
    buf[7] = av_log2(DEFAULT_SLICE_MB_WIDTH) << 4; /* log2 of slice width in macroblocks */
    return sl_data - buf;
}
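
/*
 * Writes the frame container: total frame size, the 'icpf' frame identifier,
 * a 148-byte frame header (vendor, dimensions, interlace/chroma flags, color
 * description, alpha depth, quantization matrices), then one encoded picture
 * for progressive content or two field pictures for interlaced content.
 */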
static int prores_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                               const AVFrame *pict, int *got_packet)
{
    ProresContext *ctx = avctx->priv_data;
    int header_size = 148;
    uint8_t *buf;
    int compress_frame_size, pic_size, ret, is_top_field_first = 0;
    uint8_t frame_flags;
    int frame_size = FFALIGN(avctx->width, 16) * FFALIGN(avctx->height, 16) * 16 + 500 + AV_INPUT_BUFFER_MIN_SIZE; //FIXME choose tighter limit

    if ((ret = ff_alloc_packet(avctx, pkt, frame_size + AV_INPUT_BUFFER_MIN_SIZE)) < 0)
        return ret;

    buf = pkt->data;
    compress_frame_size = 8 + header_size;

    bytestream_put_be32(&buf, compress_frame_size); /* frame size will be updated after picture(s) encoding */
    bytestream_put_be32(&buf, FRAME_ID);

    bytestream_put_be16(&buf, header_size);
    bytestream_put_be16(&buf, avctx->pix_fmt != AV_PIX_FMT_YUV422P10 || ctx->need_alpha ? 1 : 0); /* version */
    bytestream_put_buffer(&buf, ctx->vendor, 4);
    bytestream_put_be16(&buf, avctx->width);
    bytestream_put_be16(&buf, avctx->height);

    frame_flags = 0x80; /* 422 not interlaced */
    if (avctx->profile >= AV_PROFILE_PRORES_4444) /* 4444 or 4444 XQ */
        frame_flags |= 0x40; /* 444 chroma */
    if (ctx->is_interlaced) {
        if ((pict->flags & AV_FRAME_FLAG_TOP_FIELD_FIRST) || !(pict->flags & AV_FRAME_FLAG_INTERLACED)) {
            /* tff frame, or progressive frame interpreted as tff */
            av_log(avctx, AV_LOG_DEBUG, "use interlaced encoding, top field first\n");
            frame_flags |= 0x04; /* interlaced tff */
            is_top_field_first = 1;
        } else {
            av_log(avctx, AV_LOG_DEBUG, "use interlaced encoding, bottom field first\n");
            frame_flags |= 0x08; /* interlaced bff */
        }
    } else {
        av_log(avctx, AV_LOG_DEBUG, "use progressive encoding\n");
    }
    *buf++ = frame_flags;
    *buf++ = 0; /* reserved */
    /* only write color properties if the value is valid; set to unspecified otherwise */
    *buf++ = int_from_list_or_default(avctx, "frame color primaries",
                                      pict->color_primaries, valid_primaries, 0);
    *buf++ = int_from_list_or_default(avctx, "frame color trc",
                                      pict->color_trc, valid_trc, 0);
    *buf++ = int_from_list_or_default(avctx, "frame colorspace",
                                      pict->colorspace, valid_colorspace, 0);
    *buf++ = ctx->need_alpha ? 0x2 /* 16-bit alpha */ : 0;
    *buf++ = 0; /* reserved */
    *buf++ = 3; /* luma and chroma matrix present */

    bytestream_put_buffer(&buf, QMAT_LUMA[avctx->profile],   64);
    bytestream_put_buffer(&buf, QMAT_CHROMA[avctx->profile], 64);

    pic_size = prores_encode_picture(avctx, pict, buf,
                                     pkt->size - compress_frame_size, 0, is_top_field_first); /* encode progressive or first field */
    if (pic_size < 0) {
        return pic_size;
    }
    compress_frame_size += pic_size;

    if (ctx->is_interlaced) { /* encode second field */
        pic_size = prores_encode_picture(avctx, pict, pkt->data + compress_frame_size,
                                         pkt->size - compress_frame_size, 1, !is_top_field_first);
        if (pic_size < 0) {
            return pic_size;
        }
        compress_frame_size += pic_size;
    }

    AV_WB32(pkt->data, compress_frame_size); /* update frame size */
    pkt->size = compress_frame_size;
    *got_packet = 1;

    return 0;
}

static void scale_mat(const uint8_t* src, int* dst, int scale)
{
    int i;
    for (i = 0; i < 64; i++)
        dst[i] = src[i] * scale;
}

static av_cold int prores_encode_init(AVCodecContext *avctx)
{
    int i;
    ProresContext* ctx = avctx->priv_data;

    avctx->bits_per_raw_sample = 10;
    ctx->need_alpha = 0;
    ctx->is_interlaced = !!(avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT);
    if (ctx->is_interlaced) {
        ctx->scantable = ff_prores_interlaced_scan;
    } else {
        ctx->scantable = ff_prores_progressive_scan;
    }

    if (avctx->width & 0x1) {
        av_log(avctx, AV_LOG_ERROR,
               "frame width needs to be a multiple of 2\n");
        return AVERROR(EINVAL);
    }

    if (avctx->width > 65534 || avctx->height > 65535) {
        av_log(avctx, AV_LOG_ERROR,
               "The maximum dimensions are 65534x65535\n");
        return AVERROR(EINVAL);
    }

    if (strlen(ctx->vendor) != 4) {
        av_log(avctx, AV_LOG_ERROR, "vendor ID should be 4 bytes\n");
        return AVERROR(EINVAL);
    }

    if (avctx->profile == AV_PROFILE_UNKNOWN) {
        if (avctx->pix_fmt == AV_PIX_FMT_YUV422P10) {
            avctx->profile = AV_PROFILE_PRORES_STANDARD;
            av_log(avctx, AV_LOG_INFO,
                   "encoding with ProRes standard (apcn) profile\n");
        } else if (avctx->pix_fmt == AV_PIX_FMT_YUV444P10) {
            avctx->profile = AV_PROFILE_PRORES_4444;
            av_log(avctx, AV_LOG_INFO,
                   "encoding with ProRes 4444 (ap4h) profile\n");
        } else if (avctx->pix_fmt == AV_PIX_FMT_YUVA444P10) {
            avctx->profile = AV_PROFILE_PRORES_4444;
            av_log(avctx, AV_LOG_INFO,
                   "encoding with ProRes 4444+ (ap4h) profile\n");
        } else
            av_assert0(0);
    } else if (avctx->profile < AV_PROFILE_PRORES_PROXY
               || avctx->profile > AV_PROFILE_PRORES_XQ) {
        av_log(
            avctx,
            AV_LOG_ERROR,
            "unknown profile %d, use [0 - apco, 1 - apcs, 2 - apcn (default), 3 - apch, 4 - ap4h, 5 - ap4x]\n",
            avctx->profile);
        return AVERROR(EINVAL);
    } else if ((avctx->pix_fmt == AV_PIX_FMT_YUV422P10) && (avctx->profile > AV_PROFILE_PRORES_HQ)) {
        av_log(avctx, AV_LOG_ERROR,
               "the ProRes 4444/4444 XQ (ap4h/ap4x) profiles need YUV444P10 input\n");
        return AVERROR(EINVAL);
    } else if ((avctx->pix_fmt == AV_PIX_FMT_YUV444P10 || avctx->pix_fmt == AV_PIX_FMT_YUVA444P10)
               && (avctx->profile < AV_PROFILE_PRORES_4444)) {
        av_log(avctx, AV_LOG_ERROR,
               "the ProRes Proxy/LT/422/422 HQ (apco, apcs, apcn, apch) profiles need YUV422P10 input\n");
        return AVERROR(EINVAL);
    }

    if (avctx->profile < AV_PROFILE_PRORES_4444) { /* 422 versions */
        ctx->is_422 = 1;
        if ((avctx->height & 0xf) || (avctx->width & 0xf)) {
            ctx->fill_y = av_malloc(4 * (DEFAULT_SLICE_MB_WIDTH << 8));
            if (!ctx->fill_y)
                return AVERROR(ENOMEM);
            ctx->fill_u = ctx->fill_y + (DEFAULT_SLICE_MB_WIDTH << 9);
            ctx->fill_v = ctx->fill_u + (DEFAULT_SLICE_MB_WIDTH << 8);
        }
    } else { /* 444 */
        ctx->is_422 = 0;
        if ((avctx->height & 0xf) || (avctx->width & 0xf)) {
            ctx->fill_y = av_malloc(3 * (DEFAULT_SLICE_MB_WIDTH << 9));
            if (!ctx->fill_y)
                return AVERROR(ENOMEM);
            ctx->fill_u = ctx->fill_y + (DEFAULT_SLICE_MB_WIDTH << 9);
            ctx->fill_v = ctx->fill_u + (DEFAULT_SLICE_MB_WIDTH << 9);
        }
        if (avctx->pix_fmt == AV_PIX_FMT_YUVA444P10) {
            ctx->need_alpha = 1;
            ctx->fill_a = av_malloc(DEFAULT_SLICE_MB_WIDTH << 9); /* 8 blocks x 16px x 16px x sizeof (uint16) */
            if (!ctx->fill_a)
                return AVERROR(ENOMEM);
        }
    }

    if (ctx->need_alpha)
        avctx->bits_per_coded_sample = 32;

    ff_fdctdsp_init(&ctx->fdsp, avctx);

    avctx->codec_tag = AV_RL32((const uint8_t*)profiles[avctx->profile].name);

    for (i = 1; i <= 16; i++) {
        scale_mat(QMAT_LUMA[avctx->profile],   ctx->qmat_luma[i - 1],   i);
        scale_mat(QMAT_CHROMA[avctx->profile], ctx->qmat_chroma[i - 1], i);
    }

    return 0;
}

static av_cold int prores_encode_close(AVCodecContext *avctx)
{
    ProresContext* ctx = avctx->priv_data;
    av_freep(&ctx->fill_y);
    av_freep(&ctx->fill_a);

    return 0;
}

#define OFFSET(x) offsetof(ProresContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

static const AVOption options[] = {
    { "vendor", "vendor ID", OFFSET(vendor), AV_OPT_TYPE_STRING, { .str = "fmpg" }, 0, 0, VE },
    { NULL }
};

static const AVClass prores_enc_class = {
    .class_name = "ProRes encoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
    AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_NONE
};
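
/*
 * This implementation is registered under two encoder names, "prores_aw" and
 * "prores". A typical invocation (an assumed example; the profile names come
 * from ff_prores_profiles and the "vendor" option is defined above) would be
 * something like:
 *   ffmpeg -i in.mov -c:v prores_aw -profile:v hq -vendor apl0 out.mov
 */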
const FFCodec ff_prores_aw_encoder = {
    .p.name         = "prores_aw",
    CODEC_LONG_NAME("Apple ProRes"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_PRORES,
    .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
                      AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE,
    CODEC_PIXFMTS_ARRAY(pix_fmts),
    .color_ranges   = AVCOL_RANGE_MPEG,
    .priv_data_size = sizeof(ProresContext),
    .init           = prores_encode_init,
    .close          = prores_encode_close,
    FF_CODEC_ENCODE_CB(prores_encode_frame),
    .p.priv_class   = &prores_enc_class,
    .p.profiles     = NULL_IF_CONFIG_SMALL(ff_prores_profiles),
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
};

const FFCodec ff_prores_encoder = {
    .p.name         = "prores",
    CODEC_LONG_NAME("Apple ProRes"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_PRORES,
    .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
                      AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE,
    CODEC_PIXFMTS_ARRAY(pix_fmts),
    .color_ranges   = AVCOL_RANGE_MPEG,
    .priv_data_size = sizeof(ProresContext),
    .init           = prores_encode_init,
    .close          = prores_encode_close,
    FF_CODEC_ENCODE_CB(prores_encode_frame),
    .p.priv_class   = &prores_enc_class,
    .p.profiles     = NULL_IF_CONFIG_SMALL(ff_prores_profiles),
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
};