/*
 * VP9 compatible video decoder
 *
 * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
 * Copyright (C) 2013 Clément Bœsch <u pkh me>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config_components.h"

#include "avcodec.h"
#include "codec_internal.h"
#include "get_bits.h"
#include "hwconfig.h"
#include "internal.h"
#include "profiles.h"
#include "thread.h"
#include "threadframe.h"
#include "pthread_internal.h"

#include "videodsp.h"
#include "vp89_rac.h"
#include "vp9.h"
#include "vp9data.h"
#include "vp9dec.h"
#include "vpx_rac.h"
#include "libavutil/avassert.h"
#include "libavutil/pixdesc.h"
#include "libavutil/video_enc_params.h"

#define VP9_SYNCCODE 0x498342

#if HAVE_THREADS
DEFINE_OFFSET_ARRAY(VP9Context, vp9_context, pthread_init_cnt,
                    (offsetof(VP9Context, progress_mutex)),
                    (offsetof(VP9Context, progress_cond)));

static int vp9_alloc_entries(AVCodecContext *avctx, int n) {
    VP9Context *s = avctx->priv_data;
    int i;

    if (avctx->active_thread_type & FF_THREAD_SLICE) {
        if (s->entries)
            av_freep(&s->entries);

        s->entries = av_malloc_array(n, sizeof(atomic_int));
        if (!s->entries)
            return AVERROR(ENOMEM);

        for (i = 0; i < n; i++)
            atomic_init(&s->entries[i], 0);
    }
    return 0;
}

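/* Tile-row progress tracking for slice threading: the decoding thread
 * publishes completed superblock rows with a release-ordered atomic add
 * and signals the condition variable; waiters take a fast path through a
 * lock-free acquire load and only fall back to the mutex/cond pair while
 * the target count has not been reached. */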
static void vp9_report_tile_progress(VP9Context *s, int field, int n) {
    pthread_mutex_lock(&s->progress_mutex);
    atomic_fetch_add_explicit(&s->entries[field], n, memory_order_release);
    pthread_cond_signal(&s->progress_cond);
    pthread_mutex_unlock(&s->progress_mutex);
}

static void vp9_await_tile_progress(VP9Context *s, int field, int n) {
    if (atomic_load_explicit(&s->entries[field], memory_order_acquire) >= n)
        return;

    pthread_mutex_lock(&s->progress_mutex);
    while (atomic_load_explicit(&s->entries[field], memory_order_relaxed) != n)
        pthread_cond_wait(&s->progress_cond, &s->progress_mutex);
    pthread_mutex_unlock(&s->progress_mutex);
}
#else
static int vp9_alloc_entries(AVCodecContext *avctx, int n) { return 0; }
#endif

static void vp9_tile_data_free(VP9TileData *td)
{
    av_freep(&td->b_base);
    av_freep(&td->block_base);
    av_freep(&td->block_structure);
}

static void vp9_frame_unref(AVCodecContext *avctx, VP9Frame *f)
{
    ff_thread_release_ext_buffer(avctx, &f->tf);
    av_buffer_unref(&f->extradata);
    av_buffer_unref(&f->hwaccel_priv_buf);
    f->segmentation_map = NULL;
    f->hwaccel_picture_private = NULL;
}

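/* Frame extradata layout: one byte of segmentation map per 8x8 block
 * (sz = 64 * sb_cols * sb_rows bytes in total), immediately followed by
 * one VP9mvrefPair per 8x8 block for MV prediction in future frames. */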
static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
{
    VP9Context *s = avctx->priv_data;
    int ret, sz;

    ret = ff_thread_get_ext_buffer(avctx, &f->tf, AV_GET_BUFFER_FLAG_REF);
    if (ret < 0)
        return ret;

    sz = 64 * s->sb_cols * s->sb_rows;
    if (sz != s->frame_extradata_pool_size) {
        av_buffer_pool_uninit(&s->frame_extradata_pool);
        s->frame_extradata_pool = av_buffer_pool_init(sz * (1 + sizeof(VP9mvrefPair)), NULL);
        if (!s->frame_extradata_pool) {
            s->frame_extradata_pool_size = 0;
            goto fail;
        }
        s->frame_extradata_pool_size = sz;
    }
    f->extradata = av_buffer_pool_get(s->frame_extradata_pool);
    if (!f->extradata) {
        goto fail;
    }
    memset(f->extradata->data, 0, f->extradata->size);

    f->segmentation_map = f->extradata->data;
    f->mv = (VP9mvrefPair *) (f->extradata->data + sz);

    if (avctx->hwaccel) {
        const AVHWAccel *hwaccel = avctx->hwaccel;
        av_assert0(!f->hwaccel_picture_private);
        if (hwaccel->frame_priv_data_size) {
            f->hwaccel_priv_buf = av_buffer_allocz(hwaccel->frame_priv_data_size);
            if (!f->hwaccel_priv_buf)
                goto fail;
            f->hwaccel_picture_private = f->hwaccel_priv_buf->data;
        }
    }

    return 0;

fail:
    vp9_frame_unref(avctx, f);
    return AVERROR(ENOMEM);
}

static int vp9_frame_ref(AVCodecContext *avctx, VP9Frame *dst, VP9Frame *src)
{
    int ret;

    ret = ff_thread_ref_frame(&dst->tf, &src->tf);
    if (ret < 0)
        return ret;

    dst->extradata = av_buffer_ref(src->extradata);
    if (!dst->extradata)
        goto fail;

    dst->segmentation_map = src->segmentation_map;
    dst->mv = src->mv;
    dst->uses_2pass = src->uses_2pass;

    if (src->hwaccel_picture_private) {
        dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
        if (!dst->hwaccel_priv_buf)
            goto fail;
        dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
    }

    return 0;

fail:
    vp9_frame_unref(avctx, dst);
    return AVERROR(ENOMEM);
}

static int update_size(AVCodecContext *avctx, int w, int h)
{
#define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + \
                     CONFIG_VP9_D3D11VA_HWACCEL * 2 + \
                     CONFIG_VP9_NVDEC_HWACCEL + \
                     CONFIG_VP9_VAAPI_HWACCEL + \
                     CONFIG_VP9_VDPAU_HWACCEL + \
                     CONFIG_VP9_VIDEOTOOLBOX_HWACCEL)
    enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
    VP9Context *s = avctx->priv_data;
    uint8_t *p;
    int bytesperpixel = s->bytesperpixel, ret, cols, rows;
    int lflvl_len, i;

    av_assert0(w > 0 && h > 0);

    if (!(s->pix_fmt == s->gf_fmt && w == s->w && h == s->h)) {
        if ((ret = ff_set_dimensions(avctx, w, h)) < 0)
            return ret;

        switch (s->pix_fmt) {
        case AV_PIX_FMT_YUV420P:
        case AV_PIX_FMT_YUV420P10:
#if CONFIG_VP9_DXVA2_HWACCEL
            *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
#endif
#if CONFIG_VP9_D3D11VA_HWACCEL
            *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
            *fmtp++ = AV_PIX_FMT_D3D11;
#endif
#if CONFIG_VP9_NVDEC_HWACCEL
            *fmtp++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
#if CONFIG_VP9_VDPAU_HWACCEL
            *fmtp++ = AV_PIX_FMT_VDPAU;
#endif
#if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
            *fmtp++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
            break;
        case AV_PIX_FMT_YUV420P12:
#if CONFIG_VP9_NVDEC_HWACCEL
            *fmtp++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
#if CONFIG_VP9_VDPAU_HWACCEL
            *fmtp++ = AV_PIX_FMT_VDPAU;
#endif
            break;
        case AV_PIX_FMT_YUV444P:
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
            break;
        }

        *fmtp++ = s->pix_fmt;
        *fmtp = AV_PIX_FMT_NONE;

        ret = ff_thread_get_format(avctx, pix_fmts);
        if (ret < 0)
            return ret;

        avctx->pix_fmt = ret;
        s->gf_fmt = s->pix_fmt;
        s->w = w;
        s->h = h;
    }

    cols = (w + 7) >> 3;
    rows = (h + 7) >> 3;

    if (s->intra_pred_data[0] && cols == s->cols && rows == s->rows && s->pix_fmt == s->last_fmt)
        return 0;

    s->last_fmt = s->pix_fmt;
    s->sb_cols  = (w + 63) >> 6;
    s->sb_rows  = (h + 63) >> 6;
    s->cols     = (w + 7) >> 3;
    s->rows     = (h + 7) >> 3;
    lflvl_len   = avctx->active_thread_type == FF_THREAD_SLICE ? s->sb_rows : 1;

#define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
    av_freep(&s->intra_pred_data[0]);
    // FIXME we slightly over-allocate here for subsampled chroma, but a little
    // bit of padding shouldn't affect performance...
    p = av_malloc(s->sb_cols * (128 + 192 * bytesperpixel +
                                lflvl_len * sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));
    if (!p)
        return AVERROR(ENOMEM);
    assign(s->intra_pred_data[0],  uint8_t *,    64 * bytesperpixel);
    assign(s->intra_pred_data[1],  uint8_t *,    64 * bytesperpixel);
    assign(s->intra_pred_data[2],  uint8_t *,    64 * bytesperpixel);
    assign(s->above_y_nnz_ctx,     uint8_t *,    16);
    assign(s->above_mode_ctx,      uint8_t *,    16);
    assign(s->above_mv_ctx,        VP9mv(*)[2],  16);
    assign(s->above_uv_nnz_ctx[0], uint8_t *,    16);
    assign(s->above_uv_nnz_ctx[1], uint8_t *,    16);
    assign(s->above_partition_ctx, uint8_t *,     8);
    assign(s->above_skip_ctx,      uint8_t *,     8);
    assign(s->above_txfm_ctx,      uint8_t *,     8);
    assign(s->above_segpred_ctx,   uint8_t *,     8);
    assign(s->above_intra_ctx,     uint8_t *,     8);
    assign(s->above_comp_ctx,      uint8_t *,     8);
    assign(s->above_ref_ctx,       uint8_t *,     8);
    assign(s->above_filter_ctx,    uint8_t *,     8);
    assign(s->lflvl,               VP9Filter *,   lflvl_len);
#undef assign

    if (s->td) {
        for (i = 0; i < s->active_tile_cols; i++)
            vp9_tile_data_free(&s->td[i]);
    }

    if (s->s.h.bpp != s->last_bpp) {
        ff_vp9dsp_init(&s->dsp, s->s.h.bpp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
        ff_videodsp_init(&s->vdsp, s->s.h.bpp);
        s->last_bpp = s->s.h.bpp;
    }

    return 0;
}

static int update_block_buffers(AVCodecContext *avctx)
{
    int i;
    VP9Context *s = avctx->priv_data;
    int chroma_blocks, chroma_eobs, bytesperpixel = s->bytesperpixel;
    VP9TileData *td = &s->td[0];

    if (td->b_base && td->block_base && s->block_alloc_using_2pass == s->s.frames[CUR_FRAME].uses_2pass)
        return 0;

    vp9_tile_data_free(td);
    chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v);
    chroma_eobs   = 16 * 16 >> (s->ss_h + s->ss_v);
    if (s->s.frames[CUR_FRAME].uses_2pass) {
        int sbs = s->sb_cols * s->sb_rows;

        td->b_base = av_malloc_array(s->cols * s->rows, sizeof(VP9Block));
        td->block_base = av_mallocz(((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
                                     16 * 16 + 2 * chroma_eobs) * sbs);
        if (!td->b_base || !td->block_base)
            return AVERROR(ENOMEM);
        td->uvblock_base[0] = td->block_base + sbs * 64 * 64 * bytesperpixel;
        td->uvblock_base[1] = td->uvblock_base[0] + sbs * chroma_blocks * bytesperpixel;
        td->eob_base = (uint8_t *) (td->uvblock_base[1] + sbs * chroma_blocks * bytesperpixel);
        td->uveob_base[0] = td->eob_base + 16 * 16 * sbs;
        td->uveob_base[1] = td->uveob_base[0] + chroma_eobs * sbs;

        if (avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS) {
            td->block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
            if (!td->block_structure)
                return AVERROR(ENOMEM);
        }
    } else {
        for (i = 1; i < s->active_tile_cols; i++)
            vp9_tile_data_free(&s->td[i]);
        for (i = 0; i < s->active_tile_cols; i++) {
            s->td[i].b_base = av_malloc(sizeof(VP9Block));
            s->td[i].block_base = av_mallocz((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
                                             16 * 16 + 2 * chroma_eobs);
            if (!s->td[i].b_base || !s->td[i].block_base)
                return AVERROR(ENOMEM);
            s->td[i].uvblock_base[0] = s->td[i].block_base + 64 * 64 * bytesperpixel;
            s->td[i].uvblock_base[1] = s->td[i].uvblock_base[0] + chroma_blocks * bytesperpixel;
            s->td[i].eob_base = (uint8_t *) (s->td[i].uvblock_base[1] + chroma_blocks * bytesperpixel);
            s->td[i].uveob_base[0] = s->td[i].eob_base + 16 * 16;
            s->td[i].uveob_base[1] = s->td[i].uveob_base[0] + chroma_eobs;

            if (avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS) {
                s->td[i].block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
                if (!s->td[i].block_structure)
                    return AVERROR(ENOMEM);
            }
        }
    }
    s->block_alloc_using_2pass = s->s.frames[CUR_FRAME].uses_2pass;

    return 0;
}

// The sign bit is at the end, not the start, of a bit sequence
static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
{
    int v = get_bits(gb, n);
    return get_bits1(gb) ? -v : v;
}

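/* Undoes the "recentering" of a nonnegative value around a midpoint m:
 * small coded values map to values close to m (e.g. for m = 5, the codes
 * 0, 1, 2, 3, 4 decode to 5, 4, 6, 3, 7), so cheap codes correspond to
 * small probability changes in update_prob() below. */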
static av_always_inline int inv_recenter_nonneg(int v, int m)
{
    if (v > 2 * m)
        return v;
    if (v & 1)
        return m - ((v + 1) >> 1);
    return m + (v >> 1);
}

// differential forward probability updates
static int update_prob(VPXRangeCoder *c, int p)
{
    static const uint8_t inv_map_table[255] = {
          7,  20,  33,  46,  59,  72,  85,  98, 111, 124, 137, 150, 163, 176,
        189, 202, 215, 228, 241, 254,   1,   2,   3,   4,   5,   6,   8,   9,
         10,  11,  12,  13,  14,  15,  16,  17,  18,  19,  21,  22,  23,  24,
         25,  26,  27,  28,  29,  30,  31,  32,  34,  35,  36,  37,  38,  39,
         40,  41,  42,  43,  44,  45,  47,  48,  49,  50,  51,  52,  53,  54,
         55,  56,  57,  58,  60,  61,  62,  63,  64,  65,  66,  67,  68,  69,
         70,  71,  73,  74,  75,  76,  77,  78,  79,  80,  81,  82,  83,  84,
         86,  87,  88,  89,  90,  91,  92,  93,  94,  95,  96,  97,  99, 100,
        101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
        116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
        131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
        146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
        161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
        177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
        192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
        207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
        222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
        237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
        252, 253, 253,
    };
    int d;

    /* This code is trying to do a differential probability update. For a
     * current probability A in the range [1, 255], the difference to a new
     * probability of any value can be expressed differentially as 1-A, 255-A
     * where some part of this (absolute range) exists both in positive as
     * well as the negative part, whereas another part only exists in one
     * half. We're trying to code this shared part differentially, i.e.
     * times two where the value of the lowest bit specifies the sign, and
     * the single part is then coded on top of this. This absolute difference
     * then again has a value of [0, 254], but a bigger value in this range
     * indicates that we're further away from the original value A, so we
     * can code this as a VLC code, since higher values are increasingly
     * unlikely. The first 20 values in inv_map_table[] allow 'cheap, rough'
     * updates vs. the 'fine, exact' updates further down the range, which
     * adds one extra dimension to this differential update model. */
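    /* d is coded in four ranges of increasing cost: [0, 16) and [16, 32)
     * with 4 value bits each, [32, 64) with 5 bits, and [64, 255) with
     * 7 bits plus a one-bit extension when the 7-bit value reaches 65. */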
    if (!vp89_rac_get(c)) {
        d = vp89_rac_get_uint(c, 4) + 0;
    } else if (!vp89_rac_get(c)) {
        d = vp89_rac_get_uint(c, 4) + 16;
    } else if (!vp89_rac_get(c)) {
        d = vp89_rac_get_uint(c, 5) + 32;
    } else {
        d = vp89_rac_get_uint(c, 7);
        if (d >= 65)
            d = (d << 1) - 65 + vp89_rac_get(c);
        d += 64;
        av_assert2(d < FF_ARRAY_ELEMS(inv_map_table));
    }

    return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
                    255 - inv_recenter_nonneg(inv_map_table[d], 255 - p);
}

static int read_colorspace_details(AVCodecContext *avctx)
{
    static const enum AVColorSpace colorspaces[8] = {
        AVCOL_SPC_UNSPECIFIED, AVCOL_SPC_BT470BG, AVCOL_SPC_BT709, AVCOL_SPC_SMPTE170M,
        AVCOL_SPC_SMPTE240M, AVCOL_SPC_BT2020_NCL, AVCOL_SPC_RESERVED, AVCOL_SPC_RGB,
    };
    VP9Context *s = avctx->priv_data;
    int bits = avctx->profile <= 1 ? 0 : 1 + get_bits1(&s->gb); // 0:8, 1:10, 2:12

    s->bpp_index = bits;
    s->s.h.bpp = 8 + bits * 2;
    s->bytesperpixel = (7 + s->s.h.bpp) >> 3;
    avctx->colorspace = colorspaces[get_bits(&s->gb, 3)];
    if (avctx->colorspace == AVCOL_SPC_RGB) { // RGB = profile 1
        static const enum AVPixelFormat pix_fmt_rgb[3] = {
            AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12
        };
        s->ss_h = s->ss_v = 0;
        avctx->color_range = AVCOL_RANGE_JPEG;
        s->pix_fmt = pix_fmt_rgb[bits];
        if (avctx->profile & 1) {
            if (get_bits1(&s->gb)) {
                av_log(avctx, AV_LOG_ERROR, "Reserved bit set in RGB\n");
                return AVERROR_INVALIDDATA;
            }
        } else {
            av_log(avctx, AV_LOG_ERROR, "RGB not supported in profile %d\n",
                   avctx->profile);
            return AVERROR_INVALIDDATA;
        }
    } else {
        static const enum AVPixelFormat pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {
            { { AV_PIX_FMT_YUV444P,   AV_PIX_FMT_YUV422P },
              { AV_PIX_FMT_YUV440P,   AV_PIX_FMT_YUV420P } },
            { { AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV422P10 },
              { AV_PIX_FMT_YUV440P10, AV_PIX_FMT_YUV420P10 } },
            { { AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12 },
              { AV_PIX_FMT_YUV440P12, AV_PIX_FMT_YUV420P12 } }
        };
        avctx->color_range = get_bits1(&s->gb) ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
        if (avctx->profile & 1) {
            s->ss_h = get_bits1(&s->gb);
            s->ss_v = get_bits1(&s->gb);
            s->pix_fmt = pix_fmt_for_ss[bits][s->ss_v][s->ss_h];
            if (s->pix_fmt == AV_PIX_FMT_YUV420P) {
                av_log(avctx, AV_LOG_ERROR, "YUV 4:2:0 not supported in profile %d\n",
                       avctx->profile);
                return AVERROR_INVALIDDATA;
            } else if (get_bits1(&s->gb)) {
                av_log(avctx, AV_LOG_ERROR, "Profile %d color details reserved bit set\n",
                       avctx->profile);
                return AVERROR_INVALIDDATA;
            }
        } else {
            s->ss_h = s->ss_v = 1;
            s->pix_fmt = pix_fmt_for_ss[bits][1][1];
        }
    }

    return 0;
}

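/* Parses the uncompressed frame header plus the range-coded ("compressed")
 * probability-update header. Returns the total header size in bytes on
 * success, 0 for a show-existing-frame header (with *ref set to the
 * reference slot to display), or a negative error code on failure. */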
static int decode_frame_header(AVCodecContext *avctx,
                               const uint8_t *data, int size, int *ref)
{
    VP9Context *s = avctx->priv_data;
    int c, i, j, k, l, m, n, w, h, max, size2, ret, sharp;
    int last_invisible;
    const uint8_t *data2;

    /* general header */
    if ((ret = init_get_bits8(&s->gb, data, size)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to initialize bitstream reader\n");
        return ret;
    }
    if (get_bits(&s->gb, 2) != 0x2) { // frame marker
        av_log(avctx, AV_LOG_ERROR, "Invalid frame marker\n");
        return AVERROR_INVALIDDATA;
    }
    avctx->profile  = get_bits1(&s->gb);
    avctx->profile |= get_bits1(&s->gb) << 1;
    if (avctx->profile == 3) avctx->profile += get_bits1(&s->gb);
    if (avctx->profile > 3) {
        av_log(avctx, AV_LOG_ERROR, "Profile %d is not yet supported\n", avctx->profile);
        return AVERROR_INVALIDDATA;
    }
    s->s.h.profile = avctx->profile;
    if (get_bits1(&s->gb)) {
        *ref = get_bits(&s->gb, 3);
        return 0;
    }

    s->last_keyframe = s->s.h.keyframe;
    s->s.h.keyframe  = !get_bits1(&s->gb);

    last_invisible   = s->s.h.invisible;
    s->s.h.invisible = !get_bits1(&s->gb);
    s->s.h.errorres  = get_bits1(&s->gb);
    s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;

    if (s->s.h.keyframe) {
        if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
            av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
            return AVERROR_INVALIDDATA;
        }
        if ((ret = read_colorspace_details(avctx)) < 0)
            return ret;
        // for profile 1, here follows the subsampling bits
        s->s.h.refreshrefmask = 0xff;
        w = get_bits(&s->gb, 16) + 1;
        h = get_bits(&s->gb, 16) + 1;
        if (get_bits1(&s->gb)) // display size
            skip_bits(&s->gb, 32);
    } else {
        s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0;
        s->s.h.resetctx  = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
        if (s->s.h.intraonly) {
            if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
                av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
                return AVERROR_INVALIDDATA;
            }
            if (avctx->profile >= 1) {
                if ((ret = read_colorspace_details(avctx)) < 0)
                    return ret;
            } else {
                s->ss_h = s->ss_v = 1;
                s->s.h.bpp = 8;
                s->bpp_index = 0;
                s->bytesperpixel = 1;
                s->pix_fmt = AV_PIX_FMT_YUV420P;
                avctx->colorspace = AVCOL_SPC_BT470BG;
                avctx->color_range = AVCOL_RANGE_MPEG;
            }
            s->s.h.refreshrefmask = get_bits(&s->gb, 8);
            w = get_bits(&s->gb, 16) + 1;
            h = get_bits(&s->gb, 16) + 1;
            if (get_bits1(&s->gb)) // display size
                skip_bits(&s->gb, 32);
        } else {
            s->s.h.refreshrefmask = get_bits(&s->gb, 8);
            s->s.h.refidx[0]      = get_bits(&s->gb, 3);
            s->s.h.signbias[0]    = get_bits1(&s->gb) && !s->s.h.errorres;
            s->s.h.refidx[1]      = get_bits(&s->gb, 3);
            s->s.h.signbias[1]    = get_bits1(&s->gb) && !s->s.h.errorres;
            s->s.h.refidx[2]      = get_bits(&s->gb, 3);
            s->s.h.signbias[2]    = get_bits1(&s->gb) && !s->s.h.errorres;
            if (!s->s.refs[s->s.h.refidx[0]].f->buf[0] ||
                !s->s.refs[s->s.h.refidx[1]].f->buf[0] ||
                !s->s.refs[s->s.h.refidx[2]].f->buf[0]) {
                av_log(avctx, AV_LOG_ERROR, "Not all references are available\n");
                return AVERROR_INVALIDDATA;
            }
            if (get_bits1(&s->gb)) {
                w = s->s.refs[s->s.h.refidx[0]].f->width;
                h = s->s.refs[s->s.h.refidx[0]].f->height;
            } else if (get_bits1(&s->gb)) {
                w = s->s.refs[s->s.h.refidx[1]].f->width;
                h = s->s.refs[s->s.h.refidx[1]].f->height;
            } else if (get_bits1(&s->gb)) {
                w = s->s.refs[s->s.h.refidx[2]].f->width;
                h = s->s.refs[s->s.h.refidx[2]].f->height;
            } else {
                w = get_bits(&s->gb, 16) + 1;
                h = get_bits(&s->gb, 16) + 1;
            }
            // Note that in this code, "CUR_FRAME" is actually before we
            // have formally allocated a frame, and thus actually represents
            // the _last_ frame
            s->s.h.use_last_frame_mvs &= s->s.frames[CUR_FRAME].tf.f->width  == w &&
                                         s->s.frames[CUR_FRAME].tf.f->height == h;
            if (get_bits1(&s->gb)) // display size
                skip_bits(&s->gb, 32);
            s->s.h.highprecisionmvs = get_bits1(&s->gb);
            s->s.h.filtermode = get_bits1(&s->gb) ? FILTER_SWITCHABLE :
                                                    get_bits(&s->gb, 2);
            s->s.h.allowcompinter = s->s.h.signbias[0] != s->s.h.signbias[1] ||
                                    s->s.h.signbias[0] != s->s.h.signbias[2];
            if (s->s.h.allowcompinter) {
                if (s->s.h.signbias[0] == s->s.h.signbias[1]) {
                    s->s.h.fixcompref    = 2;
                    s->s.h.varcompref[0] = 0;
                    s->s.h.varcompref[1] = 1;
                } else if (s->s.h.signbias[0] == s->s.h.signbias[2]) {
                    s->s.h.fixcompref    = 1;
                    s->s.h.varcompref[0] = 0;
                    s->s.h.varcompref[1] = 2;
                } else {
                    s->s.h.fixcompref    = 0;
                    s->s.h.varcompref[0] = 1;
                    s->s.h.varcompref[1] = 2;
                }
            }
        }
    }
    s->s.h.refreshctx   = s->s.h.errorres ? 0 : get_bits1(&s->gb);
    s->s.h.parallelmode = s->s.h.errorres ? 1 : get_bits1(&s->gb);
    s->s.h.framectxid   = c = get_bits(&s->gb, 2);
    if (s->s.h.keyframe || s->s.h.intraonly)
        s->s.h.framectxid = 0; // BUG: libvpx ignores this field in keyframes

    /* loopfilter header data */
    if (s->s.h.keyframe || s->s.h.errorres || s->s.h.intraonly) {
        // reset loopfilter defaults
        s->s.h.lf_delta.ref[0]  = 1;
        s->s.h.lf_delta.ref[1]  = 0;
        s->s.h.lf_delta.ref[2]  = -1;
        s->s.h.lf_delta.ref[3]  = -1;
        s->s.h.lf_delta.mode[0] = 0;
        s->s.h.lf_delta.mode[1] = 0;
        memset(s->s.h.segmentation.feat, 0, sizeof(s->s.h.segmentation.feat));
    }
    s->s.h.filter.level = get_bits(&s->gb, 6);
    sharp = get_bits(&s->gb, 3);
    // if sharpness changed, reinit lim/mblim LUTs. if it didn't change, keep
    // the old cache values since they are still valid
    if (s->s.h.filter.sharpness != sharp) {
        for (i = 1; i <= 63; i++) {
            int limit = i;

            if (sharp > 0) {
                limit >>= (sharp + 3) >> 2;
                limit = FFMIN(limit, 9 - sharp);
            }
            limit = FFMAX(limit, 1);

            s->filter_lut.lim_lut[i] = limit;
            s->filter_lut.mblim_lut[i] = 2 * (i + 2) + limit;
        }
    }
    s->s.h.filter.sharpness = sharp;
    if ((s->s.h.lf_delta.enabled = get_bits1(&s->gb))) {
        if ((s->s.h.lf_delta.updated = get_bits1(&s->gb))) {
            for (i = 0; i < 4; i++)
                if (get_bits1(&s->gb))
                    s->s.h.lf_delta.ref[i] = get_sbits_inv(&s->gb, 6);
            for (i = 0; i < 2; i++)
                if (get_bits1(&s->gb))
                    s->s.h.lf_delta.mode[i] = get_sbits_inv(&s->gb, 6);
        }
    }

    /* quantization header data */
    s->s.h.yac_qi      = get_bits(&s->gb, 8);
    s->s.h.ydc_qdelta  = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
    s->s.h.uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
    s->s.h.uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
    s->s.h.lossless    = s->s.h.yac_qi == 0 && s->s.h.ydc_qdelta == 0 &&
                         s->s.h.uvdc_qdelta == 0 && s->s.h.uvac_qdelta == 0;
    if (s->s.h.lossless)
        avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;

    /* segmentation header info */
    if ((s->s.h.segmentation.enabled = get_bits1(&s->gb))) {
        if ((s->s.h.segmentation.update_map = get_bits1(&s->gb))) {
            for (i = 0; i < 7; i++)
                s->s.h.segmentation.prob[i] = get_bits1(&s->gb) ?
                                              get_bits(&s->gb, 8) : 255;
            if ((s->s.h.segmentation.temporal = get_bits1(&s->gb)))
                for (i = 0; i < 3; i++)
                    s->s.h.segmentation.pred_prob[i] = get_bits1(&s->gb) ?
                                                       get_bits(&s->gb, 8) : 255;
        }

        if (get_bits1(&s->gb)) {
            s->s.h.segmentation.absolute_vals = get_bits1(&s->gb);
            for (i = 0; i < 8; i++) {
                if ((s->s.h.segmentation.feat[i].q_enabled = get_bits1(&s->gb)))
                    s->s.h.segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8);
                if ((s->s.h.segmentation.feat[i].lf_enabled = get_bits1(&s->gb)))
                    s->s.h.segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6);
                if ((s->s.h.segmentation.feat[i].ref_enabled = get_bits1(&s->gb)))
                    s->s.h.segmentation.feat[i].ref_val = get_bits(&s->gb, 2);
                s->s.h.segmentation.feat[i].skip_enabled = get_bits1(&s->gb);
            }
        }
    }

    // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas
    for (i = 0; i < (s->s.h.segmentation.enabled ? 8 : 1); i++) {
        int qyac, qydc, quvac, quvdc, lflvl, sh;

        if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].q_enabled) {
            if (s->s.h.segmentation.absolute_vals)
                qyac = av_clip_uintp2(s->s.h.segmentation.feat[i].q_val, 8);
            else
                qyac = av_clip_uintp2(s->s.h.yac_qi + s->s.h.segmentation.feat[i].q_val, 8);
        } else {
            qyac = s->s.h.yac_qi;
        }
        qydc  = av_clip_uintp2(qyac + s->s.h.ydc_qdelta, 8);
        quvdc = av_clip_uintp2(qyac + s->s.h.uvdc_qdelta, 8);
        quvac = av_clip_uintp2(qyac + s->s.h.uvac_qdelta, 8);
        qyac  = av_clip_uintp2(qyac, 8);

        s->s.h.segmentation.feat[i].qmul[0][0] = ff_vp9_dc_qlookup[s->bpp_index][qydc];
        s->s.h.segmentation.feat[i].qmul[0][1] = ff_vp9_ac_qlookup[s->bpp_index][qyac];
        s->s.h.segmentation.feat[i].qmul[1][0] = ff_vp9_dc_qlookup[s->bpp_index][quvdc];
        s->s.h.segmentation.feat[i].qmul[1][1] = ff_vp9_ac_qlookup[s->bpp_index][quvac];

        sh = s->s.h.filter.level >= 32;
        if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].lf_enabled) {
            if (s->s.h.segmentation.absolute_vals)
                lflvl = av_clip_uintp2(s->s.h.segmentation.feat[i].lf_val, 6);
            else
                lflvl = av_clip_uintp2(s->s.h.filter.level + s->s.h.segmentation.feat[i].lf_val, 6);
        } else {
            lflvl = s->s.h.filter.level;
        }
        if (s->s.h.lf_delta.enabled) {
            s->s.h.segmentation.feat[i].lflvl[0][0] =
            s->s.h.segmentation.feat[i].lflvl[0][1] =
                av_clip_uintp2(lflvl + (s->s.h.lf_delta.ref[0] * (1 << sh)), 6);
            for (j = 1; j < 4; j++) {
                s->s.h.segmentation.feat[i].lflvl[j][0] =
                    av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
                                             s->s.h.lf_delta.mode[0]) * (1 << sh)), 6);
                s->s.h.segmentation.feat[i].lflvl[j][1] =
                    av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
                                             s->s.h.lf_delta.mode[1]) * (1 << sh)), 6);
            }
        } else {
            memset(s->s.h.segmentation.feat[i].lflvl, lflvl,
                   sizeof(s->s.h.segmentation.feat[i].lflvl));
        }
    }

    /* tiling info */
    if ((ret = update_size(avctx, w, h)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to initialize decoder for %dx%d @ %d\n",
               w, h, s->pix_fmt);
        return ret;
    }
    for (s->s.h.tiling.log2_tile_cols = 0;
         s->sb_cols > (64 << s->s.h.tiling.log2_tile_cols);
         s->s.h.tiling.log2_tile_cols++) ;
    for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
    max = FFMAX(0, max - 1);
    while (max > s->s.h.tiling.log2_tile_cols) {
        if (get_bits1(&s->gb))
            s->s.h.tiling.log2_tile_cols++;
        else
            break;
    }
    s->s.h.tiling.log2_tile_rows = decode012(&s->gb);
    s->s.h.tiling.tile_rows = 1 << s->s.h.tiling.log2_tile_rows;
    if (s->s.h.tiling.tile_cols != (1 << s->s.h.tiling.log2_tile_cols)) {
        int n_range_coders;
        VPXRangeCoder *rc;

        if (s->td) {
            for (i = 0; i < s->active_tile_cols; i++)
                vp9_tile_data_free(&s->td[i]);
            av_freep(&s->td);
        }

        s->s.h.tiling.tile_cols = 1 << s->s.h.tiling.log2_tile_cols;
        s->active_tile_cols = avctx->active_thread_type == FF_THREAD_SLICE ?
                              s->s.h.tiling.tile_cols : 1;
        vp9_alloc_entries(avctx, s->sb_rows);
        if (avctx->active_thread_type == FF_THREAD_SLICE) {
            n_range_coders = 4; // max_tile_rows
        } else {
            n_range_coders = s->s.h.tiling.tile_cols;
        }
        s->td = av_calloc(s->active_tile_cols, sizeof(VP9TileData) +
                          n_range_coders * sizeof(VPXRangeCoder));
        if (!s->td)
            return AVERROR(ENOMEM);
        rc = (VPXRangeCoder *) &s->td[s->active_tile_cols];
        for (i = 0; i < s->active_tile_cols; i++) {
            s->td[i].s = s;
            s->td[i].c_b = rc;
            rc += n_range_coders;
        }
    }

    /* check reference frames */
    if (!s->s.h.keyframe && !s->s.h.intraonly) {
        int valid_ref_frame = 0;
        for (i = 0; i < 3; i++) {
            AVFrame *ref = s->s.refs[s->s.h.refidx[i]].f;
            int refw = ref->width, refh = ref->height;

            if (ref->format != avctx->pix_fmt) {
                av_log(avctx, AV_LOG_ERROR,
                       "Ref pixfmt (%s) did not match current frame (%s)",
                       av_get_pix_fmt_name(ref->format),
                       av_get_pix_fmt_name(avctx->pix_fmt));
                return AVERROR_INVALIDDATA;
            } else if (refw == w && refh == h) {
                s->mvscale[i][0] = s->mvscale[i][1] = 0;
            } else {
                /* Check to make sure at least one of the frames that this
                 * frame references has valid dimensions. */
                if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
                    av_log(avctx, AV_LOG_WARNING,
                           "Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
                           refw, refh, w, h);
                    s->mvscale[i][0] = s->mvscale[i][1] = REF_INVALID_SCALE;
                    continue;
                }
                s->mvscale[i][0] = (refw << 14) / w;
                s->mvscale[i][1] = (refh << 14) / h;
                s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
                s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
            }
            valid_ref_frame++;
        }
        if (!valid_ref_frame) {
            av_log(avctx, AV_LOG_ERROR, "No valid reference frame is found, bitstream not supported\n");
            return AVERROR_INVALIDDATA;
        }
    }

    if (s->s.h.keyframe || s->s.h.errorres || (s->s.h.intraonly && s->s.h.resetctx == 3)) {
        s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p =
                           s->prob_ctx[3].p = ff_vp9_default_probs;
        memcpy(s->prob_ctx[0].coef, ff_vp9_default_coef_probs,
               sizeof(ff_vp9_default_coef_probs));
        memcpy(s->prob_ctx[1].coef, ff_vp9_default_coef_probs,
               sizeof(ff_vp9_default_coef_probs));
        memcpy(s->prob_ctx[2].coef, ff_vp9_default_coef_probs,
               sizeof(ff_vp9_default_coef_probs));
        memcpy(s->prob_ctx[3].coef, ff_vp9_default_coef_probs,
               sizeof(ff_vp9_default_coef_probs));
    } else if (s->s.h.intraonly && s->s.h.resetctx == 2) {
        s->prob_ctx[c].p = ff_vp9_default_probs;
        memcpy(s->prob_ctx[c].coef, ff_vp9_default_coef_probs,
               sizeof(ff_vp9_default_coef_probs));
    }

    // next 16 bits is size of the rest of the header (arith-coded)
    s->s.h.compressed_header_size = size2 = get_bits(&s->gb, 16);
    s->s.h.uncompressed_header_size = (get_bits_count(&s->gb) + 7) / 8;

    data2 = align_get_bits(&s->gb);
    if (size2 > size - (data2 - data)) {
        av_log(avctx, AV_LOG_ERROR, "Invalid compressed header size\n");
        return AVERROR_INVALIDDATA;
    }
    ret = ff_vpx_init_range_decoder(&s->c, data2, size2);
    if (ret < 0)
        return ret;

    if (vpx_rac_get_prob_branchy(&s->c, 128)) { // marker bit
        av_log(avctx, AV_LOG_ERROR, "Marker bit was set\n");
        return AVERROR_INVALIDDATA;
    }

    for (i = 0; i < s->active_tile_cols; i++) {
        if (s->s.h.keyframe || s->s.h.intraonly) {
            memset(s->td[i].counts.coef, 0, sizeof(s->td[0].counts.coef));
            memset(s->td[i].counts.eob,  0, sizeof(s->td[0].counts.eob));
        } else {
            memset(&s->td[i].counts, 0, sizeof(s->td[0].counts));
        }
        s->td[i].nb_block_structure = 0;
    }

    /* FIXME is it faster to not copy here, but do it down in the fw updates
     * as explicit copies if the fw update is missing (and skip the copy upon
     * fw update)? */
    s->prob.p = s->prob_ctx[c].p;

    // txfm updates
    if (s->s.h.lossless) {
        s->s.h.txfmmode = TX_4X4;
    } else {
        s->s.h.txfmmode = vp89_rac_get_uint(&s->c, 2);
        if (s->s.h.txfmmode == 3)
            s->s.h.txfmmode += vp89_rac_get(&s->c);

        if (s->s.h.txfmmode == TX_SWITCHABLE) {
            for (i = 0; i < 2; i++)
                if (vpx_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]);
            for (i = 0; i < 2; i++)
                for (j = 0; j < 2; j++)
                    if (vpx_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.tx16p[i][j] =
                            update_prob(&s->c, s->prob.p.tx16p[i][j]);
            for (i = 0; i < 2; i++)
                for (j = 0; j < 3; j++)
                    if (vpx_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.tx32p[i][j] =
                            update_prob(&s->c, s->prob.p.tx32p[i][j]);
        }
    }

    // coef updates
    for (i = 0; i < 4; i++) {
        uint8_t (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i];
        if (vp89_rac_get(&s->c)) {
            for (j = 0; j < 2; j++)
                for (k = 0; k < 2; k++)
                    for (l = 0; l < 6; l++)
                        for (m = 0; m < 6; m++) {
                            uint8_t *p = s->prob.coef[i][j][k][l][m];
                            uint8_t *r = ref[j][k][l][m];
                            if (m >= 3 && l == 0) // dc only has 3 pt
                                break;
                            for (n = 0; n < 3; n++) {
                                if (vpx_rac_get_prob_branchy(&s->c, 252))
                                    p[n] = update_prob(&s->c, r[n]);
                                else
                                    p[n] = r[n];
                            }
                            memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
                        }
        } else {
            for (j = 0; j < 2; j++)
                for (k = 0; k < 2; k++)
                    for (l = 0; l < 6; l++)
                        for (m = 0; m < 6; m++) {
                            uint8_t *p = s->prob.coef[i][j][k][l][m];
                            uint8_t *r = ref[j][k][l][m];
                            if (m > 3 && l == 0) // dc only has 3 pt
                                break;
                            memcpy(p, r, 3);
                            memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
                        }
        }
        if (s->s.h.txfmmode == i)
            break;
    }

    // mode updates
    for (i = 0; i < 3; i++)
        if (vpx_rac_get_prob_branchy(&s->c, 252))
            s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]);
    if (!s->s.h.keyframe && !s->s.h.intraonly) {
        for (i = 0; i < 7; i++)
            for (j = 0; j < 3; j++)
                if (vpx_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_mode[i][j] =
                        update_prob(&s->c, s->prob.p.mv_mode[i][j]);

        if (s->s.h.filtermode == FILTER_SWITCHABLE)
            for (i = 0; i < 4; i++)
                for (j = 0; j < 2; j++)
                    if (vpx_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.filter[i][j] =
                            update_prob(&s->c, s->prob.p.filter[i][j]);

        for (i = 0; i < 4; i++)
            if (vpx_rac_get_prob_branchy(&s->c, 252))
                s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]);

        if (s->s.h.allowcompinter) {
            s->s.h.comppredmode = vp89_rac_get(&s->c);
            if (s->s.h.comppredmode)
                s->s.h.comppredmode += vp89_rac_get(&s->c);
            if (s->s.h.comppredmode == PRED_SWITCHABLE)
                for (i = 0; i < 5; i++)
                    if (vpx_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.comp[i] =
                            update_prob(&s->c, s->prob.p.comp[i]);
        } else {
            s->s.h.comppredmode = PRED_SINGLEREF;
        }

        if (s->s.h.comppredmode != PRED_COMPREF) {
            for (i = 0; i < 5; i++) {
                if (vpx_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.single_ref[i][0] =
                        update_prob(&s->c, s->prob.p.single_ref[i][0]);
                if (vpx_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.single_ref[i][1] =
                        update_prob(&s->c, s->prob.p.single_ref[i][1]);
            }
        }

        if (s->s.h.comppredmode != PRED_SINGLEREF) {
            for (i = 0; i < 5; i++)
                if (vpx_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.comp_ref[i] =
                        update_prob(&s->c, s->prob.p.comp_ref[i]);
        }

        for (i = 0; i < 4; i++)
            for (j = 0; j < 9; j++)
                if (vpx_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.y_mode[i][j] =
                        update_prob(&s->c, s->prob.p.y_mode[i][j]);

        for (i = 0; i < 4; i++)
            for (j = 0; j < 4; j++)
                for (k = 0; k < 3; k++)
                    if (vpx_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.partition[3 - i][j][k] =
                            update_prob(&s->c,
                                        s->prob.p.partition[3 - i][j][k]);

        // mv fields don't use the update_prob subexp model for some reason
        for (i = 0; i < 3; i++)
            if (vpx_rac_get_prob_branchy(&s->c, 252))
                s->prob.p.mv_joint[i] = (vp89_rac_get_uint(&s->c, 7) << 1) | 1;

        for (i = 0; i < 2; i++) {
            if (vpx_rac_get_prob_branchy(&s->c, 252))
                s->prob.p.mv_comp[i].sign =
                    (vp89_rac_get_uint(&s->c, 7) << 1) | 1;

            for (j = 0; j < 10; j++)
                if (vpx_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].classes[j] =
                        (vp89_rac_get_uint(&s->c, 7) << 1) | 1;

            if (vpx_rac_get_prob_branchy(&s->c, 252))
                s->prob.p.mv_comp[i].class0 =
                    (vp89_rac_get_uint(&s->c, 7) << 1) | 1;

            for (j = 0; j < 10; j++)
                if (vpx_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].bits[j] =
                        (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
        }

        for (i = 0; i < 2; i++) {
            for (j = 0; j < 2; j++)
                for (k = 0; k < 3; k++)
                    if (vpx_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.mv_comp[i].class0_fp[j][k] =
                            (vp89_rac_get_uint(&s->c, 7) << 1) | 1;

            for (j = 0; j < 3; j++)
                if (vpx_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].fp[j] =
                        (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
        }

        if (s->s.h.highprecisionmvs) {
            for (i = 0; i < 2; i++) {
                if (vpx_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].class0_hp =
                        (vp89_rac_get_uint(&s->c, 7) << 1) | 1;

                if (vpx_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].hp =
                        (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
            }
        }
    }

    return (data2 - data) + size2;
}

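/* Recursively decodes one superblock partition. c combines the above/left
 * partition contexts, bl is the current block level (BL_64X64 down to
 * BL_8X8) and hbs is half the block size in 8x8-block units. Blocks on the
 * right/bottom frame edge have fewer legal partitions, which is why the
 * trailing branches read fewer (or no) symbols. */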
static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl,
                      ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
{
    const VP9Context *s = td->s;
    int c = ((s->above_partition_ctx[col] >> (3 - bl)) & 1) |
            (((td->left_partition_ctx[row & 0x7] >> (3 - bl)) & 1) << 1);
    const uint8_t *p = s->s.h.keyframe || s->s.h.intraonly ? ff_vp9_default_kf_partition_probs[bl][c] :
                                                             s->prob.p.partition[bl][c];
    enum BlockPartition bp;
    ptrdiff_t hbs = 4 >> bl;
    AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
    ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
    int bytesperpixel = s->bytesperpixel;

    if (bl == BL_8X8) {
        bp = vp89_rac_get_tree(td->c, ff_vp9_partition_tree, p);
        ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
    } else if (col + hbs < s->cols) { // FIXME why not <=?
        if (row + hbs < s->rows) { // FIXME why not <=?
            bp = vp89_rac_get_tree(td->c, ff_vp9_partition_tree, p);
            switch (bp) {
            case PARTITION_NONE:
                ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
                break;
            case PARTITION_H:
                ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
                yoff  += hbs * 8 * y_stride;
                uvoff += hbs * 8 * uv_stride >> s->ss_v;
                ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, bl, bp);
                break;
            case PARTITION_V:
                ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
                yoff  += hbs * 8 * bytesperpixel;
                uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
                ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, bl, bp);
                break;
            case PARTITION_SPLIT:
                decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
                decode_sb(td, row, col + hbs, lflvl,
                          yoff + 8 * hbs * bytesperpixel,
                          uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
                yoff  += hbs * 8 * y_stride;
                uvoff += hbs * 8 * uv_stride >> s->ss_v;
                decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
                decode_sb(td, row + hbs, col + hbs, lflvl,
                          yoff + 8 * hbs * bytesperpixel,
                          uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
                break;
            default:
                av_assert0(0);
            }
        } else if (vpx_rac_get_prob_branchy(td->c, p[1])) {
            bp = PARTITION_SPLIT;
            decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
            decode_sb(td, row, col + hbs, lflvl,
                      yoff + 8 * hbs * bytesperpixel,
                      uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
        } else {
            bp = PARTITION_H;
            ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
        }
    } else if (row + hbs < s->rows) { // FIXME why not <=?
        if (vpx_rac_get_prob_branchy(td->c, p[2])) {
            bp = PARTITION_SPLIT;
            decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
            yoff  += hbs * 8 * y_stride;
            uvoff += hbs * 8 * uv_stride >> s->ss_v;
            decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
        } else {
            bp = PARTITION_V;
            ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
        }
    } else {
        bp = PARTITION_SPLIT;
        decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
    }
    td->counts.partition[bl][c][bp]++;
}

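/* Second-pass counterpart of decode_sb(): replays the partitioning that
 * pass 1 stored in the VP9Block array instead of reading it from the
 * bitstream. */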
static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl,
                          ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
{
    const VP9Context *s = td->s;
    VP9Block *b = td->b;
    ptrdiff_t hbs = 4 >> bl;
    AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
    ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
    int bytesperpixel = s->bytesperpixel;

    if (bl == BL_8X8) {
        av_assert2(b->bl == BL_8X8);
        ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
    } else if (td->b->bl == bl) {
        ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
        if (b->bp == PARTITION_H && row + hbs < s->rows) {
            yoff  += hbs * 8 * y_stride;
            uvoff += hbs * 8 * uv_stride >> s->ss_v;
            ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, b->bl, b->bp);
        } else if (b->bp == PARTITION_V && col + hbs < s->cols) {
            yoff  += hbs * 8 * bytesperpixel;
            uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
            ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, b->bl, b->bp);
        }
    } else {
        decode_sb_mem(td, row, col, lflvl, yoff, uvoff, bl + 1);
        if (col + hbs < s->cols) { // FIXME why not <=?
            if (row + hbs < s->rows) {
                decode_sb_mem(td, row, col + hbs, lflvl, yoff + 8 * hbs * bytesperpixel,
                              uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
                yoff  += hbs * 8 * y_stride;
                uvoff += hbs * 8 * uv_stride >> s->ss_v;
                decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
                decode_sb_mem(td, row + hbs, col + hbs, lflvl,
                              yoff + 8 * hbs * bytesperpixel,
                              uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
            } else {
                yoff  += hbs * 8 * bytesperpixel;
                uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
                decode_sb_mem(td, row, col + hbs, lflvl, yoff, uvoff, bl + 1);
            }
        } else if (row + hbs < s->rows) {
            yoff  += hbs * 8 * y_stride;
            uvoff += hbs * 8 * uv_stride >> s->ss_v;
            decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
        }
    }
}

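/* Converts a tile index into a [start, end) range of 8x8-block columns or
 * rows; e.g. with log2_n == 1 (two tiles) and n == 10 superblock columns,
 * tile 0 covers block columns [0, 40) and tile 1 covers [40, 80). */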
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
{
    int sb_start = ( idx      * n) >> log2_n;
    int sb_end   = ((idx + 1) * n) >> log2_n;
    *start = FFMIN(sb_start, n) << 3;
    *end   = FFMIN(sb_end,   n) << 3;
}

static void free_buffers(VP9Context *s)
{
    int i;

    av_freep(&s->intra_pred_data[0]);
    for (i = 0; i < s->active_tile_cols; i++)
        vp9_tile_data_free(&s->td[i]);
}

static av_cold int vp9_decode_free(AVCodecContext *avctx)
{
    VP9Context *s = avctx->priv_data;
    int i;

    for (i = 0; i < 3; i++) {
        vp9_frame_unref(avctx, &s->s.frames[i]);
        av_frame_free(&s->s.frames[i].tf.f);
    }
    av_buffer_pool_uninit(&s->frame_extradata_pool);
    for (i = 0; i < 8; i++) {
        ff_thread_release_ext_buffer(avctx, &s->s.refs[i]);
        av_frame_free(&s->s.refs[i].f);
        ff_thread_release_ext_buffer(avctx, &s->next_refs[i]);
        av_frame_free(&s->next_refs[i].f);
    }

    free_buffers(s);
#if HAVE_THREADS
    av_freep(&s->entries);
    ff_pthread_free(s, vp9_context_offsets);
#endif
    av_freep(&s->td);
    return 0;
}

static int decode_tiles(AVCodecContext *avctx,
                        const uint8_t *data, int size)
{
    VP9Context *s = avctx->priv_data;
    VP9TileData *td = &s->td[0];
    int row, col, tile_row, tile_col, ret;
    int bytesperpixel;
    int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
    AVFrame *f;
    ptrdiff_t yoff, uvoff, ls_y, ls_uv;

    f = s->s.frames[CUR_FRAME].tf.f;
    ls_y = f->linesize[0];
    ls_uv = f->linesize[1];
    bytesperpixel = s->bytesperpixel;

    yoff = uvoff = 0;
    for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
        set_tile_offset(&tile_row_start, &tile_row_end,
                        tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);

        for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
            int64_t tile_size;

            if (tile_col == s->s.h.tiling.tile_cols - 1 &&
                tile_row == s->s.h.tiling.tile_rows - 1) {
                tile_size = size;
            } else {
                tile_size = AV_RB32(data);
                data += 4;
                size -= 4;
            }
            if (tile_size > size) {
                ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
                return AVERROR_INVALIDDATA;
            }
            ret = ff_vpx_init_range_decoder(&td->c_b[tile_col], data, tile_size);
            if (ret < 0)
                return ret;
            if (vpx_rac_get_prob_branchy(&td->c_b[tile_col], 128)) { // marker bit
                ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
                return AVERROR_INVALIDDATA;
            }
            data += tile_size;
            size -= tile_size;
        }

        for (row = tile_row_start; row < tile_row_end;
             row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
            VP9Filter *lflvl_ptr = s->lflvl;
            ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;

            for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
                set_tile_offset(&tile_col_start, &tile_col_end,
                                tile_col, s->s.h.tiling.log2_tile_cols, s->sb_cols);
                td->tile_col_start = tile_col_start;
                if (s->pass != 2) {
                    memset(td->left_partition_ctx, 0, 8);
                    memset(td->left_skip_ctx, 0, 8);
                    if (s->s.h.keyframe || s->s.h.intraonly) {
                        memset(td->left_mode_ctx, DC_PRED, 16);
                    } else {
                        memset(td->left_mode_ctx, NEARESTMV, 8);
                    }
                    memset(td->left_y_nnz_ctx, 0, 16);
                    memset(td->left_uv_nnz_ctx, 0, 32);
                    memset(td->left_segpred_ctx, 0, 8);

                    td->c = &td->c_b[tile_col];
                }

                for (col = tile_col_start;
                     col < tile_col_end;
                     col += 8, yoff2 += 64 * bytesperpixel,
                     uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                    // FIXME integrate with lf code (i.e. zero after each
                    // use, similar to invtxfm coefficients, or similar)
                    if (s->pass != 1) {
                        memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
                    }

                    if (s->pass == 2) {
                        decode_sb_mem(td, row, col, lflvl_ptr,
                                      yoff2, uvoff2, BL_64X64);
                    } else {
                        if (vpx_rac_is_end(td->c)) {
                            return AVERROR_INVALIDDATA;
                        }
                        decode_sb(td, row, col, lflvl_ptr,
                                  yoff2, uvoff2, BL_64X64);
                    }
                }
            }

            if (s->pass == 1)
                continue;

            // backup pre-loopfilter reconstruction data for intra
            // prediction of next row of sb64s
            if (row + 8 < s->rows) {
                memcpy(s->intra_pred_data[0],
                       f->data[0] + yoff + 63 * ls_y,
                       8 * s->cols * bytesperpixel);
                memcpy(s->intra_pred_data[1],
                       f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * s->cols * bytesperpixel >> s->ss_h);
                memcpy(s->intra_pred_data[2],
                       f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * s->cols * bytesperpixel >> s->ss_h);
            }

            // loopfilter one row
            if (s->s.h.filter.level) {
                yoff2 = yoff;
                uvoff2 = uvoff;
                lflvl_ptr = s->lflvl;
                for (col = 0; col < s->cols;
                     col += 8, yoff2 += 64 * bytesperpixel,
                     uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                    ff_vp9_loopfilter_sb(avctx, lflvl_ptr, row, col,
                                         yoff2, uvoff2);
                }
            }

            // FIXME maybe we can make this more finegrained by running the
            // loopfilter per-block instead of after each sbrow
            // In fact that would also make intra pred left preparation easier?
            ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, row >> 3, 0);
        }
    }
    return 0;
}

  1266. #if HAVE_THREADS
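/* Slice-threaded tile decoding: each worker decodes one tile column (jobnr
 * selects the column) and reports per-superblock-row progress so that the
 * loopfilter, running as the main-thread function, can follow behind. */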
static av_always_inline
int decode_tiles_mt(AVCodecContext *avctx, void *tdata, int jobnr,
                    int threadnr)
{
    VP9Context *s = avctx->priv_data;
    VP9TileData *td = &s->td[jobnr];
    ptrdiff_t uvoff, yoff, ls_y, ls_uv;
    int bytesperpixel = s->bytesperpixel, row, col, tile_row;
    unsigned tile_cols_len;
    int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
    VP9Filter *lflvl_ptr_base;
    AVFrame *f;

    f = s->s.frames[CUR_FRAME].tf.f;
    ls_y = f->linesize[0];
    ls_uv = f->linesize[1];

    set_tile_offset(&tile_col_start, &tile_col_end,
                    jobnr, s->s.h.tiling.log2_tile_cols, s->sb_cols);
    td->tile_col_start = tile_col_start;
    uvoff = (64 * bytesperpixel >> s->ss_h) * (tile_col_start >> 3);
    yoff  = (64 * bytesperpixel) * (tile_col_start >> 3);
    lflvl_ptr_base = s->lflvl + (tile_col_start >> 3);

    for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
        set_tile_offset(&tile_row_start, &tile_row_end,
                        tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);

        td->c = &td->c_b[tile_row];
        for (row = tile_row_start; row < tile_row_end;
             row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
            ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
            VP9Filter *lflvl_ptr = lflvl_ptr_base + s->sb_cols * (row >> 3);

            memset(td->left_partition_ctx, 0, 8);
            memset(td->left_skip_ctx, 0, 8);
            if (s->s.h.keyframe || s->s.h.intraonly) {
                memset(td->left_mode_ctx, DC_PRED, 16);
            } else {
                memset(td->left_mode_ctx, NEARESTMV, 8);
            }
            memset(td->left_y_nnz_ctx, 0, 16);
            memset(td->left_uv_nnz_ctx, 0, 32);
            memset(td->left_segpred_ctx, 0, 8);

            for (col = tile_col_start;
                 col < tile_col_end;
                 col += 8, yoff2 += 64 * bytesperpixel,
                 uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                // FIXME integrate with lf code (i.e. zero after each
                // use, similar to invtxfm coefficients, or similar)
                memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
                decode_sb(td, row, col, lflvl_ptr,
                          yoff2, uvoff2, BL_64X64);
            }

            // backup pre-loopfilter reconstruction data for intra
            // prediction of next row of sb64s
            tile_cols_len = tile_col_end - tile_col_start;
            if (row + 8 < s->rows) {
                memcpy(s->intra_pred_data[0] + (tile_col_start * 8 * bytesperpixel),
                       f->data[0] + yoff + 63 * ls_y,
                       8 * tile_cols_len * bytesperpixel);
                memcpy(s->intra_pred_data[1] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
                       f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * tile_cols_len * bytesperpixel >> s->ss_h);
                memcpy(s->intra_pred_data[2] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
                       f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * tile_cols_len * bytesperpixel >> s->ss_h);
            }

            vp9_report_tile_progress(s, row >> 3, 1);
        }
    }
    return 0;
}
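
/* Main-thread companion to decode_tiles_mt(): wait until every tile column
 * has finished a superblock row, then loopfilter that row. */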
static av_always_inline
int loopfilter_proc(AVCodecContext *avctx)
{
    VP9Context *s = avctx->priv_data;
    ptrdiff_t uvoff, yoff, ls_y, ls_uv;
    VP9Filter *lflvl_ptr;
    int bytesperpixel = s->bytesperpixel, col, i;
    AVFrame *f;

    f = s->s.frames[CUR_FRAME].tf.f;
    ls_y = f->linesize[0];
    ls_uv = f->linesize[1];

    for (i = 0; i < s->sb_rows; i++) {
        vp9_await_tile_progress(s, i, s->s.h.tiling.tile_cols);

        if (s->s.h.filter.level) {
            yoff = (ls_y * 64) * i;
            uvoff = (ls_uv * 64 >> s->ss_v) * i;
            lflvl_ptr = s->lflvl + s->sb_cols * i;
            for (col = 0; col < s->cols;
                 col += 8, yoff += 64 * bytesperpixel,
                 uvoff += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                ff_vp9_loopfilter_sb(avctx, lflvl_ptr, i << 3, col,
                                     yoff, uvoff);
            }
        }
    }
    return 0;
}
#endif
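
/* Export the frame-level quantizer, the per-plane QP deltas and, when
 * segmentation is enabled, per-block QP overrides as AVVideoEncParams
 * (type AV_VIDEO_ENC_PARAMS_VP9) side data on the output frame. */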
static int vp9_export_enc_params(VP9Context *s, VP9Frame *frame)
{
    AVVideoEncParams *par;
    unsigned int tile, nb_blocks = 0;

    if (s->s.h.segmentation.enabled) {
        for (tile = 0; tile < s->active_tile_cols; tile++)
            nb_blocks += s->td[tile].nb_block_structure;
    }

    par = av_video_enc_params_create_side_data(frame->tf.f,
        AV_VIDEO_ENC_PARAMS_VP9, nb_blocks);
    if (!par)
        return AVERROR(ENOMEM);

    par->qp             = s->s.h.yac_qi;
    par->delta_qp[0][0] = s->s.h.ydc_qdelta;
    par->delta_qp[1][0] = s->s.h.uvdc_qdelta;
    par->delta_qp[2][0] = s->s.h.uvdc_qdelta;
    par->delta_qp[1][1] = s->s.h.uvac_qdelta;
    par->delta_qp[2][1] = s->s.h.uvac_qdelta;

    if (nb_blocks) {
        unsigned int block = 0;
        unsigned int tile, block_tile;

        for (tile = 0; tile < s->active_tile_cols; tile++) {
            VP9TileData *td = &s->td[tile];

            for (block_tile = 0; block_tile < td->nb_block_structure; block_tile++) {
                AVVideoBlockParams *b = av_video_enc_params_block(par, block++);
                unsigned int      row = td->block_structure[block_tile].row;
                unsigned int      col = td->block_structure[block_tile].col;
                uint8_t        seg_id = frame->segmentation_map[row * 8 * s->sb_cols + col];

                b->src_x = col * 8;
                b->src_y = row * 8;
                b->w     = 1 << (3 + td->block_structure[block_tile].block_size_idx_x);
                b->h     = 1 << (3 + td->block_structure[block_tile].block_size_idx_y);

                if (s->s.h.segmentation.feat[seg_id].q_enabled) {
                    b->delta_qp = s->s.h.segmentation.feat[seg_id].q_val;
                    if (s->s.h.segmentation.absolute_vals)
                        b->delta_qp -= par->qp;
                }
            }
        }
    }

    return 0;
}
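
/* Top-level decode entry point: parse the frame header, handle
 * show-existing-frame packets, decode the tile data (in two passes when
 * frame threading requires it), then rotate the reference slots. */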
static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame,
                            int *got_frame, AVPacket *pkt)
{
    const uint8_t *data = pkt->data;
    int size = pkt->size;
    VP9Context *s = avctx->priv_data;
    int ret, i, j, ref;
    int retain_segmap_ref = s->s.frames[REF_FRAME_SEGMAP].segmentation_map &&
                            (!s->s.h.segmentation.enabled || !s->s.h.segmentation.update_map);
    AVFrame *f;
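
    /* decode_frame_header() returns the number of header bytes consumed, or
     * 0 for a "show existing frame" packet, in which case ref names the
     * reference slot that is simply re-displayed. */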
    if ((ret = decode_frame_header(avctx, data, size, &ref)) < 0) {
        return ret;
    } else if (ret == 0) {
        if (!s->s.refs[ref].f->buf[0]) {
            av_log(avctx, AV_LOG_ERROR, "Requested reference %d not available\n", ref);
            return AVERROR_INVALIDDATA;
        }
        if ((ret = av_frame_ref(frame, s->s.refs[ref].f)) < 0)
            return ret;
        frame->pts     = pkt->pts;
        frame->pkt_dts = pkt->dts;
        for (i = 0; i < 8; i++) {
            if (s->next_refs[i].f->buf[0])
                ff_thread_release_ext_buffer(avctx, &s->next_refs[i]);
            if (s->s.refs[i].f->buf[0] &&
                (ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i])) < 0)
                return ret;
        }
        *got_frame = 1;
        return pkt->size;
    }
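
    /* The headers have been consumed; the rest of the packet is tile data. */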
    data += ret;
    size -= ret;

    if (!retain_segmap_ref || s->s.h.keyframe || s->s.h.intraonly) {
        if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0])
            vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_SEGMAP]);
        if (!s->s.h.keyframe && !s->s.h.intraonly && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
            (ret = vp9_frame_ref(avctx, &s->s.frames[REF_FRAME_SEGMAP], &s->s.frames[CUR_FRAME])) < 0)
            return ret;
    }
    if (s->s.frames[REF_FRAME_MVPAIR].tf.f->buf[0])
        vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_MVPAIR]);
    if (!s->s.h.intraonly && !s->s.h.keyframe && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
        (ret = vp9_frame_ref(avctx, &s->s.frames[REF_FRAME_MVPAIR], &s->s.frames[CUR_FRAME])) < 0)
        return ret;
    if (s->s.frames[CUR_FRAME].tf.f->buf[0])
        vp9_frame_unref(avctx, &s->s.frames[CUR_FRAME]);
    if ((ret = vp9_frame_alloc(avctx, &s->s.frames[CUR_FRAME])) < 0)
        return ret;
    f = s->s.frames[CUR_FRAME].tf.f;
    f->key_frame = s->s.h.keyframe;
    f->pict_type = (s->s.h.keyframe || s->s.h.intraonly) ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;

    if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0] &&
        (s->s.frames[REF_FRAME_MVPAIR].tf.f->width  != s->s.frames[CUR_FRAME].tf.f->width ||
         s->s.frames[REF_FRAME_MVPAIR].tf.f->height != s->s.frames[CUR_FRAME].tf.f->height)) {
        vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_SEGMAP]);
    }

    // ref frame setup
    for (i = 0; i < 8; i++) {
        if (s->next_refs[i].f->buf[0])
            ff_thread_release_ext_buffer(avctx, &s->next_refs[i]);
        if (s->s.h.refreshrefmask & (1 << i)) {
            ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.frames[CUR_FRAME].tf);
        } else if (s->s.refs[i].f->buf[0]) {
            ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i]);
        }
        if (ret < 0)
            return ret;
    }

    if (avctx->hwaccel) {
        ret = avctx->hwaccel->start_frame(avctx, NULL, 0);
        if (ret < 0)
            return ret;
        ret = avctx->hwaccel->decode_slice(avctx, pkt->data, pkt->size);
        if (ret < 0)
            return ret;
        ret = avctx->hwaccel->end_frame(avctx);
        if (ret < 0)
            return ret;
        goto finish;
    }

    // main tile decode loop
    memset(s->above_partition_ctx, 0, s->cols);
    memset(s->above_skip_ctx, 0, s->cols);
    if (s->s.h.keyframe || s->s.h.intraonly) {
        memset(s->above_mode_ctx, DC_PRED, s->cols * 2);
    } else {
        memset(s->above_mode_ctx, NEARESTMV, s->cols);
    }
    memset(s->above_y_nnz_ctx, 0, s->sb_cols * 16);
    memset(s->above_uv_nnz_ctx[0], 0, s->sb_cols * 16 >> s->ss_h);
    memset(s->above_uv_nnz_ctx[1], 0, s->sb_cols * 16 >> s->ss_h);
    memset(s->above_segpred_ctx, 0, s->cols);
    s->pass = s->s.frames[CUR_FRAME].uses_2pass =
        avctx->active_thread_type == FF_THREAD_FRAME && s->s.h.refreshctx && !s->s.h.parallelmode;
    if ((ret = update_block_buffers(avctx)) < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to allocate block buffers\n");
        return ret;
    }
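
    /* In parallel mode there is no backward probability adaptation, so the
     * frame context can be stored (and frame-threading setup finished)
     * before the tile data is decoded. */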
    if (s->s.h.refreshctx && s->s.h.parallelmode) {
        int j, k, l, m;

        for (i = 0; i < 4; i++) {
            for (j = 0; j < 2; j++)
                for (k = 0; k < 2; k++)
                    for (l = 0; l < 6; l++)
                        for (m = 0; m < 6; m++)
                            memcpy(s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m],
                                   s->prob.coef[i][j][k][l][m], 3);
            if (s->s.h.txfmmode == i)
                break;
        }
        s->prob_ctx[s->s.h.framectxid].p = s->prob.p;
        ff_thread_finish_setup(avctx);
    } else if (!s->s.h.refreshctx) {
        ff_thread_finish_setup(avctx);
    }

#if HAVE_THREADS
    if (avctx->active_thread_type & FF_THREAD_SLICE) {
        for (i = 0; i < s->sb_rows; i++)
            atomic_store(&s->entries[i], 0);
    }
#endif
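
    /* With frame threading plus backward adaptation (uses_2pass), the frame
     * is decoded twice: pass 1 parses the bitstream and stores the block
     * data, pass 2 reconstructs from the stored data. Otherwise the loop
     * body runs once. */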
    do {
        for (i = 0; i < s->active_tile_cols; i++) {
            s->td[i].b = s->td[i].b_base;
            s->td[i].block = s->td[i].block_base;
            s->td[i].uvblock[0] = s->td[i].uvblock_base[0];
            s->td[i].uvblock[1] = s->td[i].uvblock_base[1];
            s->td[i].eob = s->td[i].eob_base;
            s->td[i].uveob[0] = s->td[i].uveob_base[0];
            s->td[i].uveob[1] = s->td[i].uveob_base[1];
            s->td[i].error_info = 0;
        }

#if HAVE_THREADS
        if (avctx->active_thread_type == FF_THREAD_SLICE) {
            int tile_row, tile_col;

            av_assert1(!s->pass);

            for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
                for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
                    int64_t tile_size;

                    if (tile_col == s->s.h.tiling.tile_cols - 1 &&
                        tile_row == s->s.h.tiling.tile_rows - 1) {
                        tile_size = size;
                    } else {
                        tile_size = AV_RB32(data);
                        data += 4;
                        size -= 4;
                    }
                    if (tile_size > size)
                        return AVERROR_INVALIDDATA;
                    ret = ff_vpx_init_range_decoder(&s->td[tile_col].c_b[tile_row], data, tile_size);
                    if (ret < 0)
                        return ret;
                    if (vpx_rac_get_prob_branchy(&s->td[tile_col].c_b[tile_row], 128)) // marker bit
                        return AVERROR_INVALIDDATA;
                    data += tile_size;
                    size -= tile_size;
                }
            }

            ff_slice_thread_execute_with_mainfunc(avctx, decode_tiles_mt, loopfilter_proc,
                                                  s->td, NULL, s->s.h.tiling.tile_cols);
        } else
#endif
        {
            ret = decode_tiles(avctx, data, size);
            if (ret < 0) {
                ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
                return ret;
            }
        }

        // Sum all counts fields into td[0].counts for tile threading
        if (avctx->active_thread_type == FF_THREAD_SLICE)
            for (i = 1; i < s->s.h.tiling.tile_cols; i++)
                for (j = 0; j < sizeof(s->td[i].counts) / sizeof(unsigned); j++)
                    ((unsigned *)&s->td[0].counts)[j] += ((unsigned *)&s->td[i].counts)[j];

        if (s->pass < 2 && s->s.h.refreshctx && !s->s.h.parallelmode) {
            ff_vp9_adapt_probs(s);
            ff_thread_finish_setup(avctx);
        }
    } while (s->pass++ == 1);
    ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
    if (s->td->error_info < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to decode tile data\n");
        s->td->error_info = 0;
        return AVERROR_INVALIDDATA;
    }
    if (avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS) {
        ret = vp9_export_enc_params(s, &s->s.frames[CUR_FRAME]);
        if (ret < 0)
            return ret;
    }

finish:
    // ref frame setup
    for (i = 0; i < 8; i++) {
        if (s->s.refs[i].f->buf[0])
            ff_thread_release_ext_buffer(avctx, &s->s.refs[i]);
        if (s->next_refs[i].f->buf[0] &&
            (ret = ff_thread_ref_frame(&s->s.refs[i], &s->next_refs[i])) < 0)
            return ret;
    }

    if (!s->s.h.invisible) {
        if ((ret = av_frame_ref(frame, s->s.frames[CUR_FRAME].tf.f)) < 0)
            return ret;
        *got_frame = 1;
    }

    return pkt->size;
}
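
/* Discard all internal frames and reference slots (e.g. on seek/flush). */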
static void vp9_decode_flush(AVCodecContext *avctx)
{
    VP9Context *s = avctx->priv_data;
    int i;

    for (i = 0; i < 3; i++)
        vp9_frame_unref(avctx, &s->s.frames[i]);
    for (i = 0; i < 8; i++)
        ff_thread_release_ext_buffer(avctx, &s->s.refs[i]);
}
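
/* Allocate the AVFrame shells for the three internal frame slots and the
 * eight current/next reference slots; the underlying buffers are allocated
 * per frame during decoding. */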
static av_cold int vp9_decode_init(AVCodecContext *avctx)
{
    VP9Context *s = avctx->priv_data;
    int ret;

    s->last_bpp = 0;
    s->s.h.filter.sharpness = -1;

#if HAVE_THREADS
    if (avctx->active_thread_type & FF_THREAD_SLICE) {
        ret = ff_pthread_init(s, vp9_context_offsets);
        if (ret < 0)
            return ret;
    }
#endif

    for (int i = 0; i < 3; i++) {
        s->s.frames[i].tf.f = av_frame_alloc();
        if (!s->s.frames[i].tf.f)
            return AVERROR(ENOMEM);
    }
    for (int i = 0; i < 8; i++) {
        s->s.refs[i].f    = av_frame_alloc();
        s->next_refs[i].f = av_frame_alloc();
        if (!s->s.refs[i].f || !s->next_refs[i].f)
            return AVERROR(ENOMEM);
    }
    return 0;
}

#if HAVE_THREADS
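/* Frame threading: copy everything a future frame may depend on from the
 * source thread context to the destination one (frames, refs, header
 * fields and probability contexts). */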
static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    int i, ret;
    VP9Context *s = dst->priv_data, *ssrc = src->priv_data;

    for (i = 0; i < 3; i++) {
        if (s->s.frames[i].tf.f->buf[0])
            vp9_frame_unref(dst, &s->s.frames[i]);
        if (ssrc->s.frames[i].tf.f->buf[0]) {
            if ((ret = vp9_frame_ref(dst, &s->s.frames[i], &ssrc->s.frames[i])) < 0)
                return ret;
        }
    }
    for (i = 0; i < 8; i++) {
        if (s->s.refs[i].f->buf[0])
            ff_thread_release_ext_buffer(dst, &s->s.refs[i]);
        if (ssrc->next_refs[i].f->buf[0]) {
            if ((ret = ff_thread_ref_frame(&s->s.refs[i], &ssrc->next_refs[i])) < 0)
                return ret;
        }
    }

    s->s.h.invisible = ssrc->s.h.invisible;
    s->s.h.keyframe = ssrc->s.h.keyframe;
    s->s.h.intraonly = ssrc->s.h.intraonly;
    s->ss_v = ssrc->ss_v;
    s->ss_h = ssrc->ss_h;
    s->s.h.segmentation.enabled = ssrc->s.h.segmentation.enabled;
    s->s.h.segmentation.update_map = ssrc->s.h.segmentation.update_map;
    s->s.h.segmentation.absolute_vals = ssrc->s.h.segmentation.absolute_vals;
    s->bytesperpixel = ssrc->bytesperpixel;
    s->gf_fmt = ssrc->gf_fmt;
    s->w = ssrc->w;
    s->h = ssrc->h;
    s->s.h.bpp = ssrc->s.h.bpp;
    s->bpp_index = ssrc->bpp_index;
    s->pix_fmt = ssrc->pix_fmt;
    memcpy(&s->prob_ctx, &ssrc->prob_ctx, sizeof(s->prob_ctx));
    memcpy(&s->s.h.lf_delta, &ssrc->s.h.lf_delta, sizeof(s->s.h.lf_delta));
    memcpy(&s->s.h.segmentation.feat, &ssrc->s.h.segmentation.feat,
           sizeof(s->s.h.segmentation.feat));

    return 0;
}
#endif
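
/* Codec registration; hardware acceleration entries are compiled in
 * according to the build configuration. */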
const FFCodec ff_vp9_decoder = {
    .p.name                = "vp9",
    .p.long_name           = NULL_IF_CONFIG_SMALL("Google VP9"),
    .p.type                = AVMEDIA_TYPE_VIDEO,
    .p.id                  = AV_CODEC_ID_VP9,
    .priv_data_size        = sizeof(VP9Context),
    .init                  = vp9_decode_init,
    .close                 = vp9_decode_free,
    FF_CODEC_DECODE_CB(vp9_decode_frame),
    .p.capabilities        = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS,
    .caps_internal         = FF_CODEC_CAP_INIT_CLEANUP |
                             FF_CODEC_CAP_SLICE_THREAD_HAS_MF |
                             FF_CODEC_CAP_ALLOCATE_PROGRESS,
    .flush                 = vp9_decode_flush,
    .update_thread_context = ONLY_IF_THREADS_ENABLED(vp9_decode_update_thread_context),
    .p.profiles            = NULL_IF_CONFIG_SMALL(ff_vp9_profiles),
    .bsfs                  = "vp9_superframe_split",
    .hw_configs            = (const AVCodecHWConfigInternal *const []) {
#if CONFIG_VP9_DXVA2_HWACCEL
        HWACCEL_DXVA2(vp9),
#endif
#if CONFIG_VP9_D3D11VA_HWACCEL
        HWACCEL_D3D11VA(vp9),
#endif
#if CONFIG_VP9_D3D11VA2_HWACCEL
        HWACCEL_D3D11VA2(vp9),
#endif
#if CONFIG_VP9_NVDEC_HWACCEL
        HWACCEL_NVDEC(vp9),
#endif
#if CONFIG_VP9_VAAPI_HWACCEL
        HWACCEL_VAAPI(vp9),
#endif
#if CONFIG_VP9_VDPAU_HWACCEL
        HWACCEL_VDPAU(vp9),
#endif
#if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
        HWACCEL_VIDEOTOOLBOX(vp9),
#endif
        NULL
    },
};