
Merge remote-tracking branch 'qatar/master'

* qatar/master:
  vorbis: Validate that the floor 1 X values contain no duplicates.
  avprobe: Identify codec probe failures rather than calling them unsupported codecs.
  avformat: Probe codecs at score 0 on buffer exhaustion conditions.
  avformat: Factorize codec probing.
  Indeo Audio decoder
  imc: make IMDCT support stereo output
  imc: move channel-specific data into separate context
  lavfi: remove request/poll and drawing functions from public API on next bump
  lavfi: make avfilter_insert_pad and pals private on next bump.
  lavfi: make formats API private on next bump.
  avplay: use buffersrc instead of custom input filter.
  avtools: move buffer management code from avconv to cmdutils.
  avconv: don't use InputStream in the buffer management code.
  avconv: fix exiting when max frames is reached.
  mpc8: fix maximum bands handling
  aacdec: Turn PS off when switching to stereo and turn it to implicit when switching to mono.

Conflicts:
	Changelog
	cmdutils.h
	ffmpeg.c
	ffplay.c
	ffprobe.c
	libavcodec/avcodec.h
	libavcodec/mpc8.c
	libavcodec/v210dec.h
	libavcodec/version.h
	libavcodec/vorbisdec.c
	libavfilter/avfilter.c
	libavfilter/avfilter.h
	libavfilter/buffersrc.c
	libavfilter/formats.c
	libavfilter/src_movie.c
	libavfilter/vf_aspect.c
	libavfilter/vf_blackframe.c
	libavfilter/vf_boxblur.c
	libavfilter/vf_crop.c
	libavfilter/vf_cropdetect.c
	libavfilter/vf_delogo.c
	libavfilter/vf_drawbox.c
	libavfilter/vf_drawtext.c
	libavfilter/vf_fade.c
	libavfilter/vf_fifo.c
	libavfilter/vf_format.c
	libavfilter/vf_frei0r.c
	libavfilter/vf_gradfun.c
	libavfilter/vf_hflip.c
	libavfilter/vf_hqdn3d.c
	libavfilter/vf_libopencv.c
	libavfilter/vf_lut.c
	libavfilter/vf_overlay.c
	libavfilter/vf_pad.c
	libavfilter/vf_scale.c
	libavfilter/vf_select.c
	libavfilter/vf_showinfo.c
	libavfilter/vf_transpose.c
	libavfilter/vf_unsharp.c
	libavfilter/vf_yadif.c
	libavfilter/vsrc_color.c
	libavfilter/vsrc_testsrc.c
	libavformat/utils.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer, 12 years ago
commit ad60b3b181

10 changed files with 281 additions and 398 deletions:

  1. Changelog                +1   -0
  2. cmdutils.c               +142 -0
  3. cmdutils.h               +42  -0
  4. doc/general.texi         +1   -0
  5. ffmpeg.c                 +2   -154
  6. ffplay.c                 +83  -243
  7. ffprobe.c                +5   -1
  8. libavcodec/Makefile      +1   -0
  9. libavcodec/aacdec.c      +3   -0
  10. libavcodec/allcodecs.c  +1   -0

Changelog  +1 -0

@@ -4,6 +4,7 @@ releases are sorted from youngest to oldest.
 version next:
 - INI and flat output in ffprobe
 - Scene detection in libavfilter
+- Indeo Audio decoder
 
 
 version 0.11:

cmdutils.c  +142 -0

@@ -41,6 +41,7 @@
 #include "libavutil/avassert.h"
 #include "libavutil/avstring.h"
 #include "libavutil/mathematics.h"
+#include "libavutil/imgutils.h"
 #include "libavutil/parseutils.h"
 #include "libavutil/pixdesc.h"
 #include "libavutil/eval.h"
@@ -1222,3 +1223,144 @@ void *grow_array(void *array, int elem_size, int *size, int new_size)
     }
     return array;
 }
+
+static int alloc_buffer(FrameBuffer **pool, AVCodecContext *s, FrameBuffer **pbuf)
+{
+    FrameBuffer  *buf = av_mallocz(sizeof(*buf));
+    int i, ret;
+    const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
+    int h_chroma_shift, v_chroma_shift;
+    int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1
+    int w = s->width, h = s->height;
+
+    if (!buf)
+        return AVERROR(ENOMEM);
+
+    avcodec_align_dimensions(s, &w, &h);
+
+    if (!(s->flags & CODEC_FLAG_EMU_EDGE)) {
+        w += 2*edge;
+        h += 2*edge;
+    }
+
+    if ((ret = av_image_alloc(buf->base, buf->linesize, w, h,
+                              s->pix_fmt, 32)) < 0) {
+        av_freep(&buf);
+        return ret;
+    }
+    /* XXX this shouldn't be needed, but some tests break without this line
+     * those decoders are buggy and need to be fixed.
+     * the following tests fail:
+     * cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit
+     */
+    memset(buf->base[0], 128, ret);
+
+    avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
+    for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
+        const int h_shift = i==0 ? 0 : h_chroma_shift;
+        const int v_shift = i==0 ? 0 : v_chroma_shift;
+        if ((s->flags & CODEC_FLAG_EMU_EDGE) || !buf->linesize[1] || !buf->base[i])
+            buf->data[i] = buf->base[i];
+        else
+            buf->data[i] = buf->base[i] +
+                           FFALIGN((buf->linesize[i]*edge >> v_shift) +
+                                   (pixel_size*edge >> h_shift), 32);
+    }
+    buf->w       = s->width;
+    buf->h       = s->height;
+    buf->pix_fmt = s->pix_fmt;
+    buf->pool    = pool;
+
+    *pbuf = buf;
+    return 0;
+}
+
+int codec_get_buffer(AVCodecContext *s, AVFrame *frame)
+{
+    FrameBuffer **pool = s->opaque;
+    FrameBuffer *buf;
+    int ret, i;
+
+    if(av_image_check_size(s->width, s->height, 0, s) || s->pix_fmt<0)
+        return -1;
+
+    if (!*pool && (ret = alloc_buffer(pool, s, pool)) < 0)
+        return ret;
+
+    buf              = *pool;
+    *pool            = buf->next;
+    buf->next        = NULL;
+    if (buf->w != s->width || buf->h != s->height || buf->pix_fmt != s->pix_fmt) {
+        av_freep(&buf->base[0]);
+        av_free(buf);
+        if ((ret = alloc_buffer(pool, s, &buf)) < 0)
+            return ret;
+    }
+    av_assert0(!buf->refcount);
+    buf->refcount++;
+
+    frame->opaque        = buf;
+    frame->type          = FF_BUFFER_TYPE_USER;
+    frame->extended_data = frame->data;
+    frame->pkt_pts       = s->pkt ? s->pkt->pts : AV_NOPTS_VALUE;
+    frame->width         = buf->w;
+    frame->height        = buf->h;
+    frame->format        = buf->pix_fmt;
+    frame->sample_aspect_ratio = s->sample_aspect_ratio;
+
+    for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
+        frame->base[i]     = buf->base[i];  // XXX h264.c uses base though it shouldn't
+        frame->data[i]     = buf->data[i];
+        frame->linesize[i] = buf->linesize[i];
+    }
+
+    return 0;
+}
+
+static void unref_buffer(FrameBuffer *buf)
+{
+    FrameBuffer **pool = buf->pool;
+
+    av_assert0(buf->refcount > 0);
+    buf->refcount--;
+    if (!buf->refcount) {
+        FrameBuffer *tmp;
+        for(tmp= *pool; tmp; tmp= tmp->next)
+            av_assert1(tmp != buf);
+
+        buf->next = *pool;
+        *pool = buf;
+    }
+}
+
+void codec_release_buffer(AVCodecContext *s, AVFrame *frame)
+{
+    FrameBuffer *buf = frame->opaque;
+    int i;
+
+    if(frame->type!=FF_BUFFER_TYPE_USER)
+        return avcodec_default_release_buffer(s, frame);
+
+    for (i = 0; i < FF_ARRAY_ELEMS(frame->data); i++)
+        frame->data[i] = NULL;
+
+    unref_buffer(buf);
+}
+
+void filter_release_buffer(AVFilterBuffer *fb)
+{
+    FrameBuffer *buf = fb->priv;
+    av_free(fb);
+    unref_buffer(buf);
+}
+
+void free_buffer_pool(FrameBuffer **pool)
+{
+    FrameBuffer *buf = *pool;
+    while (buf) {
+        *pool = buf->next;
+        av_freep(&buf->base[0]);
+        av_free(buf);
+        buf = *pool;
+    }
+}

cmdutils.h  +42 -0

@@ -386,4 +386,46 @@ void exit_program(int ret);
  */
 void *grow_array(void *array, int elem_size, int *size, int new_size);
 
+typedef struct FrameBuffer {
+    uint8_t *base[4];
+    uint8_t *data[4];
+    int  linesize[4];
+
+    int h, w;
+    enum PixelFormat pix_fmt;
+
+    int refcount;
+    struct FrameBuffer **pool;  ///< head of the buffer pool
+    struct FrameBuffer *next;
+} FrameBuffer;
+
+/**
+ * Get a frame from the pool. This is intended to be used as a callback for
+ * AVCodecContext.get_buffer.
+ *
+ * @param s codec context. s->opaque must be a pointer to the head of the
+ *          buffer pool.
+ * @param frame frame->opaque will be set to point to the FrameBuffer
+ *              containing the frame data.
+ */
+int codec_get_buffer(AVCodecContext *s, AVFrame *frame);
+
+/**
+ * A callback to be used for AVCodecContext.release_buffer along with
+ * codec_get_buffer().
+ */
+void codec_release_buffer(AVCodecContext *s, AVFrame *frame);
+
+/**
+ * A callback to be used for AVFilterBuffer.free.
+ * @param fb buffer to free. fb->priv must be a pointer to the FrameBuffer
+ *           containing the buffer data.
+ */
+void filter_release_buffer(AVFilterBuffer *fb);
+
+/**
+ * Free all the buffers in the pool. This must be called after all the
+ * buffers have been released.
+ */
+void free_buffer_pool(FrameBuffer **pool);
 #endif /* CMDUTILS_H */
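
For orientation, here is a minimal usage sketch of the new pool API, modelled on the init_input_stream() hunk in the ffmpeg.c diff below. The open/close helper names and the file-scope pool variable are illustrative only, not part of the patch:

/* Illustrative sketch (not from the patch): wiring the shared FrameBuffer
 * pool from cmdutils into a video decoder context. */
#include "cmdutils.h"
#include "libavcodec/avcodec.h"

static FrameBuffer *buffer_pool = NULL;      /* head of the pool, starts empty */

static int open_video_decoder(AVCodecContext *avctx, AVCodec *codec)
{
    /* Route frame allocation through the pool implemented in cmdutils.c. */
    avctx->get_buffer     = codec_get_buffer;
    avctx->release_buffer = codec_release_buffer;
    avctx->opaque         = &buffer_pool;    /* codec_get_buffer() expects the pool head here */

    return avcodec_open2(avctx, codec, NULL);
}

static void close_video_decoder(AVCodecContext *avctx)
{
    avcodec_close(avctx);
    free_buffer_pool(&buffer_pool);          /* only valid once all buffers are released */
}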

doc/general.texi  +1 -0

@@ -748,6 +748,7 @@ following image formats are supported:
     @tab encoding supported through external library libgsm
 @item GSM Microsoft variant  @tab  E  @tab  X
     @tab encoding supported through external library libgsm
+@item IAC (Indeo Audio Coder)  @tab     @tab  X
 @item IMC (Intel Music Coder)  @tab     @tab  X
 @item MACE (Macintosh Audio Compression/Expansion) 3:1  @tab     @tab  X
 @item MACE (Macintosh Audio Compression/Expansion) 6:1  @tab     @tab  X

ffmpeg.c  +2 -154

@@ -201,19 +201,6 @@ typedef struct FilterGraph {
     int         nb_outputs;
 } FilterGraph;
 
-typedef struct FrameBuffer {
-    uint8_t *base[4];
-    uint8_t *data[4];
-    int  linesize[4];
-
-    int h, w;
-    enum PixelFormat pix_fmt;
-
-    int refcount;
-    struct InputStream *ist;
-    struct FrameBuffer *next;
-} FrameBuffer;
-
 typedef struct InputStream {
     int file_index;
     AVStream *st;
@@ -534,145 +521,6 @@ static void reset_options(OptionsContext *o, int is_input)
     init_opts();
 }
 
-static int alloc_buffer(InputStream *ist, AVCodecContext *s, FrameBuffer **pbuf)
-{
-    FrameBuffer  *buf = av_mallocz(sizeof(*buf));
-    int i, ret;
-    const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
-    int h_chroma_shift, v_chroma_shift;
-    int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1
-    int w = s->width, h = s->height;
-
-    if (!buf)
-        return AVERROR(ENOMEM);
-
-    avcodec_align_dimensions(s, &w, &h);
-
-    if (!(s->flags & CODEC_FLAG_EMU_EDGE)) {
-        w += 2*edge;
-        h += 2*edge;
-    }
-
-    if ((ret = av_image_alloc(buf->base, buf->linesize, w, h,
-                              s->pix_fmt, 32)) < 0) {
-        av_freep(&buf);
-        return ret;
-    }
-    /* XXX this shouldn't be needed, but some tests break without this line
-     * those decoders are buggy and need to be fixed.
-     * the following tests fail:
-     * cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit
-     */
-    memset(buf->base[0], 128, ret);
-
-    avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
-    for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
-        const int h_shift = i==0 ? 0 : h_chroma_shift;
-        const int v_shift = i==0 ? 0 : v_chroma_shift;
-        if ((s->flags & CODEC_FLAG_EMU_EDGE) || !buf->linesize[1] || !buf->base[i])
-            buf->data[i] = buf->base[i];
-        else
-            buf->data[i] = buf->base[i] +
-                           FFALIGN((buf->linesize[i]*edge >> v_shift) +
-                                   (pixel_size*edge >> h_shift), 32);
-    }
-    buf->w       = s->width;
-    buf->h       = s->height;
-    buf->pix_fmt = s->pix_fmt;
-    buf->ist     = ist;
-
-    *pbuf = buf;
-    return 0;
-}
-
-static void free_buffer_pool(InputStream *ist)
-{
-    FrameBuffer *buf = ist->buffer_pool;
-    while (buf) {
-        ist->buffer_pool = buf->next;
-        av_freep(&buf->base[0]);
-        av_free(buf);
-        buf = ist->buffer_pool;
-    }
-}
-
-static void unref_buffer(InputStream *ist, FrameBuffer *buf)
-{
-    av_assert0(buf->refcount > 0);
-    buf->refcount--;
-    if (!buf->refcount) {
-        FrameBuffer *tmp;
-        for(tmp= ist->buffer_pool; tmp; tmp= tmp->next)
-            av_assert1(tmp != buf);
-        buf->next = ist->buffer_pool;
-        ist->buffer_pool = buf;
-    }
-}
-
-static int codec_get_buffer(AVCodecContext *s, AVFrame *frame)
-{
-    InputStream *ist = s->opaque;
-    FrameBuffer *buf;
-    int ret, i;
-
-    if(av_image_check_size(s->width, s->height, 0, s) || s->pix_fmt<0)
-        return -1;
-
-    if (!ist->buffer_pool && (ret = alloc_buffer(ist, s, &ist->buffer_pool)) < 0)
-        return ret;
-
-    buf              = ist->buffer_pool;
-    ist->buffer_pool = buf->next;
-    buf->next        = NULL;
-    if (buf->w != s->width || buf->h != s->height || buf->pix_fmt != s->pix_fmt) {
-        av_freep(&buf->base[0]);
-        av_free(buf);
-        if ((ret = alloc_buffer(ist, s, &buf)) < 0)
-            return ret;
-    }
-    av_assert0(!buf->refcount);
-    buf->refcount++;
-
-    frame->opaque        = buf;
-    frame->type          = FF_BUFFER_TYPE_USER;
-    frame->extended_data = frame->data;
-    frame->pkt_pts       = s->pkt ? s->pkt->pts : AV_NOPTS_VALUE;
-    frame->width         = buf->w;
-    frame->height        = buf->h;
-    frame->format        = buf->pix_fmt;
-    frame->sample_aspect_ratio = s->sample_aspect_ratio;
-
-    for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
-        frame->base[i]     = buf->base[i];  // XXX h264.c uses base though it shouldn't
-        frame->data[i]     = buf->data[i];
-        frame->linesize[i] = buf->linesize[i];
-    }
-
-    return 0;
-}
-
-static void codec_release_buffer(AVCodecContext *s, AVFrame *frame)
-{
-    InputStream *ist = s->opaque;
-    FrameBuffer *buf = frame->opaque;
-    int i;
-
-    if(frame->type!=FF_BUFFER_TYPE_USER)
-        return avcodec_default_release_buffer(s, frame);
-
-    for (i = 0; i < FF_ARRAY_ELEMS(frame->data); i++)
-        frame->data[i] = NULL;
-
-    unref_buffer(ist, buf);
-}
-
-static void filter_release_buffer(AVFilterBuffer *fb)
-{
-    FrameBuffer *buf = fb->priv;
-    av_free(fb);
-    unref_buffer(buf->ist, buf);
-}
-
 static enum PixelFormat choose_pixel_fmt(AVStream *st, AVCodec *codec, enum PixelFormat target)
 {
     if (codec && codec->pix_fmts) {
@@ -1508,7 +1356,7 @@ void av_noreturn exit_program(int ret)
     for (i = 0; i < nb_input_streams; i++) {
         av_freep(&input_streams[i]->decoded_frame);
         av_dict_free(&input_streams[i]->opts);
-        free_buffer_pool(input_streams[i]);
+        free_buffer_pool(&input_streams[i]->buffer_pool);
         av_freep(&input_streams[i]->filters);
         av_freep(&input_streams[i]);
     }
@@ -2845,7 +2693,7 @@ static int init_input_stream(int ist_index, char *error, int error_len)
         if (codec->type == AVMEDIA_TYPE_VIDEO && ist->dr1) {
             ist->st->codec->get_buffer     = codec_get_buffer;
             ist->st->codec->release_buffer = codec_release_buffer;
-            ist->st->codec->opaque         = ist;
+            ist->st->codec->opaque         = &ist->buffer_pool;
         }
 
         if (!av_dict_get(ist->opts, "threads", NULL, 0))

ffplay.c  +83 -243

@@ -49,6 +49,7 @@
 # include "libavfilter/avfilter.h"
 # include "libavfilter/avfiltergraph.h"
 # include "libavfilter/buffersink.h"
+# include "libavfilter/buffersrc.h"
 #endif
 
 #include <SDL.h>
@@ -227,7 +228,10 @@ typedef struct VideoState {
     int step;
 
 #if CONFIG_AVFILTER
+    AVFilterContext *in_video_filter;           ///< the first filter in the video chain
     AVFilterContext *out_video_filter;          ///< the last filter in the video chain
+    int use_dr1;
+    FrameBuffer *buffer_pool;
 #endif
 
     int refresh;
@@ -1545,222 +1549,29 @@ static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacke
 }
 
 #if CONFIG_AVFILTER
-typedef struct {
-    VideoState *is;
-    AVFrame *frame;
-    int use_dr1;
-} FilterPriv;
-
-static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
-{
-    AVFilterContext *ctx = codec->opaque;
-    AVFilterBufferRef  *ref;
-    int perms = AV_PERM_WRITE;
-    int i, w, h, stride[AV_NUM_DATA_POINTERS];
-    unsigned edge;
-    int pixel_size;
-
-    av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
-
-    if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
-        perms |= AV_PERM_NEG_LINESIZES;
-
-    if (pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
-        if (pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
-        if (pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
-        if (pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
-    }
-    if (pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
-
-    w = codec->width;
-    h = codec->height;
-
-    if(av_image_check_size(w, h, 0, codec) || codec->pix_fmt<0)
-        return -1;
-
-    avcodec_align_dimensions2(codec, &w, &h, stride);
-    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
-    w += edge << 1;
-    h += edge << 1;
-    if (codec->pix_fmt != ctx->outputs[0]->format) {
-        av_log(codec, AV_LOG_ERROR, "Pixel format mismatches %d %d\n", codec->pix_fmt, ctx->outputs[0]->format);
-        return -1;
-    }
-    if (!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
-        return -1;
-
-    pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1 + 1;
-    ref->video->w = codec->width;
-    ref->video->h = codec->height;
-    for (i = 0; i < 4; i ++) {
-        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
-        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
-
-        pic->base[i]     = ref->data[i];
-        if (ref->data[i]) {
-            ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
-        }
-        pic->data[i]     = ref->data[i];
-        pic->linesize[i] = ref->linesize[i];
-    }
-    pic->opaque = ref;
-    pic->type   = FF_BUFFER_TYPE_USER;
-    pic->reordered_opaque = codec->reordered_opaque;
-    pic->width               = codec->width;
-    pic->height              = codec->height;
-    pic->format              = codec->pix_fmt;
-    pic->sample_aspect_ratio = codec->sample_aspect_ratio;
-    if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
-    else            pic->pkt_pts = AV_NOPTS_VALUE;
-    return 0;
-}
-
-static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
-{
-    memset(pic->data, 0, sizeof(pic->data));
-    avfilter_unref_buffer(pic->opaque);
-}
-
-static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
-{
-    AVFilterBufferRef *ref = pic->opaque;
-
-    if (pic->data[0] == NULL) {
-        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
-        return codec->get_buffer(codec, pic);
-    }
-
-    if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
-        (codec->pix_fmt != ref->format)) {
-        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
-        return -1;
-    }
-
-    pic->reordered_opaque = codec->reordered_opaque;
-    if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
-    else            pic->pkt_pts = AV_NOPTS_VALUE;
-    return 0;
-}
-
-static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
-{
-    FilterPriv *priv = ctx->priv;
-    AVCodecContext *codec;
-    if (!opaque) return -1;
-
-    priv->is = opaque;
-    codec    = priv->is->video_st->codec;
-    codec->opaque = ctx;
-    if (codec->codec->capabilities & CODEC_CAP_DR1) {
-        av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
-        priv->use_dr1 = 1;
-        codec->get_buffer     = input_get_buffer;
-        codec->release_buffer = input_release_buffer;
-        codec->reget_buffer   = input_reget_buffer;
-        codec->thread_safe_callbacks = 1;
-    }
-
-    priv->frame = avcodec_alloc_frame();
-
-    return 0;
-}
-
-static void input_uninit(AVFilterContext *ctx)
-{
-    FilterPriv *priv = ctx->priv;
-    av_free(priv->frame);
-}
-
-static int input_request_frame(AVFilterLink *link)
-{
-    FilterPriv *priv = link->src->priv;
-    AVFilterBufferRef *picref;
-    int64_t pts = 0;
-    AVPacket pkt;
-    int ret;
-
-    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
-        av_free_packet(&pkt);
-    if (ret < 0)
-        return -1;
-
-    if (priv->use_dr1 && priv->frame->opaque) {
-        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
-    } else {
-        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, priv->frame->width, priv->frame->height);
-        av_image_copy(picref->data, picref->linesize,
-                      (const uint8_t **)(void **)priv->frame->data, priv->frame->linesize,
-                      picref->format, priv->frame->width, priv->frame->height);
-    }
-    av_free_packet(&pkt);
-
-    avfilter_copy_frame_props(picref, priv->frame);
-    picref->video->sample_aspect_ratio = av_guess_sample_aspect_ratio(priv->is->ic, priv->is->video_st, priv->frame);
-    picref->pts = pts;
-
-    avfilter_start_frame(link, picref);
-    avfilter_draw_slice(link, 0, picref->video->h, 1);
-    avfilter_end_frame(link);
-
-    return 0;
-}
-
-static int input_query_formats(AVFilterContext *ctx)
-{
-    FilterPriv *priv = ctx->priv;
-    enum PixelFormat pix_fmts[] = {
-        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
-    };
-
-    avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
-    return 0;
-}
-
-static int input_config_props(AVFilterLink *link)
-{
-    FilterPriv *priv  = link->src->priv;
-    AVStream *s = priv->is->video_st;
-
-    link->w = s->codec->width;
-    link->h = s->codec->height;
-    link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
-        s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
-    link->time_base = s->time_base;
-
-    return 0;
-}
-
-static AVFilter input_filter =
-{
-    .name      = "ffplay_input",
-
-    .priv_size = sizeof(FilterPriv),
-
-    .init      = input_init,
-    .uninit    = input_uninit,
-
-    .query_formats = input_query_formats,
-
-    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
-    .outputs   = (AVFilterPad[]) {{ .name = "default",
-                                    .type = AVMEDIA_TYPE_VIDEO,
-                                    .request_frame = input_request_frame,
-                                    .config_props  = input_config_props, },
-                                  { .name = NULL }},
-};
-
 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
 {
     static const enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
     char sws_flags_str[128];
+    char buffersrc_args[256];
     int ret;
     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
-    AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;;
+    AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
+    AVCodecContext *codec = is->video_st->codec;
+
     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
     graph->scale_sws_opts = av_strdup(sws_flags_str);
 
-    if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
-                                            NULL, is, graph)) < 0)
+    snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
+             codec->width, codec->height, codec->pix_fmt,
+             is->video_st->time_base.num, is->video_st->time_base.den,
+             codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
+
+
+    if ((ret = avfilter_graph_create_filter(&filt_src,
+                                            avfilter_get_by_name("buffer"),
+                                            "src", buffersrc_args, NULL,
+                                            graph)) < 0)
         return ret;
 
 #if FF_API_OLD_VSINK_API
@@ -1809,8 +1620,16 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
         return ret;
 
+    is->in_video_filter  = filt_src;
     is->out_video_filter = filt_out;
 
+    if (codec->codec->capabilities & CODEC_CAP_DR1) {
+        is->use_dr1 = 1;
+        codec->get_buffer     = codec_get_buffer;
+        codec->release_buffer = codec_release_buffer;
+        codec->opaque         = &is->buffer_pool;
+    }
+
     return ret;
 }
 
@@ -1826,7 +1645,7 @@ static int video_thread(void *arg)
 
 #if CONFIG_AVFILTER
     AVFilterGraph *graph = avfilter_graph_alloc();
-    AVFilterContext *filt_out = NULL;
+    AVFilterContext *filt_out = NULL, *filt_in = NULL;
     int last_w = is->video_st->codec->width;
     int last_h = is->video_st->codec->height;
 
@@ -1837,18 +1656,31 @@ static int video_thread(void *arg)
         SDL_PushEvent(&event);
         goto the_end;
     }
+    filt_in  = is->in_video_filter;
     filt_out = is->out_video_filter;
 #endif
 
     for (;;) {
-#if !CONFIG_AVFILTER
         AVPacket pkt;
-#else
+#if CONFIG_AVFILTER
         AVFilterBufferRef *picref;
         AVRational tb = filt_out->inputs[0]->time_base;
 #endif
         while (is->paused && !is->videoq.abort_request)
             SDL_Delay(10);
+
+        ret = get_video_frame(is, frame, &pts_int, &pkt);
+        if (ret < 0)
+            goto the_end;
+        av_free_packet(&pkt);
+
+        if (!ret)
+            continue;
+
+        is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
+        if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
+            is->frame_last_filter_delay = 0;
+
 #if CONFIG_AVFILTER
         if (   last_w != is->video_st->codec->width
             || last_h != is->video_st->codec->height) {
@@ -1862,48 +1694,55 @@ static int video_thread(void *arg)
             last_w = is->video_st->codec->width;
             last_h = is->video_st->codec->height;
         }
-        ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
-        if (picref) {
+
+        frame->pts = pts_int;
+        if (is->use_dr1) {
+            FrameBuffer      *buf = frame->opaque;
+            AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
+                                        frame->data, frame->linesize,
+                                        AV_PERM_READ | AV_PERM_PRESERVE,
+                                        frame->width, frame->height,
+                                        frame->format);
+
+            avfilter_copy_frame_props(fb, frame);
+            fb->buf->priv           = buf;
+            fb->buf->free           = filter_release_buffer;
+
+            buf->refcount++;
+            av_buffersrc_buffer(filt_in, fb);
+
+        } else
+            av_buffersrc_write_frame(filt_in, frame);
+
+        while (ret >= 0) {
+            ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
+            if (ret < 0) {
+                ret = 0;
+                break;
+            }
+
             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
+
             pts_int = picref->pts;
             tb      = filt_out->inputs[0]->time_base;
             pos     = picref->pos;
             frame->opaque = picref;
 
-            ret = 1;
-        }
-
-        if (ret >= 0 && av_cmp_q(tb, is->video_st->time_base)) {
-            av_unused int64_t pts1 = pts_int;
-            pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
-            av_dlog(NULL, "video_thread(): "
-                    "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
-                    tb.num, tb.den, pts1,
-                    is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
+            if (av_cmp_q(tb, is->video_st->time_base)) {
+                av_unused int64_t pts1 = pts_int;
+                pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
+                av_dlog(NULL, "video_thread(): "
+                        "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
+                        tb.num, tb.den, pts1,
+                        is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
+            }
+            pts = pts_int * av_q2d(is->video_st->time_base);
+            ret = queue_picture(is, frame, pts, pos);
         }
 #else
-        ret = get_video_frame(is, frame, &pts_int, &pkt);
-        pos = pkt.pos;
-        av_free_packet(&pkt);
-        if (ret == 0)
-            continue;
-#endif
-
-        if (ret < 0)
-            goto the_end;
-
-        is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
-        if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
-            is->frame_last_filter_delay = 0;
-
-#if CONFIG_AVFILTER
-        if (!picref)
-            continue;
-#endif
-
         pts = pts_int * av_q2d(is->video_st->time_base);
-
-        ret = queue_picture(is, frame, pts, pos);
+        ret = queue_picture(is, frame, pts, pkt.pos);
+#endif
 
         if (ret < 0)
             goto the_end;
@@ -2461,6 +2300,7 @@ static void stream_component_close(VideoState *is, int stream_index)
 
     ic->streams[stream_index]->discard = AVDISCARD_ALL;
     avcodec_close(avctx);
+    free_buffer_pool(&is->buffer_pool);
     switch (avctx->codec_type) {
     case AVMEDIA_TYPE_AUDIO:
         is->audio_st = NULL;
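
As a rough illustration of the non-DR1 path introduced above, decoded frames now go into the stock "buffer" source and filtered pictures are pulled back out of the buffersink; the helper below is a simplified assumption (its name, arguments and error handling are not from the patch):

/* Illustrative sketch only: feed one decoded frame into the graph and drain
 * whatever filtered pictures are currently available, mirroring the loop in
 * video_thread() above. */
#include "libavcodec/avcodec.h"
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/buffersrc.h"
#include "libavfilter/buffersink.h"

static int push_decoded_frame(AVFilterContext *buffersrc,
                              AVFilterContext *buffersink,
                              AVFrame *frame)
{
    AVFilterBufferRef *picref;
    int ret;

    /* Copy the frame's data and properties into the filter graph. */
    ret = av_buffersrc_write_frame(buffersrc, frame);
    if (ret < 0)
        return ret;

    for (;;) {
        ret = av_buffersink_get_buffer_ref(buffersink, &picref, 0);
        if (ret < 0)                 /* no more output for this input frame */
            break;
        /* ... queue picref for display here ... */
        avfilter_unref_buffer(picref);
    }
    return 0;
}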

ffprobe.c  +5 -1

@@ -1866,7 +1866,11 @@ static int open_input_file(AVFormatContext **fmt_ctx_ptr, const char *filename)
         AVStream *stream = fmt_ctx->streams[i];
         AVCodec *codec;
 
-        if (!(codec = avcodec_find_decoder(stream->codec->codec_id))) {
+        if (stream->codec->codec_id == CODEC_ID_PROBE) {
+            av_log(NULL, AV_LOG_ERROR,
+                   "Failed to probe codec for input stream %d\n",
+                    stream->index);
+        } else if (!(codec = avcodec_find_decoder(stream->codec->codec_id))) {
             av_log(NULL, AV_LOG_ERROR,
                     "Unsupported codec with id %d for input stream %d\n",
                     stream->codec->codec_id, stream->index);

libavcodec/Makefile  +1 -0

@@ -220,6 +220,7 @@ OBJS-$(CONFIG_H264_VAAPI_HWACCEL)      += vaapi_h264.o
 OBJS-$(CONFIG_H264_VDA_HWACCEL)        += vda_h264.o
 OBJS-$(CONFIG_HUFFYUV_DECODER)         += huffyuv.o
 OBJS-$(CONFIG_HUFFYUV_ENCODER)         += huffyuv.o
+OBJS-$(CONFIG_IAC_DECODER)             += imc.o
 OBJS-$(CONFIG_IDCIN_DECODER)           += idcinvideo.o
 OBJS-$(CONFIG_IDF_DECODER)             += bintext.o cga_data.o
 OBJS-$(CONFIG_IFF_BYTERUN1_DECODER)    += iff.o

libavcodec/aacdec.c  +3 -0

@@ -487,6 +487,7 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
             return NULL;
 
         ac->oc[1].m4ac.chan_config = 2;
+        ac->oc[1].m4ac.ps = 0;
     }
     // And vice-versa
     if (!ac->tags_mapped && type == TYPE_SCE && ac->oc[1].m4ac.chan_config == 2) {
@@ -504,6 +505,8 @@ static ChannelElement *get_che(AACContext *ac, int type, int elem_id)
             return NULL;
 
         ac->oc[1].m4ac.chan_config = 1;
+        if (ac->oc[1].m4ac.sbr)
+            ac->oc[1].m4ac.ps = -1;
     }
     // For indexed channel configurations map the channels solely based on position.
     switch (ac->oc[1].m4ac.chan_config) {

libavcodec/allcodecs.c  +1 -0

@@ -286,6 +286,7 @@ void avcodec_register_all(void)
     REGISTER_DECODER (G729, g729);
     REGISTER_DECODER (GSM, gsm);
     REGISTER_DECODER (GSM_MS, gsm_ms);
+    REGISTER_DECODER (IAC, iac);
     REGISTER_DECODER (IMC, imc);
     REGISTER_DECODER (MACE3, mace3);
     REGISTER_DECODER (MACE6, mace6);

Some files were not shown because too many files changed in this diff