
lavfi: switch to AVFrame.

Deprecate AVFilterBuffer/AVFilterBufferRef and everything related to them,
and use AVFrame instead.
Anton Khirnov, 12 years ago
commit 7e350379f8
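
To make the scope of the change concrete, here is a minimal sketch (illustrative only, not taken from this commit) of what a filter_frame() callback for a hypothetical in-place audio filter looks like after the switch, modelled on the af_volume.c hunk further down: the AVFilterBufferRef parameter becomes an AVFrame, ff_get_audio_buffer() loses its permissions argument, the AV_PERM_WRITE check becomes av_frame_is_writable(), and avfilter_unref_buffer() becomes av_frame_free().

    #include "libavutil/frame.h"
    #include "avfilter.h"
    #include "audio.h"
    #include "internal.h"

    static int filter_frame(AVFilterLink *inlink, AVFrame *in)
    {
        AVFilterLink *outlink = inlink->dst->outputs[0];
        AVFrame *out;

        /* process in place when the input frame is writable,
         * otherwise allocate a fresh output frame */
        if (av_frame_is_writable(in)) {
            out = in;
        } else {
            out = ff_get_audio_buffer(outlink, in->nb_samples);
            if (!out) {
                av_frame_free(&in);
                return AVERROR(ENOMEM);
            }
            av_frame_copy_props(out, in);
        }

        /* ... transform samples from in->extended_data into out->extended_data ... */

        if (out != in)
            av_frame_free(&in);
        return ff_filter_frame(outlink, out);
    }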

+ 0 - 10
doc/filters.texi

@@ -622,9 +622,6 @@ same as @var{out_w} and @var{out_h}
 @item n
 the number of input frame, starting from 0
 
-@item pos
-the position in the file of the input frame, NAN if unknown
-
 @item t
 timestamp expressed in seconds, NAN if the input timestamp is unknown
 
@@ -1760,9 +1757,6 @@ the frame is bottom-field-first
 @item key
 1 if the filtered frame is a key-frame, 0 otherwise
 
-@item pos
-the position in the file of the filtered frame, -1 if the information
-is not available (e.g. for synthetic video)
 @end table
 
 The default value of the select expression is "1".
@@ -1854,10 +1848,6 @@ the PTS of the first video frame
 @item INTERLACED
 tell if the current frame is interlaced
 
-@item POS
-original position in the file of the frame, or undefined if undefined
-for the current frame
-
 @item PREV_INPTS
 previous input PTS
 

+ 9 - 9
libavfilter/af_amix.c

@@ -275,18 +275,18 @@ static int output_frame(AVFilterLink *outlink, int nb_samples)
 {
     AVFilterContext *ctx = outlink->src;
     MixContext      *s = ctx->priv;
-    AVFilterBufferRef *out_buf, *in_buf;
+    AVFrame *out_buf, *in_buf;
     int i;
 
     calculate_scales(s, nb_samples);
 
-    out_buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
+    out_buf = ff_get_audio_buffer(outlink, nb_samples);
     if (!out_buf)
         return AVERROR(ENOMEM);
 
-    in_buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
+    in_buf = ff_get_audio_buffer(outlink, nb_samples);
     if (!in_buf) {
-        avfilter_unref_buffer(out_buf);
+        av_frame_free(&out_buf);
         return AVERROR(ENOMEM);
     }
 
@@ -308,7 +308,7 @@ static int output_frame(AVFilterLink *outlink, int nb_samples)
             }
         }
     }
-    avfilter_unref_buffer(in_buf);
+    av_frame_free(&in_buf);
 
     out_buf->pts = s->next_pts;
     if (s->next_pts != AV_NOPTS_VALUE)
@@ -455,7 +455,7 @@ static int request_frame(AVFilterLink *outlink)
     return output_frame(outlink, available_samples);
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
 {
     AVFilterContext  *ctx = inlink->dst;
     MixContext       *s = ctx->priv;
@@ -474,16 +474,16 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
     if (i == 0) {
         int64_t pts = av_rescale_q(buf->pts, inlink->time_base,
                                    outlink->time_base);
-        ret = frame_list_add_frame(s->frame_list, buf->audio->nb_samples, pts);
+        ret = frame_list_add_frame(s->frame_list, buf->nb_samples, pts);
         if (ret < 0)
             goto fail;
     }
 
     ret = av_audio_fifo_write(s->fifos[i], (void **)buf->extended_data,
-                              buf->audio->nb_samples);
+                              buf->nb_samples);
 
 fail:
-    avfilter_unref_buffer(buf);
+    av_frame_free(&buf);
 
     return ret;
 }

+ 5 - 6
libavfilter/af_ashowinfo.c

@@ -65,16 +65,16 @@ static void uninit(AVFilterContext *ctx)
     av_freep(&s->plane_checksums);
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
 {
     AVFilterContext *ctx = inlink->dst;
     AShowInfoContext *s  = ctx->priv;
     char chlayout_str[128];
     uint32_t checksum = 0;
-    int channels    = av_get_channel_layout_nb_channels(buf->audio->channel_layout);
+    int channels    = av_get_channel_layout_nb_channels(buf->channel_layout);
     int planar      = av_sample_fmt_is_planar(buf->format);
     int block_align = av_get_bytes_per_sample(buf->format) * (planar ? 1 : channels);
-    int data_size   = buf->audio->nb_samples * block_align;
+    int data_size   = buf->nb_samples * block_align;
     int planes      = planar ? channels : 1;
     int i;
 
@@ -87,7 +87,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
     }
 
     av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str), -1,
-                                 buf->audio->channel_layout);
+                                 buf->channel_layout);
 
     av_log(ctx, AV_LOG_INFO,
            "n:%"PRIu64" pts:%"PRId64" pts_time:%f "
@@ -95,7 +95,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
            "checksum:%08X ",
            s->frame, buf->pts, buf->pts * av_q2d(inlink->time_base),
            av_get_sample_fmt_name(buf->format), chlayout_str,
-           buf->audio->sample_rate, buf->audio->nb_samples,
+           buf->sample_rate, buf->nb_samples,
            checksum);
 
     av_log(ctx, AV_LOG_INFO, "plane_checksums: [ ");
@@ -114,7 +114,6 @@ static const AVFilterPad inputs[] = {
         .get_audio_buffer = ff_null_get_audio_buffer,
         .config_props     = config_input,
         .filter_frame     = filter_frame,
-        .min_perms        = AV_PERM_READ,
     },
     { NULL },
 };

+ 10 - 12
libavfilter/af_asyncts.c

@@ -158,14 +158,13 @@ static int request_frame(AVFilterLink *link)
             handle_trimming(ctx);
 
         if (nb_samples = get_delay(s)) {
-            AVFilterBufferRef *buf = ff_get_audio_buffer(link, AV_PERM_WRITE,
-                                                         nb_samples);
+            AVFrame *buf = ff_get_audio_buffer(link, nb_samples);
             if (!buf)
                 return AVERROR(ENOMEM);
             ret = avresample_convert(s->avr, buf->extended_data,
                                      buf->linesize[0], nb_samples, NULL, 0, 0);
             if (ret <= 0) {
-                avfilter_unref_bufferp(&buf);
+                av_frame_free(&buf);
                 return (ret < 0) ? ret : AVERROR_EOF;
             }
 
@@ -177,20 +176,20 @@ static int request_frame(AVFilterLink *link)
     return ret;
 }
 
-static int write_to_fifo(ASyncContext *s, AVFilterBufferRef *buf)
+static int write_to_fifo(ASyncContext *s, AVFrame *buf)
 {
     int ret = avresample_convert(s->avr, NULL, 0, 0, buf->extended_data,
-                                 buf->linesize[0], buf->audio->nb_samples);
-    avfilter_unref_buffer(buf);
+                                 buf->linesize[0], buf->nb_samples);
+    av_frame_free(&buf);
     return ret;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
 {
     AVFilterContext  *ctx = inlink->dst;
     ASyncContext       *s = ctx->priv;
     AVFilterLink *outlink = ctx->outputs[0];
-    int nb_channels = av_get_channel_layout_nb_channels(buf->audio->channel_layout);
+    int nb_channels = av_get_channel_layout_nb_channels(buf->channel_layout);
     int64_t pts = (buf->pts == AV_NOPTS_VALUE) ? buf->pts :
                   av_rescale_q(buf->pts, inlink->time_base, outlink->time_base);
     int out_size, ret;
@@ -229,8 +228,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
     }
 
     if (out_size > 0) {
-        AVFilterBufferRef *buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE,
-                                                         out_size);
+        AVFrame *buf_out = ff_get_audio_buffer(outlink, out_size);
         if (!buf_out) {
             ret = AVERROR(ENOMEM);
             goto fail;
@@ -272,11 +270,11 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
 
     s->pts = pts - avresample_get_delay(s->avr);
     ret = avresample_convert(s->avr, NULL, 0, 0, buf->extended_data,
-                             buf->linesize[0], buf->audio->nb_samples);
+                             buf->linesize[0], buf->nb_samples);
 
     s->first_frame = 0;
 fail:
-    avfilter_unref_buffer(buf);
+    av_frame_free(&buf);
 
     return ret;
 }

+ 2 - 2
libavfilter/af_channelmap.c

@@ -313,7 +313,7 @@ static int channelmap_query_formats(AVFilterContext *ctx)
     return 0;
 }
 
-static int channelmap_filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int channelmap_filter_frame(AVFilterLink *inlink, AVFrame *buf)
 {
     AVFilterContext  *ctx = inlink->dst;
     AVFilterLink *outlink = ctx->outputs[0];
@@ -331,7 +331,7 @@ static int channelmap_filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
             uint8_t **new_extended_data =
                 av_mallocz(nch_out * sizeof(*buf->extended_data));
             if (!new_extended_data) {
-                avfilter_unref_buffer(buf);
+                av_frame_free(&buf);
                 return AVERROR(ENOMEM);
             }
             if (buf->extended_data == buf->data) {

+ 5 - 5
libavfilter/af_channelsplit.c

@@ -111,13 +111,13 @@ static int query_formats(AVFilterContext *ctx)
     return 0;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
 {
     AVFilterContext *ctx = inlink->dst;
     int i, ret = 0;
 
     for (i = 0; i < ctx->nb_outputs; i++) {
-        AVFilterBufferRef *buf_out = avfilter_ref_buffer(buf, ~AV_PERM_WRITE);
+        AVFrame *buf_out = av_frame_clone(buf);
 
         if (!buf_out) {
             ret = AVERROR(ENOMEM);
@@ -125,14 +125,14 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
         }
 
         buf_out->data[0] = buf_out->extended_data[0] = buf_out->extended_data[i];
-        buf_out->audio->channel_layout =
-            av_channel_layout_extract_channel(buf->audio->channel_layout, i);
+        buf_out->channel_layout =
+            av_channel_layout_extract_channel(buf->channel_layout, i);
 
         ret = ff_filter_frame(ctx->outputs[i], buf_out);
         if (ret < 0)
             break;
     }
-    avfilter_unref_buffer(buf);
+    av_frame_free(&buf);
     return ret;
 }
 

+ 81 - 70
libavfilter/af_join.c

@@ -56,24 +56,14 @@ typedef struct JoinContext {
     /**
      * Temporary storage for input frames, until we get one on each input.
      */
-    AVFilterBufferRef **input_frames;
+    AVFrame **input_frames;
 
     /**
-     *  Temporary storage for data pointers, for assembling the output buffer.
+     *  Temporary storage for buffer references, for assembling the output frame.
      */
-    uint8_t **data;
+    AVBufferRef **buffers;
 } JoinContext;
 
-/**
- * To avoid copying the data from input buffers, this filter creates
- * a custom output buffer that stores references to all inputs and
- * unrefs them on free.
- */
-typedef struct JoinBufferPriv {
-    AVFilterBufferRef **in_buffers;
-    int              nb_in_buffers;
-} JoinBufferPriv;
-
 #define OFFSET(x) offsetof(JoinContext, x)
 #define A AV_OPT_FLAG_AUDIO_PARAM
 static const AVOption join_options[] = {
@@ -93,7 +83,7 @@ static const AVClass join_class = {
     .version    = LIBAVUTIL_VERSION_INT,
 };
 
-static int filter_frame(AVFilterLink *link, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *link, AVFrame *frame)
 {
     AVFilterContext *ctx = link->dst;
     JoinContext       *s = ctx->priv;
@@ -104,7 +94,7 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *buf)
             break;
     av_assert0(i < ctx->nb_inputs);
     av_assert0(!s->input_frames[i]);
-    s->input_frames[i] = buf;
+    s->input_frames[i] = frame;
 
     return 0;
 }
@@ -208,9 +198,9 @@ static int join_init(AVFilterContext *ctx, const char *args)
 
     s->nb_channels  = av_get_channel_layout_nb_channels(s->channel_layout);
     s->channels     = av_mallocz(sizeof(*s->channels) * s->nb_channels);
-    s->data         = av_mallocz(sizeof(*s->data)     * s->nb_channels);
+    s->buffers      = av_mallocz(sizeof(*s->buffers)  * s->nb_channels);
     s->input_frames = av_mallocz(sizeof(*s->input_frames) * s->inputs);
-    if (!s->channels || !s->data || !s->input_frames) {
+    if (!s->channels || !s->buffers || !s->input_frames) {
         ret = AVERROR(ENOMEM);
         goto fail;
     }
@@ -249,11 +239,11 @@ static void join_uninit(AVFilterContext *ctx)
 
     for (i = 0; i < ctx->nb_inputs; i++) {
         av_freep(&ctx->input_pads[i].name);
-        avfilter_unref_bufferp(&s->input_frames[i]);
+        av_frame_free(&s->input_frames[i]);
     }
 
     av_freep(&s->channels);
-    av_freep(&s->data);
+    av_freep(&s->buffers);
     av_freep(&s->input_frames);
 }
 
@@ -395,34 +385,14 @@ fail:
     return ret;
 }
 
-static void join_free_buffer(AVFilterBuffer *buf)
-{
-    JoinBufferPriv *priv = buf->priv;
-
-    if (priv) {
-        int i;
-
-        for (i = 0; i < priv->nb_in_buffers; i++)
-            avfilter_unref_bufferp(&priv->in_buffers[i]);
-
-        av_freep(&priv->in_buffers);
-        av_freep(&buf->priv);
-    }
-
-    if (buf->extended_data != buf->data)
-        av_freep(&buf->extended_data);
-    av_freep(&buf);
-}
-
 static int join_request_frame(AVFilterLink *outlink)
 {
     AVFilterContext *ctx = outlink->src;
     JoinContext *s       = ctx->priv;
-    AVFilterBufferRef *buf;
-    JoinBufferPriv *priv;
+    AVFrame *frame;
     int linesize   = INT_MAX;
-    int perms      = ~0;
     int nb_samples = 0;
+    int nb_buffers = 0;
     int i, j, ret;
 
     /* get a frame on each input */
@@ -435,54 +405,95 @@ static int join_request_frame(AVFilterLink *outlink)
 
         /* request the same number of samples on all inputs */
         if (i == 0) {
-            nb_samples = s->input_frames[0]->audio->nb_samples;
+            nb_samples = s->input_frames[0]->nb_samples;
 
             for (j = 1; !i && j < ctx->nb_inputs; j++)
                 ctx->inputs[j]->request_samples = nb_samples;
         }
     }
 
+    /* setup the output frame */
+    frame = av_frame_alloc();
+    if (!frame)
+        return AVERROR(ENOMEM);
+    if (s->nb_channels > FF_ARRAY_ELEMS(frame->data)) {
+        frame->extended_data = av_mallocz(s->nb_channels *
+                                          sizeof(*frame->extended_data));
+        if (!frame->extended_data) {
+            ret = AVERROR(ENOMEM);
+            goto fail;
+        }
+    }
+
+    /* copy the data pointers */
     for (i = 0; i < s->nb_channels; i++) {
         ChannelMap *ch = &s->channels[i];
-        AVFilterBufferRef *cur_buf = s->input_frames[ch->input];
-
-        s->data[i] = cur_buf->extended_data[ch->in_channel_idx];
-        linesize   = FFMIN(linesize, cur_buf->linesize[0]);
-        perms     &= cur_buf->perms;
-    }
+        AVFrame *cur   = s->input_frames[ch->input];
+        AVBufferRef *buf;
 
-    av_assert0(nb_samples > 0);
-    buf = avfilter_get_audio_buffer_ref_from_arrays(s->data, linesize, perms,
-                                                    nb_samples, outlink->format,
-                                                    outlink->channel_layout);
-    if (!buf)
-        return AVERROR(ENOMEM);
+        frame->extended_data[i] = cur->extended_data[ch->in_channel_idx];
+        linesize = FFMIN(linesize, cur->linesize[0]);
 
-    buf->buf->free = join_free_buffer;
-    buf->pts       = s->input_frames[0]->pts;
+        /* add the buffer where this plane is stored to the list if it's
+         * not already there */
+        buf = av_frame_get_plane_buffer(cur, ch->in_channel_idx);
+        if (!buf) {
+            ret = AVERROR(EINVAL);
+            goto fail;
+        }
+        for (j = 0; j < nb_buffers; j++)
+            if (s->buffers[j]->buffer == buf->buffer)
+                break;
+        if (j == nb_buffers)
+            s->buffers[nb_buffers++] = buf;
+    }
 
-    if (!(priv = av_mallocz(sizeof(*priv))))
-        goto fail;
-    if (!(priv->in_buffers = av_mallocz(sizeof(*priv->in_buffers) * ctx->nb_inputs)))
-        goto fail;
+    /* create references to the buffers we copied to output */
+    if (nb_buffers > FF_ARRAY_ELEMS(frame->buf)) {
+        frame->nb_extended_buf = nb_buffers - FF_ARRAY_ELEMS(frame->buf);
+        frame->extended_buf = av_mallocz(sizeof(*frame->extended_buf) *
+                                         frame->nb_extended_buf);
+        if (!frame->extended_buf) {
+            frame->nb_extended_buf = 0;
+            ret = AVERROR(ENOMEM);
+            goto fail;
+        }
+    }
+    for (i = 0; i < FFMIN(FF_ARRAY_ELEMS(frame->buf), nb_buffers); i++) {
+        frame->buf[i] = av_buffer_ref(s->buffers[i]);
+        if (!frame->buf[i]) {
+            ret = AVERROR(ENOMEM);
+            goto fail;
+        }
+    }
+    for (i = 0; i < frame->nb_extended_buf; i++) {
+        frame->extended_buf[i] = av_buffer_ref(s->buffers[i +
+                                               FF_ARRAY_ELEMS(frame->buf)]);
+        if (!frame->extended_buf[i]) {
+            ret = AVERROR(ENOMEM);
+            goto fail;
+        }
+    }
 
-    for (i = 0; i < ctx->nb_inputs; i++)
-        priv->in_buffers[i] = s->input_frames[i];
-    priv->nb_in_buffers = ctx->nb_inputs;
-    buf->buf->priv      = priv;
+    frame->nb_samples     = nb_samples;
+    frame->channel_layout = outlink->channel_layout;
+    frame->sample_rate    = outlink->sample_rate;
+    frame->pts            = s->input_frames[0]->pts;
+    frame->linesize[0]    = linesize;
+    if (frame->data != frame->extended_data) {
+        memcpy(frame->data, frame->extended_data, sizeof(*frame->data) *
+               FFMIN(FF_ARRAY_ELEMS(frame->data), s->nb_channels));
+    }
 
-    ret = ff_filter_frame(outlink, buf);
+    ret = ff_filter_frame(outlink, frame);
 
     memset(s->input_frames, 0, sizeof(*s->input_frames) * ctx->nb_inputs);
 
     return ret;
 
 fail:
-    avfilter_unref_buffer(buf);
-    if (priv)
-        av_freep(&priv->in_buffers);
-    av_freep(&priv);
-    return AVERROR(ENOMEM);
+    av_frame_free(&frame);
+    return ret;
 }
 
 static const AVFilterPad avfilter_af_join_outputs[] = {
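
With refcounted AVFrames, the custom join_free_buffer() callback removed above is no longer needed: the output frame simply holds extra references to the AVBufferRefs backing each input plane. A minimal sketch of that mechanism (illustrative only, not part of the commit; share_plane() is a hypothetical helper and only a single plane is re-exported):

    #include "libavutil/buffer.h"
    #include "libavutil/error.h"
    #include "libavutil/frame.h"

    static int share_plane(AVFrame *dst, AVFrame *src, int plane)
    {
        /* find the refcounted buffer that backs the requested plane of src */
        AVBufferRef *buf = av_frame_get_plane_buffer(src, plane);
        if (!buf)
            return AVERROR(EINVAL);

        /* take our own reference so the data stays alive after src is freed */
        dst->buf[0] = av_buffer_ref(buf);
        if (!dst->buf[0])
            return AVERROR(ENOMEM);

        /* point the output frame at the shared samples, no memcpy involved */
        dst->extended_data = dst->data;
        dst->data[0]       = src->extended_data[plane];
        dst->linesize[0]   = src->linesize[0];
        return 0;
    }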

+ 28 - 30
libavfilter/af_resample.c

@@ -174,7 +174,7 @@ static int request_frame(AVFilterLink *outlink)
 
     /* flush the lavr delay buffer */
     if (ret == AVERROR_EOF && s->avr) {
-        AVFilterBufferRef *buf;
+        AVFrame *frame;
         int nb_samples = av_rescale_rnd(avresample_get_delay(s->avr),
                                         outlink->sample_rate,
                                         ctx->inputs[0]->sample_rate,
@@ -183,25 +183,25 @@ static int request_frame(AVFilterLink *outlink)
         if (!nb_samples)
             return ret;
 
-        buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
-        if (!buf)
+        frame = ff_get_audio_buffer(outlink, nb_samples);
+        if (!frame)
             return AVERROR(ENOMEM);
 
-        ret = avresample_convert(s->avr, buf->extended_data,
-                                 buf->linesize[0], nb_samples,
+        ret = avresample_convert(s->avr, frame->extended_data,
+                                 frame->linesize[0], nb_samples,
                                  NULL, 0, 0);
         if (ret <= 0) {
-            avfilter_unref_buffer(buf);
+            av_frame_free(&frame);
             return (ret == 0) ? AVERROR_EOF : ret;
         }
 
-        buf->pts = s->next_pts;
-        return ff_filter_frame(outlink, buf);
+        frame->pts = s->next_pts;
+        return ff_filter_frame(outlink, frame);
     }
     return ret;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 {
     AVFilterContext  *ctx = inlink->dst;
     ResampleContext    *s = ctx->priv;
@@ -209,27 +209,26 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
     int ret;
 
     if (s->avr) {
-        AVFilterBufferRef *buf_out;
+        AVFrame *out;
         int delay, nb_samples;
 
         /* maximum possible samples lavr can output */
         delay      = avresample_get_delay(s->avr);
-        nb_samples = av_rescale_rnd(buf->audio->nb_samples + delay,
+        nb_samples = av_rescale_rnd(in->nb_samples + delay,
                                     outlink->sample_rate, inlink->sample_rate,
                                     AV_ROUND_UP);
 
-        buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
-        if (!buf_out) {
+        out = ff_get_audio_buffer(outlink, nb_samples);
+        if (!out) {
             ret = AVERROR(ENOMEM);
             goto fail;
         }
 
-        ret     = avresample_convert(s->avr, buf_out->extended_data,
-                                     buf_out->linesize[0], nb_samples,
-                                     buf->extended_data, buf->linesize[0],
-                                     buf->audio->nb_samples);
+        ret = avresample_convert(s->avr, out->extended_data, out->linesize[0],
+                                 nb_samples, in->extended_data, in->linesize[0],
+                                 in->nb_samples);
         if (ret <= 0) {
-            avfilter_unref_buffer(buf_out);
+            av_frame_free(&out);
             if (ret < 0)
                 goto fail;
         }
@@ -237,36 +236,36 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
         av_assert0(!avresample_available(s->avr));
 
         if (s->next_pts == AV_NOPTS_VALUE) {
-            if (buf->pts == AV_NOPTS_VALUE) {
+            if (in->pts == AV_NOPTS_VALUE) {
                 av_log(ctx, AV_LOG_WARNING, "First timestamp is missing, "
                        "assuming 0.\n");
                 s->next_pts = 0;
             } else
-                s->next_pts = av_rescale_q(buf->pts, inlink->time_base,
+                s->next_pts = av_rescale_q(in->pts, inlink->time_base,
                                            outlink->time_base);
         }
 
         if (ret > 0) {
-            buf_out->audio->nb_samples = ret;
-            if (buf->pts != AV_NOPTS_VALUE) {
-                buf_out->pts = av_rescale_q(buf->pts, inlink->time_base,
+            out->nb_samples = ret;
+            if (in->pts != AV_NOPTS_VALUE) {
+                out->pts = av_rescale_q(in->pts, inlink->time_base,
                                             outlink->time_base) -
                                av_rescale(delay, outlink->sample_rate,
                                           inlink->sample_rate);
             } else
-                buf_out->pts = s->next_pts;
+                out->pts = s->next_pts;
 
-            s->next_pts = buf_out->pts + buf_out->audio->nb_samples;
+            s->next_pts = out->pts + out->nb_samples;
 
-            ret = ff_filter_frame(outlink, buf_out);
+            ret = ff_filter_frame(outlink, out);
             s->got_output = 1;
         }
 
 fail:
-        avfilter_unref_buffer(buf);
+        av_frame_free(&in);
     } else {
-        buf->format = outlink->format;
-        ret = ff_filter_frame(outlink, buf);
+        in->format = outlink->format;
+        ret = ff_filter_frame(outlink, in);
         s->got_output = 1;
     }
 
@@ -278,7 +277,6 @@ static const AVFilterPad avfilter_af_resample_inputs[] = {
         .name           = "default",
         .type           = AVMEDIA_TYPE_AUDIO,
         .filter_frame   = filter_frame,
-        .min_perms      = AV_PERM_READ
     },
     { NULL }
 };

+ 6 - 6
libavfilter/af_volume.c

@@ -233,21 +233,21 @@ static int config_output(AVFilterLink *outlink)
     return 0;
 }
 
-static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
+static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
 {
     VolumeContext *vol    = inlink->dst->priv;
     AVFilterLink *outlink = inlink->dst->outputs[0];
-    int nb_samples        = buf->audio->nb_samples;
-    AVFilterBufferRef *out_buf;
+    int nb_samples        = buf->nb_samples;
+    AVFrame *out_buf;
 
     if (vol->volume == 1.0 || vol->volume_i == 256)
         return ff_filter_frame(outlink, buf);
 
     /* do volume scaling in-place if input buffer is writable */
-    if (buf->perms & AV_PERM_WRITE) {
+    if (av_frame_is_writable(buf)) {
         out_buf = buf;
     } else {
-        out_buf = ff_get_audio_buffer(inlink, AV_PERM_WRITE, nb_samples);
+        out_buf = ff_get_audio_buffer(inlink, nb_samples);
         if (!out_buf)
             return AVERROR(ENOMEM);
         out_buf->pts = buf->pts;
@@ -283,7 +283,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
     }
 
     if (buf != out_buf)
-        avfilter_unref_buffer(buf);
+        av_frame_free(&buf);
 
     return ff_filter_frame(outlink, out_buf);
 }

+ 2 - 2
libavfilter/asink_anullsink.c

@@ -20,9 +20,9 @@
 #include "avfilter.h"
 #include "internal.h"
 
-static int null_filter_frame(AVFilterLink *link, AVFilterBufferRef *samplesref)
+static int null_filter_frame(AVFilterLink *link, AVFrame *frame)
 {
-    avfilter_unref_bufferp(&samplesref);
+    av_frame_free(&frame);
     return 0;
 }
 

Some files were not shown because too many files changed in this diff