trim.c

/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>

#include "config.h"

#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/log.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"

#include "audio.h"
#include "avfilter.h"
#include "internal.h"

typedef struct TrimContext {
    const AVClass *class;

    /*
     * AVOptions
     */
    int64_t duration;
    int64_t start_time, end_time;
    int64_t start_frame, end_frame;

    /*
     * in the link timebase for video,
     * in 1/samplerate for audio
     */
    int64_t start_pts, end_pts;
    int64_t start_sample, end_sample;

    /*
     * number of video frames that arrived on this filter so far
     */
    int64_t nb_frames;
    /*
     * number of audio samples that arrived on this filter so far
     */
    int64_t nb_samples;

    /*
     * timestamp of the first frame in the output, in the timebase units
     */
    int64_t first_pts;
    /*
     * duration in the timebase units
     */
    int64_t duration_tb;

    int64_t next_pts;

    int eof;
} TrimContext;

static av_cold int init(AVFilterContext *ctx)
{
    TrimContext *s = ctx->priv;

    s->first_pts = AV_NOPTS_VALUE;

    return 0;
}

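/*
 * config_input converts the user-supplied start/end times and duration from
 * AV_TIME_BASE units (microseconds) into the working timebase: the link
 * timebase for video, 1/sample_rate for audio.
 */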
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    TrimContext       *s = ctx->priv;
    AVRational tb = (inlink->type == AVMEDIA_TYPE_VIDEO) ?
                     inlink->time_base : (AVRational){ 1, inlink->sample_rate };

    if (s->start_time != INT64_MAX) {
        int64_t start_pts = av_rescale_q(s->start_time, AV_TIME_BASE_Q, tb);
        if (s->start_pts == AV_NOPTS_VALUE || start_pts < s->start_pts)
            s->start_pts = start_pts;
    }
    if (s->end_time != INT64_MAX) {
        int64_t end_pts = av_rescale_q(s->end_time, AV_TIME_BASE_Q, tb);
        if (s->end_pts == AV_NOPTS_VALUE || end_pts > s->end_pts)
            s->end_pts = end_pts;
    }
    if (s->duration)
        s->duration_tb = av_rescale_q(s->duration, AV_TIME_BASE_Q, tb);

    return 0;
}

#define OFFSET(x) offsetof(TrimContext, x)
#define COMMON_OPTS                                                                                                                  \
    { "start",     "Timestamp of the first frame that "                                                                              \
        "should be passed",        OFFSET(start_time), AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX },      INT64_MIN, INT64_MAX, FLAGS }, \
    { "starti",    "Timestamp of the first frame that "                                                                              \
        "should be passed",        OFFSET(start_time), AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX },      INT64_MIN, INT64_MAX, FLAGS }, \
    { "end",       "Timestamp of the first frame that "                                                                              \
        "should be dropped again", OFFSET(end_time),   AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX },      INT64_MIN, INT64_MAX, FLAGS }, \
    { "endi",      "Timestamp of the first frame that "                                                                              \
        "should be dropped again", OFFSET(end_time),   AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX },      INT64_MIN, INT64_MAX, FLAGS }, \
    { "start_pts", "Timestamp of the first frame that should be "                                                                    \
        " passed",                 OFFSET(start_pts),  AV_OPT_TYPE_INT64,    { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
    { "end_pts",   "Timestamp of the first frame that should be "                                                                    \
        "dropped again",           OFFSET(end_pts),    AV_OPT_TYPE_INT64,    { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
    { "duration",  "Maximum duration of the output", OFFSET(duration), AV_OPT_TYPE_DURATION, { .i64 = 0 }, 0, INT64_MAX, FLAGS },      \
    { "durationi", "Maximum duration of the output", OFFSET(duration), AV_OPT_TYPE_DURATION, { .i64 = 0 }, 0, INT64_MAX, FLAGS },

#if CONFIG_TRIM_FILTER
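/*
 * Video path: frames are dropped until the start frame/pts is reached, then
 * passed through unchanged; once the end frame/pts or the requested duration
 * is exceeded, EOF is signalled on the input link and everything else is
 * dropped.
 */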
static int trim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    TrimContext       *s = ctx->priv;
    int drop;

    /* drop everything if EOF has already been returned */
    if (s->eof) {
        av_frame_free(&frame);
        return 0;
    }

    if (s->start_frame >= 0 || s->start_pts != AV_NOPTS_VALUE) {
        drop = 1;
        if (s->start_frame >= 0 && s->nb_frames >= s->start_frame)
            drop = 0;
        if (s->start_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE &&
            frame->pts >= s->start_pts)
            drop = 0;
        if (drop)
            goto drop;
    }

    if (s->first_pts == AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE)
        s->first_pts = frame->pts;

    if (s->end_frame != INT64_MAX || s->end_pts != AV_NOPTS_VALUE || s->duration_tb) {
        drop = 1;

        if (s->end_frame != INT64_MAX && s->nb_frames < s->end_frame)
            drop = 0;
        if (s->end_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE &&
            frame->pts < s->end_pts)
            drop = 0;
        if (s->duration_tb && frame->pts != AV_NOPTS_VALUE &&
            frame->pts - s->first_pts < s->duration_tb)
            drop = 0;

        if (drop) {
            s->eof = 1;
            ff_avfilter_link_set_out_status(inlink, AVERROR_EOF, AV_NOPTS_VALUE);
            goto drop;
        }
    }

    s->nb_frames++;

    return ff_filter_frame(ctx->outputs[0], frame);

drop:
    s->nb_frames++;
    av_frame_free(&frame);
    return 0;
}

#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption trim_options[] = {
    COMMON_OPTS
    { "start_frame", "Number of the first frame that should be passed "
                     "to the output", OFFSET(start_frame), AV_OPT_TYPE_INT64, { .i64 = -1 },        -1, INT64_MAX, FLAGS },
    { "end_frame",   "Number of the first frame that should be dropped "
                     "again",         OFFSET(end_frame),   AV_OPT_TYPE_INT64, { .i64 = INT64_MAX },  0, INT64_MAX, FLAGS },
    { NULL }
};
#undef FLAGS

AVFILTER_DEFINE_CLASS(trim);

static const AVFilterPad trim_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = trim_filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad trim_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

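/*
 * Example filtergraph usage (illustrative only): the filter does not reset
 * output timestamps to zero, so it is commonly chained with setpts, e.g.
 *
 *     ffmpeg -i in.mp4 -vf "trim=start=10:end=20,setpts=PTS-STARTPTS" out.mp4
 */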
AVFilter ff_vf_trim = {
    .name        = "trim",
    .description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),
    .init        = init,
    .priv_size   = sizeof(TrimContext),
    .priv_class  = &trim_class,
    .inputs      = trim_inputs,
    .outputs     = trim_outputs,
};
#endif // CONFIG_TRIM_FILTER

#if CONFIG_ATRIM_FILTER
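/*
 * Audio path: trimming is sample-accurate. A frame that straddles the start
 * or end point is not passed or dropped as a whole; the surviving samples
 * are copied into a new buffer and the pts is adjusted accordingly.
 */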
static int atrim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    TrimContext       *s = ctx->priv;
    int64_t start_sample, end_sample;
    int64_t pts;
    int drop;

    /* drop everything if EOF has already been returned */
    if (s->eof) {
        av_frame_free(&frame);
        return 0;
    }

    if (frame->pts != AV_NOPTS_VALUE)
        pts = av_rescale_q(frame->pts, inlink->time_base,
                           (AVRational){ 1, inlink->sample_rate });
    else
        pts = s->next_pts;
    s->next_pts = pts + frame->nb_samples;

    /* check if at least a part of the frame is after the start time */
    if (s->start_sample < 0 && s->start_pts == AV_NOPTS_VALUE) {
        start_sample = 0;
    } else {
        drop         = 1;
        start_sample = frame->nb_samples;

        if (s->start_sample >= 0 &&
            s->nb_samples + frame->nb_samples > s->start_sample) {
            drop         = 0;
            start_sample = FFMIN(start_sample, s->start_sample - s->nb_samples);
        }

        if (s->start_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
            pts + frame->nb_samples > s->start_pts) {
            drop         = 0;
            start_sample = FFMIN(start_sample, s->start_pts - pts);
        }

        if (drop)
            goto drop;
    }

    if (s->first_pts == AV_NOPTS_VALUE)
        s->first_pts = pts + start_sample;

    /* check if at least a part of the frame is before the end time */
    if (s->end_sample == INT64_MAX && s->end_pts == AV_NOPTS_VALUE && !s->duration_tb) {
        end_sample = frame->nb_samples;
    } else {
        drop       = 1;
        end_sample = 0;

        if (s->end_sample != INT64_MAX &&
            s->nb_samples < s->end_sample) {
            drop       = 0;
            end_sample = FFMAX(end_sample, s->end_sample - s->nb_samples);
        }

        if (s->end_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
            pts < s->end_pts) {
            drop       = 0;
            end_sample = FFMAX(end_sample, s->end_pts - pts);
        }

        if (s->duration_tb && pts - s->first_pts < s->duration_tb) {
            drop       = 0;
            end_sample = FFMAX(end_sample, s->first_pts + s->duration_tb - pts);
        }

        if (drop) {
            s->eof = 1;
            ff_avfilter_link_set_out_status(inlink, AVERROR_EOF, AV_NOPTS_VALUE);
            goto drop;
        }
    }

    s->nb_samples += frame->nb_samples;
    start_sample   = FFMAX(0, start_sample);
    end_sample     = FFMIN(frame->nb_samples, end_sample);
    av_assert0(start_sample < end_sample || (start_sample == end_sample && !frame->nb_samples));

    if (start_sample) {
        AVFrame *out = ff_get_audio_buffer(ctx->outputs[0], end_sample - start_sample);
        if (!out) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(out, frame);
        av_samples_copy(out->extended_data, frame->extended_data, 0, start_sample,
                        out->nb_samples, inlink->channels,
                        frame->format);
        if (out->pts != AV_NOPTS_VALUE)
            out->pts += av_rescale_q(start_sample, (AVRational){ 1, out->sample_rate },
                                     inlink->time_base);

        av_frame_free(&frame);
        frame = out;
    } else
        frame->nb_samples = end_sample;

    return ff_filter_frame(ctx->outputs[0], frame);

drop:
    s->nb_samples += frame->nb_samples;
    av_frame_free(&frame);
    return 0;
}

#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption atrim_options[] = {
    COMMON_OPTS
    { "start_sample", "Number of the first audio sample that should be "
                      "passed to the output", OFFSET(start_sample), AV_OPT_TYPE_INT64, { .i64 = -1 },        -1, INT64_MAX, FLAGS },
    { "end_sample",   "Number of the first audio sample that should be "
                      "dropped again",        OFFSET(end_sample),   AV_OPT_TYPE_INT64, { .i64 = INT64_MAX },  0, INT64_MAX, FLAGS },
    { NULL }
};
#undef FLAGS

AVFILTER_DEFINE_CLASS(atrim);

static const AVFilterPad atrim_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = atrim_filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad atrim_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

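/*
 * Example filtergraph usage (illustrative only), keeping 10 seconds of audio
 * starting at 5 s and rebasing the remaining timestamps with asetpts:
 *
 *     ffmpeg -i in.wav -af "atrim=start=5:duration=10,asetpts=PTS-STARTPTS" out.wav
 */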
AVFilter ff_af_atrim = {
    .name          = "atrim",
    .description   = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),
    .init          = init,
    .query_formats = ff_query_formats_all,
    .priv_size     = sizeof(TrimContext),
    .priv_class    = &atrim_class,
    .inputs        = atrim_inputs,
    .outputs       = atrim_outputs,
};
#endif // CONFIG_ATRIM_FILTER