/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <float.h>
#include <math.h>
#include <stdint.h>

#include "config.h"

#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/log.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"

#include "audio.h"
#include "avfilter.h"
#include "internal.h"

typedef struct TrimContext {
    const AVClass *class;

    /*
     * AVOptions
     */
    int64_t duration;
    int64_t start_time, end_time;
    int64_t start_frame, end_frame;
    double duration_dbl;
    double start_time_dbl, end_time_dbl;
    /*
     * in the link timebase for video,
     * in 1/samplerate for audio
     */
    int64_t start_pts, end_pts;
    int64_t start_sample, end_sample;

    /*
     * number of video frames that arrived on this filter so far
     */
    int64_t nb_frames;
    /*
     * number of audio samples that arrived on this filter so far
     */
    int64_t nb_samples;
    /*
     * timestamp of the first frame in the output, in the timebase units
     */
    int64_t first_pts;
    /*
     * duration in the timebase units
     */
    int64_t duration_tb;

    int64_t next_pts;

    int eof;
} TrimContext;

static av_cold int init(AVFilterContext *ctx)
{
    TrimContext *s = ctx->priv;

    s->first_pts = AV_NOPTS_VALUE;

    return 0;
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    TrimContext       *s = ctx->priv;
    AVRational tb = (inlink->type == AVMEDIA_TYPE_VIDEO) ?
                     inlink->time_base : (AVRational){ 1, inlink->sample_rate };

    if (s->start_time_dbl != DBL_MAX)
        s->start_time = s->start_time_dbl * 1e6;
    if (s->end_time_dbl != DBL_MAX)
        s->end_time = s->end_time_dbl * 1e6;
    if (s->duration_dbl != 0)
        s->duration = s->duration_dbl * 1e6;

    if (s->start_time != INT64_MAX) {
        int64_t start_pts = av_rescale_q(s->start_time, AV_TIME_BASE_Q, tb);
        if (s->start_pts == AV_NOPTS_VALUE || start_pts < s->start_pts)
            s->start_pts = start_pts;
    }
    if (s->end_time != INT64_MAX) {
        int64_t end_pts = av_rescale_q(s->end_time, AV_TIME_BASE_Q, tb);
        if (s->end_pts == AV_NOPTS_VALUE || end_pts > s->end_pts)
            s->end_pts = end_pts;
    }
    if (s->duration)
        s->duration_tb = av_rescale_q(s->duration, AV_TIME_BASE_Q, tb);

    return 0;
}
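
/*
 * Worked example of the conversion above (illustrative): with the compat
 * option start=1.5 on a 48000 Hz audio link, start_time becomes 1500000
 * microseconds and av_rescale_q(1500000, AV_TIME_BASE_Q,
 * (AVRational){ 1, 48000 }) yields 72000, i.e. trimming starts at sample
 * 72000. On a video link the same value is rescaled to the link time base
 * instead.
 */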

static int config_output(AVFilterLink *outlink)
{
    outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;

    return 0;
}

#define OFFSET(x) offsetof(TrimContext, x)
#define COMMON_OPTS                                                          \
    { "starti",    "Timestamp of the first frame that "                      \
        "should be passed",        OFFSET(start_time), AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX },      INT64_MIN, INT64_MAX, FLAGS }, \
    { "endi",      "Timestamp of the first frame that "                      \
        "should be dropped again", OFFSET(end_time),   AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX },      INT64_MIN, INT64_MAX, FLAGS }, \
    { "start_pts", "Timestamp of the first frame that should be "            \
        "passed",                  OFFSET(start_pts),  AV_OPT_TYPE_INT64,    { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
    { "end_pts",   "Timestamp of the first frame that should be "            \
        "dropped again",           OFFSET(end_pts),    AV_OPT_TYPE_INT64,    { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
    { "durationi", "Maximum duration of the output", OFFSET(duration), AV_OPT_TYPE_DURATION, { .i64 = 0 }, 0, INT64_MAX, FLAGS },

#define COMPAT_OPTS                                                          \
    { "start",     "Timestamp in seconds of the first frame that "           \
        "should be passed",        OFFSET(start_time_dbl), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX }, -DBL_MAX, DBL_MAX, FLAGS }, \
    { "end",       "Timestamp in seconds of the first frame that "           \
        "should be dropped again", OFFSET(end_time_dbl),   AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX }, -DBL_MAX, DBL_MAX, FLAGS }, \
    { "duration",  "Maximum duration of the output in seconds", OFFSET(duration_dbl), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, DBL_MAX, FLAGS },
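
/*
 * Example filtergraph usage of the options above (illustrative):
 *   trim=start=2:end=7          pass video between t=2s and t=7s
 *   trim=start_frame=100        drop everything before frame 100
 *   atrim=start_sample=44100    drop the first second of 44.1 kHz audio
 * "start"/"end"/"duration" are the compat double options in seconds;
 * "starti"/"endi"/"durationi" take AV_OPT_TYPE_DURATION strings.
 */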

#if CONFIG_TRIM_FILTER
static int trim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    TrimContext       *s = ctx->priv;
    int drop;

    /* drop everything if EOF has already been returned */
    if (s->eof) {
        av_frame_free(&frame);
        return 0;
    }

    if (s->start_frame >= 0 || s->start_pts != AV_NOPTS_VALUE) {
        drop = 1;
        if (s->start_frame >= 0 && s->nb_frames >= s->start_frame)
            drop = 0;
        if (s->start_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE &&
            frame->pts >= s->start_pts)
            drop = 0;
        if (drop)
            goto drop;
    }

    if (s->first_pts == AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE)
        s->first_pts = frame->pts;

    if (s->end_frame != INT64_MAX || s->end_pts != AV_NOPTS_VALUE || s->duration_tb) {
        drop = 1;

        if (s->end_frame != INT64_MAX && s->nb_frames < s->end_frame)
            drop = 0;
        if (s->end_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE &&
            frame->pts < s->end_pts)
            drop = 0;
        if (s->duration_tb && frame->pts != AV_NOPTS_VALUE &&
            frame->pts - s->first_pts < s->duration_tb)
            drop = 0;

        if (drop) {
            s->eof = inlink->closed = 1;
            goto drop;
        }
    }

    s->nb_frames++;

    return ff_filter_frame(ctx->outputs[0], frame);

drop:
    s->nb_frames++;
    av_frame_free(&frame);
    return 0;
}
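
/*
 * Example of the counting logic above (illustrative): with start_frame=3 and
 * end_frame=5, input frames 0-2 are dropped, frames 3 and 4 are passed
 * through, and frame 5 triggers EOF (s->eof and inlink->closed are set).
 */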

#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption trim_options[] = {
    COMMON_OPTS
    { "start_frame", "Number of the first frame that should be passed "
        "to the output", OFFSET(start_frame), AV_OPT_TYPE_INT64, { .i64 = -1 },        -1, INT64_MAX, FLAGS },
    { "end_frame",   "Number of the first frame that should be dropped "
        "again",         OFFSET(end_frame),   AV_OPT_TYPE_INT64, { .i64 = INT64_MAX },  0, INT64_MAX, FLAGS },
    COMPAT_OPTS
    { NULL }
};
#undef FLAGS

AVFILTER_DEFINE_CLASS(trim);

static const AVFilterPad trim_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = trim_filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad trim_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_vf_trim = {
    .name        = "trim",
    .description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),
    .init        = init,
    .priv_size   = sizeof(TrimContext),
    .priv_class  = &trim_class,
    .inputs      = trim_inputs,
    .outputs     = trim_outputs,
};
#endif // CONFIG_TRIM_FILTER

#if CONFIG_ATRIM_FILTER
static int atrim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    TrimContext       *s = ctx->priv;
    int64_t start_sample, end_sample;
    int64_t pts;
    int drop;

    /* drop everything if EOF has already been returned */
    if (s->eof) {
        av_frame_free(&frame);
        return 0;
    }

    if (frame->pts != AV_NOPTS_VALUE)
        pts = av_rescale_q(frame->pts, inlink->time_base,
                           (AVRational){ 1, inlink->sample_rate });
    else
        pts = s->next_pts;
    s->next_pts = pts + frame->nb_samples;

    /* check if at least a part of the frame is after the start time */
    if (s->start_sample < 0 && s->start_pts == AV_NOPTS_VALUE) {
        start_sample = 0;
    } else {
        drop = 1;
        start_sample = frame->nb_samples;

        if (s->start_sample >= 0 &&
            s->nb_samples + frame->nb_samples > s->start_sample) {
            drop         = 0;
            start_sample = FFMIN(start_sample, s->start_sample - s->nb_samples);
        }

        if (s->start_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
            pts + frame->nb_samples > s->start_pts) {
            drop         = 0;
            start_sample = FFMIN(start_sample, s->start_pts - pts);
        }

        if (drop)
            goto drop;
    }

    if (s->first_pts == AV_NOPTS_VALUE)
        s->first_pts = pts + start_sample;

    /* check if at least a part of the frame is before the end time */
    if (s->end_sample == INT64_MAX && s->end_pts == AV_NOPTS_VALUE && !s->duration_tb) {
        end_sample = frame->nb_samples;
    } else {
        drop       = 1;
        end_sample = 0;

        if (s->end_sample != INT64_MAX &&
            s->nb_samples < s->end_sample) {
            drop       = 0;
            end_sample = FFMAX(end_sample, s->end_sample - s->nb_samples);
        }

        if (s->end_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
            pts < s->end_pts) {
            drop       = 0;
            end_sample = FFMAX(end_sample, s->end_pts - pts);
        }

        if (s->duration_tb && pts - s->first_pts < s->duration_tb) {
            drop       = 0;
            end_sample = FFMAX(end_sample, s->first_pts + s->duration_tb - pts);
        }

        if (drop) {
            s->eof = inlink->closed = 1;
            goto drop;
        }
    }

    s->nb_samples += frame->nb_samples;
    start_sample   = FFMAX(0, start_sample);
    end_sample     = FFMIN(frame->nb_samples, end_sample);
    av_assert0(start_sample < end_sample || (start_sample == end_sample && !frame->nb_samples));

    if (start_sample) {
        AVFrame *out = ff_get_audio_buffer(ctx->outputs[0], end_sample - start_sample);
        if (!out) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(out, frame);
        av_samples_copy(out->extended_data, frame->extended_data, 0, start_sample,
                        out->nb_samples, inlink->channels,
                        frame->format);
        if (out->pts != AV_NOPTS_VALUE)
            out->pts += av_rescale_q(start_sample, (AVRational){ 1, out->sample_rate },
                                     inlink->time_base);

        av_frame_free(&frame);
        frame = out;
    } else
        frame->nb_samples = end_sample;

    return ff_filter_frame(ctx->outputs[0], frame);

drop:
    s->nb_samples += frame->nb_samples;
    av_frame_free(&frame);
    return 0;
}
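
/*
 * Worked example of the sample accounting above (illustrative): with
 * start_sample=1500 and one 1024-sample frame already seen
 * (s->nb_samples == 1024), the next 1024-sample frame covers samples
 * 1024-2047, so the in-frame offset is FFMIN(1024, 1500 - 1024) = 476 and
 * the remaining 548 samples are copied into a fresh buffer by
 * av_samples_copy(), with the output pts shifted by the same offset.
 */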

#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption atrim_options[] = {
    COMMON_OPTS
    { "start_sample", "Number of the first audio sample that should be "
        "passed to the output", OFFSET(start_sample), AV_OPT_TYPE_INT64, { .i64 = -1 },        -1, INT64_MAX, FLAGS },
    { "end_sample",   "Number of the first audio sample that should be "
        "dropped again",        OFFSET(end_sample),   AV_OPT_TYPE_INT64, { .i64 = INT64_MAX },  0, INT64_MAX, FLAGS },
    COMPAT_OPTS
    { NULL }
};
#undef FLAGS

AVFILTER_DEFINE_CLASS(atrim);

static const AVFilterPad atrim_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = atrim_filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad atrim_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_af_atrim = {
    .name        = "atrim",
    .description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),
    .init        = init,
    .priv_size   = sizeof(TrimContext),
    .priv_class  = &atrim_class,
    .inputs      = atrim_inputs,
    .outputs     = atrim_outputs,
};
#endif // CONFIG_ATRIM_FILTER