avf_showwaves.c

/*
 * Copyright (c) 2012 Stefano Sabatini
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * audio to video multimedia filter
 */

#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
#include "formats.h"
#include "audio.h"
#include "video.h"
#include "internal.h"

enum ShowWavesMode {
    MODE_POINT,
    MODE_LINE,
    MODE_P2P,
    MODE_CENTERED_LINE,
    MODE_NB,
};

typedef struct {
    const AVClass *class;
    int w, h;
    AVRational rate;
    int buf_idx;
    int16_t *buf_idy;    /* y coordinate of previous sample for each channel */
    AVFrame *outpicref;
    int req_fullfilled;
    int n;
    int sample_count_mod;
    enum ShowWavesMode mode;
    int split_channels;
    void (*draw_sample)(uint8_t *buf, int height, int linesize,
                        int16_t sample, int16_t *prev_y, int intensity);
} ShowWavesContext;

#define OFFSET(x) offsetof(ShowWavesContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption showwaves_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "mode", "select display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_POINT}, 0, MODE_NB-1, FLAGS, "mode"},
        { "point", "draw a point for each sample",         0, AV_OPT_TYPE_CONST, {.i64=MODE_POINT},         .flags=FLAGS, .unit="mode"},
        { "line",  "draw a line for each sample",          0, AV_OPT_TYPE_CONST, {.i64=MODE_LINE},          .flags=FLAGS, .unit="mode"},
        { "p2p",   "draw a line between samples",          0, AV_OPT_TYPE_CONST, {.i64=MODE_P2P},           .flags=FLAGS, .unit="mode"},
        { "cline", "draw a centered line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_CENTERED_LINE}, .flags=FLAGS, .unit="mode"},
    { "n",    "set how many samples to show in the same point", OFFSET(n), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
    { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
    { "r",    "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, FLAGS },
    { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS },
    { NULL }
};
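
/* Usage illustration for the options above (added commentary, not part of the
 * original source; file names and frame count are placeholders): the filter
 * can be invoked from the ffmpeg command line as
 *
 *   ffmpeg -i input.wav -filter_complex \
 *       "showwaves=s=640x240:mode=p2p:rate=30:split_channels=1" \
 *       -frames:v 100 output.mp4
 *
 * where s, mode, rate and split_channels map directly to the AVOption
 * entries defined in showwaves_options.
 */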

AVFILTER_DEFINE_CLASS(showwaves);

static av_cold void uninit(AVFilterContext *ctx)
{
    ShowWavesContext *showwaves = ctx->priv;

    av_frame_free(&showwaves->outpicref);
    av_freep(&showwaves->buf_idy);
}

static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };

    /* set input audio formats */
    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ff_formats_ref(formats, &inlink->out_formats);

    layouts = ff_all_channel_layouts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    ff_formats_ref(formats, &inlink->out_samplerates);

    /* set output video format */
    formats = ff_make_format_list(pix_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ff_formats_ref(formats, &outlink->in_formats);

    return 0;
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    int nb_channels = inlink->channels;

    if (!showwaves->n)
        showwaves->n = FFMAX(1, ((double)inlink->sample_rate / (showwaves->w * av_q2d(showwaves->rate))) + 0.5);

    showwaves->buf_idx = 0;
    if (!(showwaves->buf_idy = av_mallocz_array(nb_channels, sizeof(*showwaves->buf_idy)))) {
        av_log(ctx, AV_LOG_ERROR, "Could not allocate showwaves buffer\n");
        return AVERROR(ENOMEM);
    }
    outlink->w = showwaves->w;
    outlink->h = showwaves->h;
    outlink->sample_aspect_ratio = (AVRational){1,1};

    outlink->frame_rate = av_div_q((AVRational){inlink->sample_rate,showwaves->n},
                                   (AVRational){showwaves->w,1});

    av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d r:%f n:%d\n",
           showwaves->w, showwaves->h, av_q2d(outlink->frame_rate), showwaves->n);
    return 0;
}
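
/* Worked example for the computation above (illustrative numbers, added
 * commentary not present in the original source): with a 44100 Hz input and
 * the default w = 600, rate = 25, n = FFMAX(1, 44100 / (600 * 25) + 0.5) = 3
 * samples per output column, and the resulting frame rate is
 * (44100 / 3) / 600 = 24.5 frames per second, i.e. close to but not exactly
 * the requested rate because n is rounded to an integer.
 */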

inline static int push_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowWavesContext *showwaves = outlink->src->priv;
    int nb_channels = inlink->channels;
    int ret, i;

    if ((ret = ff_filter_frame(outlink, showwaves->outpicref)) >= 0)
        showwaves->req_fullfilled = 1;
    showwaves->outpicref = NULL;
    showwaves->buf_idx = 0;
    /* reset the per-channel previous-y state; buf_idy holds nb_channels entries */
    for (i = 0; i < nb_channels; i++)
        showwaves->buf_idy[i] = 0;
    return ret;
}

static int request_frame(AVFilterLink *outlink)
{
    ShowWavesContext *showwaves = outlink->src->priv;
    AVFilterLink *inlink = outlink->src->inputs[0];
    int ret;

    showwaves->req_fullfilled = 0;
    do {
        ret = ff_request_frame(inlink);
    } while (!showwaves->req_fullfilled && ret >= 0);

    if (ret == AVERROR_EOF && showwaves->outpicref)
        push_frame(outlink);
    return ret;
}

#define MAX_INT16 ((1<<15) -1)

static void draw_sample_point(uint8_t *buf, int height, int linesize,
                              int16_t sample, int16_t *prev_y, int intensity)
{
    const int h = height/2 - av_rescale(sample, height/2, MAX_INT16);
    if (h >= 0 && h < height)
        buf[h * linesize] += intensity;
}
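
/* Illustration of the sample-to-row mapping used above (added commentary,
 * not in the original source): with height = 240, a sample of +32767 maps to
 * row 0 (top of the image), 0 maps to row 120 (the center line), and -32767
 * maps to row 240, which the range check then discards as out of bounds.
 */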

static void draw_sample_line(uint8_t *buf, int height, int linesize,
                             int16_t sample, int16_t *prev_y, int intensity)
{
    int k;
    const int h = height/2 - av_rescale(sample, height/2, MAX_INT16);
    int start   = height/2;
    int end     = av_clip(h, 0, height-1);
    if (start > end)
        FFSWAP(int16_t, start, end);
    for (k = start; k < end; k++)
        buf[k * linesize] += intensity;
}

static void draw_sample_p2p(uint8_t *buf, int height, int linesize,
                            int16_t sample, int16_t *prev_y, int intensity)
{
    int k;
    const int h = height/2 - av_rescale(sample, height/2, MAX_INT16);
    if (h >= 0 && h < height) {
        buf[h * linesize] += intensity;
        if (*prev_y && h != *prev_y) {
            int start = *prev_y;
            int end = av_clip(h, 0, height-1);
            if (start > end)
                FFSWAP(int16_t, start, end);
            for (k = start + 1; k < end; k++)
                buf[k * linesize] += intensity;
        }
    }
    *prev_y = h;
}

static void draw_sample_cline(uint8_t *buf, int height, int linesize,
                              int16_t sample, int16_t *prev_y, int intensity)
{
    int k;
    const int h     = av_rescale(abs(sample), height, UINT16_MAX);
    const int start = (height - h) / 2;
    const int end   = start + h;
    for (k = start; k < end; k++)
        buf[k * linesize] += intensity;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    const int nb_samples = insamples->nb_samples;
    AVFrame *outpicref = showwaves->outpicref;
    int linesize = outpicref ? outpicref->linesize[0] : 0;
    int16_t *p = (int16_t *)insamples->data[0];
    int nb_channels = inlink->channels;
    int i, j, ret = 0;
    const int n = showwaves->n;
    const int x = 255 / ((showwaves->split_channels ? 1 : nb_channels) * n); /* multiplication factor, pre-computed to avoid in-loop divisions */
    const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;

    /* draw data in the buffer */
    for (i = 0; i < nb_samples; i++) {
        if (!showwaves->outpicref) {
            showwaves->outpicref = outpicref =
                ff_get_video_buffer(outlink, outlink->w, outlink->h);
            if (!outpicref)
                return AVERROR(ENOMEM);
            outpicref->width  = outlink->w;
            outpicref->height = outlink->h;
            outpicref->pts = insamples->pts +
                             av_rescale_q((p - (int16_t *)insamples->data[0]) / nb_channels,
                                          (AVRational){ 1, inlink->sample_rate },
                                          outlink->time_base);
            linesize = outpicref->linesize[0];
            for (j = 0; j < outlink->h; j++)
                memset(outpicref->data[0] + j * linesize, 0, outlink->w);
        }
        for (j = 0; j < nb_channels; j++) {
            uint8_t *buf = outpicref->data[0] + showwaves->buf_idx;
            if (showwaves->split_channels)
                buf += j*ch_height*linesize;
            showwaves->draw_sample(buf, ch_height, linesize, *p++,
                                   &showwaves->buf_idy[j], x);
        }

        showwaves->sample_count_mod++;
        if (showwaves->sample_count_mod == n) {
            showwaves->sample_count_mod = 0;
            showwaves->buf_idx++;
        }
        if (showwaves->buf_idx == showwaves->w)
            if ((ret = push_frame(outlink)) < 0)
                break;
        outpicref = showwaves->outpicref;
    }

    av_frame_free(&insamples);
    return ret;
}

static av_cold int init(AVFilterContext *ctx)
{
    ShowWavesContext *showwaves = ctx->priv;

    switch (showwaves->mode) {
    case MODE_POINT:         showwaves->draw_sample = draw_sample_point; break;
    case MODE_LINE:          showwaves->draw_sample = draw_sample_line;  break;
    case MODE_P2P:           showwaves->draw_sample = draw_sample_p2p;   break;
    case MODE_CENTERED_LINE: showwaves->draw_sample = draw_sample_cline; break;
    default:
        return AVERROR_BUG;
    }
    return 0;
}

static const AVFilterPad showwaves_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad showwaves_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_avf_showwaves = {
    .name          = "showwaves",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a video output."),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowWavesContext),
    .inputs        = showwaves_inputs,
    .outputs       = showwaves_outputs,
    .priv_class    = &showwaves_class,
};