/*
 * Linux video grab interface
 * Copyright (c) 2000,2001 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
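
/*
 * V4L1 (Video4Linux version 1) capture input device.
 *
 * Possible invocation, as a sketch only: it assumes an ffmpeg build of the
 * same vintage with this input device enabled and a V4L1 device node at
 * /dev/video0 (the device path and the -s/-r values are assumptions about
 * the caller, not defined in this file):
 *
 *   ffmpeg -f video4linux -standard PAL -s 352x288 -r 25 -i /dev/video0 out.avi
 *
 * The demuxer name ("video4linux,v4l") and the "standard" option are
 * declared at the bottom of this file; picture size and frame rate reach
 * grab_read_header() through the caller-supplied AVFormatParameters.
 */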

#include "avdevice.h"

#undef __STRICT_ANSI__ //workaround due to broken kernel headers
#include "config.h"
#include "libavutil/rational.h"
#include "libavutil/imgutils.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavformat/internal.h"
#include "libavcodec/dsputil.h"
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>  /* errno */
#include <string.h> /* strerror(), memcpy() */
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>
#define _LINUX_TIME_H 1
#include <linux/videodev.h>
#include <time.h>

typedef struct {
    AVClass *class;
    int fd;
    int frame_format; /* see VIDEO_PALETTE_xxx */
    int use_mmap;
    AVRational time_base;
    int64_t time_frame;
    int frame_size;
    struct video_capability video_cap;
    struct video_audio audio_saved;
    struct video_window video_win;
    uint8_t *video_buf;
    struct video_mbuf gb_buffers;
    struct video_mmap gb_buf;
    int gb_frame;
    int standard;
} VideoData;

static const struct {
    int palette;
    int depth;
    enum AVPixelFormat pix_fmt;
} video_formats [] = {
    {.palette = VIDEO_PALETTE_YUV420P, .depth = 12, .pix_fmt = AV_PIX_FMT_YUV420P },
    {.palette = VIDEO_PALETTE_YUV422,  .depth = 16, .pix_fmt = AV_PIX_FMT_YUYV422 },
    {.palette = VIDEO_PALETTE_UYVY,    .depth = 16, .pix_fmt = AV_PIX_FMT_UYVY422 },
    {.palette = VIDEO_PALETTE_YUYV,    .depth = 16, .pix_fmt = AV_PIX_FMT_YUYV422 },
    /* NOTE: v4l uses BGR24, not RGB24 */
    {.palette = VIDEO_PALETTE_RGB24,   .depth = 24, .pix_fmt = AV_PIX_FMT_BGR24   },
    {.palette = VIDEO_PALETTE_RGB565,  .depth = 16, .pix_fmt = AV_PIX_FMT_BGR565  },
    {.palette = VIDEO_PALETTE_GREY,    .depth =  8, .pix_fmt = AV_PIX_FMT_GRAY8   },
};
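
/*
 * This table is consulted twice by grab_read_header(): first to map the
 * requested AVPixelFormat to a V4L1 palette when negotiating with the
 * driver, then to map the palette that was actually accepted back to a
 * pixel format and to size one frame (width * height * depth / 8 bytes).
 */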

static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
{
    VideoData *s = s1->priv_data;
    AVStream *st;
    int video_fd;
    int desired_palette, desired_depth;
    struct video_tuner tuner;
    struct video_audio audio;
    struct video_picture pict;
    int j;
    int vformat_num = FF_ARRAY_ELEMS(video_formats);

    av_log(s1, AV_LOG_WARNING, "V4L input device is deprecated and will be removed in the next release.\n");

    if (ap->time_base.den <= 0) {
        av_log(s1, AV_LOG_ERROR, "Wrong time base (%d)\n", ap->time_base.den);
        return -1;
    }
    s->time_base = ap->time_base;

    s->video_win.width  = ap->width;
    s->video_win.height = ap->height;

    st = avformat_new_stream(s1, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */

    video_fd = open(s1->filename, O_RDWR);
    if (video_fd < 0) {
        av_log(s1, AV_LOG_ERROR, "%s: %s\n", s1->filename, strerror(errno));
        goto fail;
    }

    if (ioctl(video_fd, VIDIOCGCAP, &s->video_cap) < 0) {
        av_log(s1, AV_LOG_ERROR, "VIDIOCGCAP: %s\n", strerror(errno));
        goto fail;
    }

    if (!(s->video_cap.type & VID_TYPE_CAPTURE)) {
        av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not handle capture\n");
        goto fail;
    }

    /* no values set, autodetect them */
    if (s->video_win.width <= 0 || s->video_win.height <= 0) {
        if (ioctl(video_fd, VIDIOCGWIN, &s->video_win, sizeof(s->video_win)) < 0) {
            av_log(s1, AV_LOG_ERROR, "VIDIOCGWIN: %s\n", strerror(errno));
            goto fail;
        }
    }

    if (av_image_check_size(s->video_win.width, s->video_win.height, 0, s1) < 0)
        goto fail;

    desired_palette = -1;
    desired_depth   = -1;
    for (j = 0; j < vformat_num; j++) {
        if (ap->pix_fmt == video_formats[j].pix_fmt) {
            desired_palette = video_formats[j].palette;
            desired_depth   = video_formats[j].depth;
            break;
        }
    }

    /* set tv standard */
    if (!ioctl(video_fd, VIDIOCGTUNER, &tuner)) {
        tuner.mode = s->standard;
        ioctl(video_fd, VIDIOCSTUNER, &tuner);
    }

    /* unmute audio */
    audio.audio = 0;
    ioctl(video_fd, VIDIOCGAUDIO, &audio);
    memcpy(&s->audio_saved, &audio, sizeof(audio));
    audio.flags &= ~VIDEO_AUDIO_MUTE;
    ioctl(video_fd, VIDIOCSAUDIO, &audio);

    ioctl(video_fd, VIDIOCGPICT, &pict);
    av_dlog(s1, "v4l: colour=%d hue=%d brightness=%d contrast=%d whiteness=%d\n",
            pict.colour, pict.hue, pict.brightness, pict.contrast, pict.whiteness);

    /* try to choose a suitable video format */
    pict.palette = desired_palette;
    pict.depth   = desired_depth;
    if (desired_palette == -1 || ioctl(video_fd, VIDIOCSPICT, &pict) < 0) {
        for (j = 0; j < vformat_num; j++) {
            pict.palette = video_formats[j].palette;
            pict.depth   = video_formats[j].depth;
            if (-1 != ioctl(video_fd, VIDIOCSPICT, &pict))
                break;
        }
        if (j >= vformat_num)
            goto fail1;
    }
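
    /*
     * Two capture paths follow: if the driver exposes a memory-mapped
     * buffer pool (VIDIOCGMBUF succeeds), frames are grabbed through
     * mmap()ed buffers; otherwise capture falls back to plain read()
     * on the device after enabling it with VIDIOCCAPTURE.
     */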
    if (ioctl(video_fd, VIDIOCGMBUF, &s->gb_buffers) < 0) {
        /* try to use read based access */
        int val;

        s->video_win.x         = 0;
        s->video_win.y         = 0;
        s->video_win.chromakey = -1;
        s->video_win.flags     = 0;

        if (ioctl(video_fd, VIDIOCSWIN, &s->video_win) < 0) {
            av_log(s1, AV_LOG_ERROR, "VIDIOCSWIN: %s\n", strerror(errno));
            goto fail;
        }

        s->frame_format = pict.palette;

        val = 1;
        if (ioctl(video_fd, VIDIOCCAPTURE, &val) < 0) {
            av_log(s1, AV_LOG_ERROR, "VIDIOCCAPTURE: %s\n", strerror(errno));
            goto fail;
        }

        s->time_frame = av_gettime() * s->time_base.den / s->time_base.num;
        s->use_mmap   = 0;
    } else {
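        /*
         * Memory-mapped capture: the driver reports a pool of
         * s->gb_buffers.frames buffers inside one mapping of
         * s->gb_buffers.size bytes, each frame starting at
         * s->gb_buffers.offsets[i]. Every buffer is queued with
         * VIDIOCMCAPTURE up front; v4l_mm_read_picture() later waits on
         * the oldest one with VIDIOCSYNC, copies it out and re-queues it,
         * cycling through the pool.
         */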
        s->video_buf = mmap(0, s->gb_buffers.size, PROT_READ | PROT_WRITE,
                            MAP_SHARED, video_fd, 0);
        if ((unsigned char *)-1 == s->video_buf) {
            s->video_buf = mmap(0, s->gb_buffers.size, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE, video_fd, 0);
            if ((unsigned char *)-1 == s->video_buf) {
                av_log(s1, AV_LOG_ERROR, "mmap: %s\n", strerror(errno));
                goto fail;
            }
        }
        s->gb_frame   = 0;
        s->time_frame = av_gettime() * s->time_base.den / s->time_base.num;

        /* start to grab the first frame */
        s->gb_buf.frame  = s->gb_frame % s->gb_buffers.frames;
        s->gb_buf.height = s->video_win.height;
        s->gb_buf.width  = s->video_win.width;
        s->gb_buf.format = pict.palette;

        if (ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf) < 0) {
            if (errno != EAGAIN) {
            fail1:
                av_log(s1, AV_LOG_ERROR, "VIDIOCMCAPTURE: %s\n", strerror(errno));
            } else {
                av_log(s1, AV_LOG_ERROR,
                       "Fatal: grab device does not receive any video signal\n");
            }
            goto fail;
        }
        for (j = 1; j < s->gb_buffers.frames; j++) {
            s->gb_buf.frame = j;
            ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
        }

        s->frame_format = s->gb_buf.format;
        s->use_mmap     = 1;
    }

    for (j = 0; j < vformat_num; j++) {
        if (s->frame_format == video_formats[j].palette) {
            s->frame_size = s->video_win.width * s->video_win.height *
                            video_formats[j].depth / 8;
            st->codec->pix_fmt = video_formats[j].pix_fmt;
            break;
        }
    }

    if (j >= vformat_num)
        goto fail;

    s->fd = video_fd;

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_RAWVIDEO;
    st->codec->width      = s->video_win.width;
    st->codec->height     = s->video_win.height;
    st->codec->time_base  = s->time_base;
    st->codec->bit_rate   = s->frame_size * 1 / av_q2d(st->codec->time_base) * 8;

    return 0;
 fail:
    if (video_fd >= 0)
        close(video_fd);
    return AVERROR(EIO);
}
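
/*
 * Dequeue the oldest mmap()ed buffer (VIDIOCSYNC), copy the frame into
 * buf, immediately hand the buffer back to the driver (VIDIOCMCAPTURE)
 * and advance to the next slot in the pool.
 */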
static int v4l_mm_read_picture(VideoData *s, uint8_t *buf)
{
    uint8_t *ptr;

    while (ioctl(s->fd, VIDIOCSYNC, &s->gb_frame) < 0 &&
           (errno == EAGAIN || errno == EINTR));

    ptr = s->video_buf + s->gb_buffers.offsets[s->gb_frame];
    memcpy(buf, ptr, s->frame_size);

    /* Setup to capture the next frame */
    s->gb_buf.frame = s->gb_frame;
    if (ioctl(s->fd, VIDIOCMCAPTURE, &s->gb_buf) < 0) {
        if (errno == EAGAIN)
            av_log(NULL, AV_LOG_ERROR, "Cannot Sync\n");
        else
            av_log(NULL, AV_LOG_ERROR, "VIDIOCMCAPTURE: %s\n", strerror(errno));
        return AVERROR(EIO);
    }

    /* This is now the grabbing frame */
    s->gb_frame = (s->gb_frame + 1) % s->gb_buffers.frames;

    return s->frame_size;
}

static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    VideoData *s = s1->priv_data;
    int64_t curtime, delay;
    struct timespec ts;
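
    /*
     * s->time_frame holds the target capture time in microseconds scaled
     * by time_base.den / time_base.num, so adding 1000000 below advances
     * it by exactly one frame interval; multiplying by
     * time_base.num / time_base.den converts it back to microseconds for
     * comparison against av_gettime().
     */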

    /* Calculate the time of the next frame */
    s->time_frame += INT64_C(1000000);

    /* wait based on the frame rate */
    for (;;) {
        curtime = av_gettime();
        delay   = s->time_frame * s->time_base.num / s->time_base.den - curtime;
        if (delay <= 0) {
            if (delay < INT64_C(-1000000) * s->time_base.num / s->time_base.den) {
                /* printf("grabbing is %d frames late (dropping)\n", (int) -(delay / 16666)); */
                s->time_frame += INT64_C(1000000);
            }
            break;
        }
        ts.tv_sec  = delay / 1000000;
        ts.tv_nsec = (delay % 1000000) * 1000;
        nanosleep(&ts, NULL);
    }

    if (av_new_packet(pkt, s->frame_size) < 0)
        return AVERROR(EIO);

    pkt->pts = curtime;

    /* read one frame */
    if (s->use_mmap) {
        return v4l_mm_read_picture(s, pkt->data);
    } else {
        if (read(s->fd, pkt->data, pkt->size) != pkt->size)
            return AVERROR(EIO);
        return s->frame_size;
    }
}

static int grab_read_close(AVFormatContext *s1)
{
    VideoData *s = s1->priv_data;

    if (s->use_mmap)
        munmap(s->video_buf, s->gb_buffers.size);

    /* mute audio. we must force it because the BTTV driver does not
       return its state correctly */
    s->audio_saved.flags |= VIDEO_AUDIO_MUTE;
    ioctl(s->fd, VIDIOCSAUDIO, &s->audio_saved);

    close(s->fd);
    return 0;
}

static const AVOption options[] = {
    { "standard", "", offsetof(VideoData, standard), AV_OPT_TYPE_INT, {.i64 = VIDEO_MODE_NTSC}, VIDEO_MODE_PAL, VIDEO_MODE_SECAM, AV_OPT_FLAG_DECODING_PARAM, "standard" },
    { "PAL",   "", 0, AV_OPT_TYPE_CONST, {.i64 = VIDEO_MODE_PAL},   0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
    { "SECAM", "", 0, AV_OPT_TYPE_CONST, {.i64 = VIDEO_MODE_SECAM}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
    { "NTSC",  "", 0, AV_OPT_TYPE_CONST, {.i64 = VIDEO_MODE_NTSC},  0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
    { NULL },
};

static const AVClass v4l_class = {
    .class_name = "V4L indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};

AVInputFormat ff_v4l_demuxer = {
    .name           = "video4linux,v4l",
    .long_name      = NULL_IF_CONFIG_SMALL("Video4Linux device grab"),
    .priv_data_size = sizeof(VideoData),
    .read_header    = grab_read_header,
    .read_packet    = grab_read_packet,
    .read_close     = grab_read_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &v4l_class,
};