import urllib.parse

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    float_or_none,
    int_or_none,
    parse_qs,
    traverse_obj,
    try_get,
    unescapeHTML,
    update_url_query,
    url_or_none,
    urlencode_postdata,
)


class RedditIE(InfoExtractor):
    _NETRC_MACHINE = 'reddit'
    _VALID_URL = r'https?://(?P<host>(?:\w+\.)?reddit(?:media)?\.com)/(?P<slug>(?:(?:r|user)/[^/]+/)?comments/(?P<id>[^/?#&]+))'
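    # _VALID_URL groups: 'host' is any reddit.com/redditmedia.com (sub)domain,
    # 'slug' is the path used to fetch the post's JSON, and 'id' is the post ID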
    _TESTS = [{
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
        'info_dict': {
            'id': 'zv89llsvexdz',
            'ext': 'mp4',
            'display_id': '6rrwyj',
            'title': 'That small heart attack.',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:4',
            'timestamp': 1501941939,
            'upload_date': '20170805',
            'uploader': 'Antw87',
            'duration': 12,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 0,
            'channel_id': 'videos',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # 1080p fallback format
        'url': 'https://www.reddit.com/r/aww/comments/90bu6w/heat_index_was_110_degrees_so_we_offered_him_a/',
        'md5': '8b5902cfda3006bf90faea7adf765a49',
        'info_dict': {
            'id': 'gyh95hiqc0b11',
            'ext': 'mp4',
            'display_id': '90bu6w',
            'title': 'Heat index was 110 degrees so we offered him a cold drink. He went for a full body soak instead',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:7',
            'timestamp': 1532051078,
            'upload_date': '20180720',
            'uploader': 'FootLoosePickleJuice',
            'duration': 14,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 0,
            'channel_id': 'aww',
        },
    }, {
        # User post
        'url': 'https://www.reddit.com/user/creepyt0es/comments/nip71r/i_plan_to_make_more_stickers_and_prints_check/',
        'info_dict': {
            'id': 'zasobba6wp071',
            'ext': 'mp4',
            'display_id': 'nip71r',
            'title': 'I plan to make more stickers and prints! Check them out on my Etsy! Or get them through my Patreon. Links below.',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'thumbnails': 'count:5',
            'timestamp': 1621709093,
            'upload_date': '20210522',
            'uploader': 'creepyt0es',
            'duration': 6,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 18,
            'channel_id': 'u_creepyt0es',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # videos embedded in reddit text post
        'url': 'https://www.reddit.com/r/KamenRider/comments/wzqkxp/finale_kamen_rider_revice_episode_50_family_to/',
        'playlist_count': 2,
        'info_dict': {
            'id': 'wzqkxp',
            'title': 'md5:72d3d19402aa11eff5bd32fc96369b37',
        },
    }, {
        # crossposted reddit-hosted media
        'url': 'https://www.reddit.com/r/dumbfuckers_club/comments/zjjw82/cringe/',
        'md5': '746180895c7b75a9d6b05341f507699a',
        'info_dict': {
            'id': 'a1oneun6pa5a1',
            'ext': 'mp4',
            'display_id': 'zjjw82',
            'title': 'Cringe',
            'uploader': 'Otaku-senpai69420',
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'upload_date': '20221212',
            'timestamp': 1670812309,
            'duration': 16,
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'age_limit': 0,
            'channel_id': 'dumbfuckers_club',
        },
    }, {
        # post link without subreddit
        'url': 'https://www.reddit.com/comments/124pp33',
        'md5': '15eec9d828adcef4468b741a7e45a395',
        'info_dict': {
            'id': 'antsenjc2jqa1',
            'ext': 'mp4',
            'display_id': '124pp33',
            'title': 'Harmless prank of some old friends',
            'uploader': 'Dudezila',
            'channel_id': 'ContagiousLaughter',
            'duration': 17,
            'upload_date': '20230328',
            'timestamp': 1680012043,
            'thumbnail': r're:^https?://.*\.(?:jpg|png)',
            'age_limit': 0,
            'comment_count': int,
            'dislike_count': int,
            'like_count': int,
        },
    }, {
        # quarantined subreddit post
        'url': 'https://old.reddit.com/r/GenZedong/comments/12fujy3/based_hasan/',
        'md5': '3156ea69e3c1f1b6259683c5abd36e71',
        'info_dict': {
            'id': '8bwtclfggpsa1',
            'ext': 'mp4',
            'display_id': '12fujy3',
            'title': 'Based Hasan?',
            'uploader': 'KingNigelXLII',
            'channel_id': 'GenZedong',
            'duration': 16,
            'upload_date': '20230408',
            'timestamp': 1680979138,
            'age_limit': 0,
            'comment_count': int,
            'dislike_count': int,
            'like_count': int,
        },
        'skip': 'Requires account that has opted-in to the GenZedong subreddit',
    }, {
        # subtitles in HLS manifest
        'url': 'https://www.reddit.com/r/Unexpected/comments/1cl9h0u/the_insurance_claim_will_be_interesting/',
        'info_dict': {
            'id': 'a2mdj5d57qyc1',
            'ext': 'mp4',
            'display_id': '1cl9h0u',
            'title': 'The insurance claim will be interesting',
            'uploader': 'darrenpauli',
            'channel_id': 'Unexpected',
            'duration': 53,
            'upload_date': '20240506',
            'timestamp': 1714966382,
            'age_limit': 0,
            'comment_count': int,
            'dislike_count': int,
            'like_count': int,
            'subtitles': {'en': 'mincount:1'},
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # subtitles from caption-url
        'url': 'https://www.reddit.com/r/soccer/comments/1cxwzso/tottenham_1_0_newcastle_united_james_maddison_31/',
        'info_dict': {
            'id': 'xbmj4t3igy1d1',
            'ext': 'mp4',
            'display_id': '1cxwzso',
            'title': 'Tottenham [1] - 0 Newcastle United - James Maddison 31\'',
            'uploader': 'Woodstovia',
            'channel_id': 'soccer',
            'duration': 30,
            'upload_date': '20240522',
            'timestamp': 1716373798,
            'age_limit': 0,
            'comment_count': int,
            'dislike_count': int,
            'like_count': int,
            'subtitles': {'en': 'mincount:1'},
        },
        'params': {
            'skip_download': True,
            'writesubtitles': True,
        },
    }, {
        'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',
        'only_matching': True,
    }, {
        # imgur
        'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        # imgur @ old reddit
        'url': 'https://old.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/',
        'only_matching': True,
    }, {
        # streamable
        'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/',
        'only_matching': True,
    }, {
        # youtube
        'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/',
        'only_matching': True,
    }, {
        # reddit video @ nm reddit
        'url': 'https://nm.reddit.com/r/Cricket/comments/8idvby/lousy_cameraman_finds_himself_in_cairns_line_of/',
        'only_matching': True,
    }, {
        'url': 'https://www.redditmedia.com/r/serbia/comments/pu9wbx/ako_vu%C4%8Di%C4%87_izgubi_izbore_ja_%C4%87u_da_crknem/',
        'only_matching': True,
    }]

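    # Password login via Reddit's legacy /api/login endpoint; aborts early when a
    # captcha is required and treats a missing session cookie as a failed login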
    def _perform_login(self, username, password):
        captcha = self._download_json(
            'https://www.reddit.com/api/requires_captcha/login.json', None,
            'Checking login requirement')['required']
        if captcha:
            raise ExtractorError('Reddit is requiring captcha before login', expected=True)

        login = self._download_json(
            f'https://www.reddit.com/api/login/{username}', None, data=urlencode_postdata({
                'op': 'login-main',
                'user': username,
                'passwd': password,
                'api_type': 'json',
            }), note='Logging in', errnote='Login request failed')
        errors = '; '.join(traverse_obj(login, ('json', 'errors', ..., 1)))
        if errors:
            raise ExtractorError(f'Unable to login, Reddit API says {errors}', expected=True)
        elif not traverse_obj(login, ('json', 'data', 'cookie', {str})):
            raise ExtractorError('Unable to login, no cookie was returned')

    def _get_subtitles(self, video_id):
        # Fallback if there were no subtitles provided by DASH or HLS manifests
        caption_url = f'https://v.redd.it/{video_id}/wh_ben_en.vtt'
        if self._is_valid_url(caption_url, video_id, item='subtitles'):
            return {'en': [{'url': caption_url}]}

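    # Extraction flow: fetch the post JSON (retrying via the old./www. mirror),
    # surface quarantined/private 403s, then branch on where the media lives:
    # RedditVideo embeds in a text post, reddit-hosted video on v.redd.it,
    # a still-processing v.redd.it upload, or an external URL handed off via
    # url_transparent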
    def _real_extract(self, url):
        host, slug, video_id = self._match_valid_url(url).group('host', 'slug', 'id')

        data = self._download_json(
            f'https://{host}/{slug}/.json', video_id, fatal=False, expected_status=403)
        if not data:
            fallback_host = 'old.reddit.com' if host != 'old.reddit.com' else 'www.reddit.com'
            self.to_screen(f'{host} request failed, retrying with {fallback_host}')
            data = self._download_json(
                f'https://{fallback_host}/{slug}/.json', video_id, expected_status=403)

        if traverse_obj(data, 'error') == 403:
            reason = data.get('reason')
            if reason == 'quarantined':
                self.raise_login_required('Quarantined subreddit; an account that has opted in is required')
            elif reason == 'private':
                self.raise_login_required('Private subreddit; an account that has been approved is required')
            else:
                raise ExtractorError(f'HTTP Error 403 Forbidden; reason given: {reason}')

        data = data[0]['data']['children'][0]['data']
        video_url = data['url']

        over_18 = data.get('over_18')
        if over_18 is True:
            age_limit = 18
        elif over_18 is False:
            age_limit = 0
        else:
            age_limit = None

        thumbnails = []

        def add_thumbnail(src):
            if not isinstance(src, dict):
                return
            thumbnail_url = url_or_none(src.get('url'))
            if not thumbnail_url:
                return
            thumbnails.append({
                'url': unescapeHTML(thumbnail_url),
                'width': int_or_none(src.get('width')),
                'height': int_or_none(src.get('height')),
                'http_headers': {'Accept': '*/*'},
            })

        for image in try_get(data, lambda x: x['preview']['images']) or []:
            if not isinstance(image, dict):
                continue
            add_thumbnail(image.get('source'))
            resolutions = image.get('resolutions')
            if isinstance(resolutions, list):
                for resolution in resolutions:
                    add_thumbnail(resolution)

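        # Metadata shared by every return path below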
        info = {
            'title': data.get('title'),
            'thumbnails': thumbnails,
            'timestamp': float_or_none(data.get('created_utc')),
            'uploader': data.get('author'),
            'channel_id': data.get('subreddit'),
            'like_count': int_or_none(data.get('ups')),
            'dislike_count': int_or_none(data.get('downs')),
            'comment_count': int_or_none(data.get('num_comments')),
            'age_limit': age_limit,
        }

        parsed_url = urllib.parse.urlparse(video_url)

        # Check for embeds in text posts, or else raise to avoid recursing into the same reddit URL
        if 'reddit.com' in parsed_url.netloc and f'/{video_id}/' in parsed_url.path:
            entries = []
            for media in traverse_obj(data, ('media_metadata', ...), expected_type=dict):
                if not media.get('id') or media.get('e') != 'RedditVideo':
                    continue
                formats = []
                if media.get('hlsUrl'):
                    formats.extend(self._extract_m3u8_formats(
                        unescapeHTML(media['hlsUrl']), video_id, 'mp4', m3u8_id='hls', fatal=False))
                if media.get('dashUrl'):
                    formats.extend(self._extract_mpd_formats(
                        unescapeHTML(media['dashUrl']), video_id, mpd_id='dash', fatal=False))
                if formats:
                    entries.append({
                        'id': media['id'],
                        'display_id': video_id,
                        'formats': formats,
                        **info,
                    })
            if entries:
                return self.playlist_result(entries, video_id, info.get('title'))
            raise ExtractorError('No media found', expected=True)

        # Check if media is hosted on reddit:
        reddit_video = traverse_obj(data, (
            (None, ('crosspost_parent_list', ...)), ('secure_media', 'media'), 'reddit_video'), get_all=False)
        if reddit_video:
            playlist_urls = [
                try_get(reddit_video, lambda x: unescapeHTML(x[y]))
                for y in ('dash_url', 'hls_url')
            ]

            # Update video_id
            display_id = video_id
            video_id = self._search_regex(
                r'https?://v\.redd\.it/(?P<id>[^/?#&]+)', reddit_video['fallback_url'],
                'video_id', default=display_id)

            dash_playlist_url = playlist_urls[0] or f'https://v.redd.it/{video_id}/DASHPlaylist.mpd'
            hls_playlist_url = playlist_urls[1] or f'https://v.redd.it/{video_id}/HLSPlaylist.m3u8'
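            # Ask for subtitle renditions in the HLS manifest by appending 'subsAll'
            # to its 'f' query parameter ('hd,subsAll' if the parameter is absent)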
            qs = traverse_obj(parse_qs(hls_playlist_url), {
                'f': ('f', 0, {lambda x: ','.join([x, 'subsAll']) if x else 'hd,subsAll'}),
            })
            hls_playlist_url = update_url_query(hls_playlist_url, qs)

            formats = [{
                'url': unescapeHTML(reddit_video['fallback_url']),
                'height': int_or_none(reddit_video.get('height')),
                'width': int_or_none(reddit_video.get('width')),
                'tbr': int_or_none(reddit_video.get('bitrate_kbps')),
                'acodec': 'none',
                'vcodec': 'h264',
                'ext': 'mp4',
                'format_id': 'fallback',
                'format_note': 'DASH video, mp4_dash',
            }]

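            # fallback_url is declared video-only ('acodec': 'none'); the HLS and DASH
            # manifests provide the remaining formats, and their subtitles are merged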
            hls_fmts, subtitles = self._extract_m3u8_formats_and_subtitles(
                hls_playlist_url, display_id, 'mp4', m3u8_id='hls', fatal=False)
            formats.extend(hls_fmts)
            dash_fmts, dash_subs = self._extract_mpd_formats_and_subtitles(
                dash_playlist_url, display_id, mpd_id='dash', fatal=False)
            formats.extend(dash_fmts)
            self._merge_subtitles(dash_subs, target=subtitles)

            return {
                **info,
                'id': video_id,
                'display_id': display_id,
                'formats': formats,
                'subtitles': subtitles or self.extract_subtitles(video_id),
                'duration': int_or_none(reddit_video.get('duration')),
            }

        if parsed_url.netloc == 'v.redd.it':
            self.raise_no_formats('This video is processing', expected=True, video_id=video_id)
            return {
                **info,
                'id': parsed_url.path.split('/')[1],
                'display_id': video_id,
            }

        # Not hosted on reddit, must continue extraction
        return {
            **info,
            'display_id': video_id,
            '_type': 'url_transparent',
            'url': video_url,
        }