# naver.py
import itertools
import re
from urllib.parse import urlparse, parse_qs

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    clean_html,
    dict_get,
    int_or_none,
    merge_dicts,
    parse_duration,
    traverse_obj,
    try_call,
    try_get,
    unified_timestamp,
    update_url_query,
)
  18. class NaverBaseIE(InfoExtractor):
  19. _CAPTION_EXT_RE = r'\.(?:ttml|vtt)'
  20. def _extract_video_info(self, video_id, vid, key):
  21. video_data = self._download_json(
  22. 'http://play.rmcnmv.naver.com/vod/play/v2.0/' + vid,
  23. video_id, query={
  24. 'key': key,
  25. })
  26. meta = video_data['meta']
  27. title = meta['subject']
  28. formats = []
  29. get_list = lambda x: try_get(video_data, lambda y: y[x + 's']['list'], list) or []
  30. def extract_formats(streams, stream_type, query={}):
  31. for stream in streams:
  32. stream_url = stream.get('source')
  33. if not stream_url:
  34. continue
  35. stream_url = update_url_query(stream_url, query)
  36. encoding_option = stream.get('encodingOption', {})
  37. bitrate = stream.get('bitrate', {})
  38. formats.append({
  39. 'format_id': '%s_%s' % (stream.get('type') or stream_type, dict_get(encoding_option, ('name', 'id'))),
  40. 'url': stream_url,
  41. 'ext': 'mp4',
  42. 'width': int_or_none(encoding_option.get('width')),
  43. 'height': int_or_none(encoding_option.get('height')),
  44. 'vbr': int_or_none(bitrate.get('video')),
  45. 'abr': int_or_none(bitrate.get('audio')),
  46. 'filesize': int_or_none(stream.get('size')),
  47. 'protocol': 'm3u8_native' if stream_type == 'HLS' else None,
  48. })
  49. extract_formats(get_list('video'), 'H264')
  50. for stream_set in video_data.get('streams', []):
  51. query = {}
  52. for param in stream_set.get('keys', []):
  53. query[param['name']] = param['value']
  54. stream_type = stream_set.get('type')
  55. videos = stream_set.get('videos')
  56. if videos:
  57. extract_formats(videos, stream_type, query)
  58. elif stream_type == 'HLS':
  59. stream_url = stream_set.get('source')
  60. if not stream_url:
  61. continue
  62. formats.extend(self._extract_m3u8_formats(
  63. update_url_query(stream_url, query), video_id,
  64. 'mp4', 'm3u8_native', m3u8_id=stream_type, fatal=False))
  65. self._sort_formats(formats)
  66. replace_ext = lambda x, y: re.sub(self._CAPTION_EXT_RE, '.' + y, x)
  67. def get_subs(caption_url):
  68. if re.search(self._CAPTION_EXT_RE, caption_url):
  69. return [{
  70. 'url': replace_ext(caption_url, 'ttml'),
  71. }, {
  72. 'url': replace_ext(caption_url, 'vtt'),
  73. }]
  74. else:
  75. return [{'url': caption_url}]
  76. automatic_captions = {}
  77. subtitles = {}
  78. for caption in get_list('caption'):
  79. caption_url = caption.get('source')
  80. if not caption_url:
  81. continue
  82. sub_dict = automatic_captions if caption.get('type') == 'auto' else subtitles
  83. sub_dict.setdefault(dict_get(caption, ('locale', 'language')), []).extend(get_subs(caption_url))
  84. user = meta.get('user', {})
  85. return {
  86. 'id': video_id,
  87. 'title': title,
  88. 'formats': formats,
  89. 'subtitles': subtitles,
  90. 'automatic_captions': automatic_captions,
  91. 'thumbnail': try_get(meta, lambda x: x['cover']['source']),
  92. 'view_count': int_or_none(meta.get('count')),
  93. 'uploader_id': user.get('id'),
  94. 'uploader': user.get('name'),
  95. 'uploader_url': user.get('url'),
  96. }
  97. class NaverIE(NaverBaseIE):
  98. _VALID_URL = r'https?://(?:m\.)?tv(?:cast)?\.naver\.com/(?:v|embed)/(?P<id>\d+)'
  99. _GEO_BYPASS = False
  100. _TESTS = [{
  101. 'url': 'http://tv.naver.com/v/81652',
  102. 'info_dict': {
  103. 'id': '81652',
  104. 'ext': 'mp4',
  105. 'title': '[9월 모의고사 해설강의][수학_김상희] 수학 A형 16~20번',
  106. 'description': '메가스터디 수학 김상희 선생님이 9월 모의고사 수학A형 16번에서 20번까지 해설강의를 공개합니다.',
  107. 'timestamp': 1378200754,
  108. 'upload_date': '20130903',
  109. 'uploader': '메가스터디, 합격불변의 법칙',
  110. 'uploader_id': 'megastudy',
  111. },
  112. }, {
  113. 'url': 'http://tv.naver.com/v/395837',
  114. 'md5': '8a38e35354d26a17f73f4e90094febd3',
  115. 'info_dict': {
  116. 'id': '395837',
  117. 'ext': 'mp4',
  118. 'title': '9년이 지나도 아픈 기억, 전효성의 아버지',
  119. 'description': 'md5:eb6aca9d457b922e43860a2a2b1984d3',
  120. 'timestamp': 1432030253,
  121. 'upload_date': '20150519',
  122. 'uploader': '4가지쇼 시즌2',
  123. 'uploader_id': 'wrappinguser29',
  124. },
  125. 'skip': 'Georestricted',
  126. }, {
  127. 'url': 'http://tvcast.naver.com/v/81652',
  128. 'only_matching': True,
  129. }]
  130. def _real_extract(self, url):
  131. video_id = self._match_id(url)
  132. content = self._download_json(
  133. 'https://tv.naver.com/api/json/v/' + video_id,
  134. video_id, headers=self.geo_verification_headers())
  135. player_info_json = content.get('playerInfoJson') or {}
  136. current_clip = player_info_json.get('currentClip') or {}
  137. vid = current_clip.get('videoId')
  138. in_key = current_clip.get('inKey')
  139. if not vid or not in_key:
  140. player_auth = try_get(player_info_json, lambda x: x['playerOption']['auth'])
  141. if player_auth == 'notCountry':
  142. self.raise_geo_restricted(countries=['KR'])
  143. elif player_auth == 'notLogin':
  144. self.raise_login_required()
  145. raise ExtractorError('couldn\'t extract vid and key')
  146. info = self._extract_video_info(video_id, vid, in_key)
  147. info.update({
  148. 'description': clean_html(current_clip.get('description')),
  149. 'timestamp': int_or_none(current_clip.get('firstExposureTime'), 1000),
  150. 'duration': parse_duration(current_clip.get('displayPlayTime')),
  151. 'like_count': int_or_none(current_clip.get('recommendPoint')),
  152. 'age_limit': 19 if current_clip.get('adult') else None,
  153. })
  154. return info
  155. class NaverLiveIE(InfoExtractor):
  156. IE_NAME = 'Naver:live'
  157. _VALID_URL = r'https?://(?:m\.)?tv(?:cast)?\.naver\.com/l/(?P<id>\d+)'
  158. _GEO_BYPASS = False
  159. _TESTS = [{
  160. 'url': 'https://tv.naver.com/l/52010',
  161. 'info_dict': {
  162. 'id': '52010',
  163. 'ext': 'mp4',
  164. 'title': '[LIVE] 뉴스특보 : "수도권 거리두기, 2주간 2단계로 조정"',
  165. 'description': 'md5:df7f0c237a5ed5e786ce5c91efbeaab3',
  166. 'channel_id': 'NTV-ytnnews24-0',
  167. 'start_time': 1597026780000,
  168. },
  169. }, {
  170. 'url': 'https://tv.naver.com/l/51549',
  171. 'info_dict': {
  172. 'id': '51549',
  173. 'ext': 'mp4',
  174. 'title': '연합뉴스TV - 코로나19 뉴스특보',
  175. 'description': 'md5:c655e82091bc21e413f549c0eaccc481',
  176. 'channel_id': 'NTV-yonhapnewstv-0',
  177. 'start_time': 1596406380000,
  178. },
  179. }, {
  180. 'url': 'https://tv.naver.com/l/54887',
  181. 'only_matching': True,
  182. }]
  183. def _real_extract(self, url):
  184. video_id = self._match_id(url)
  185. page = self._download_webpage(url, video_id, 'Downloading Page', 'Unable to download Page')
  186. secure_url = self._search_regex(r'sApiF:\s+(?:"|\')([^"\']+)', page, 'secureurl')
  187. info = self._extract_video_info(video_id, secure_url)
  188. info.update({
  189. 'description': self._og_search_description(page)
  190. })
  191. return info
  192. def _extract_video_info(self, video_id, url):
  193. video_data = self._download_json(url, video_id, headers=self.geo_verification_headers())
  194. meta = video_data.get('meta')
  195. status = meta.get('status')
  196. if status == 'CLOSED':
  197. raise ExtractorError('Stream is offline.', expected=True)
  198. elif status != 'OPENED':
  199. raise ExtractorError('Unknown status %s' % status)
  200. title = meta.get('title')
  201. stream_list = video_data.get('streams')
  202. if stream_list is None:
  203. raise ExtractorError('Could not get stream data.', expected=True)
  204. formats = []
  205. for quality in stream_list:
  206. if not quality.get('url'):
  207. continue
  208. prop = quality.get('property')
  209. if prop.get('abr'): # This abr doesn't mean Average audio bitrate.
  210. continue
  211. formats.extend(self._extract_m3u8_formats(
  212. quality.get('url'), video_id, 'mp4',
  213. m3u8_id=quality.get('qualityId'), live=True
  214. ))
  215. self._sort_formats(formats)
  216. return {
  217. 'id': video_id,
  218. 'title': title,
  219. 'formats': formats,
  220. 'channel_id': meta.get('channelId'),
  221. 'channel_url': meta.get('channelUrl'),
  222. 'thumbnail': meta.get('imgUrl'),
  223. 'start_time': meta.get('startTime'),
  224. 'categories': [meta.get('categoryId')],
  225. 'is_live': True
  226. }
  227. class NaverNowIE(NaverBaseIE):
  228. IE_NAME = 'navernow'
  229. _VALID_URL = r'https?://now\.naver\.com/show/(?P<id>[0-9]+)'
  230. _PAGE_SIZE = 30
  231. _API_URL = 'https://apis.naver.com/now_web/nowcms-api-xhmac/cms/v1'
  232. _TESTS = [{
  233. 'url': 'https://now.naver.com/show/4759?shareReplayId=5901#replay=',
  234. 'md5': 'e05854162c21c221481de16b2944a0bc',
  235. 'info_dict': {
  236. 'id': '4759-5901',
  237. 'title': '아이키X노제\r\n💖꽁냥꽁냥💖(1)',
  238. 'ext': 'mp4',
  239. 'thumbnail': r're:^https?://.*\.jpg',
  240. 'timestamp': 1650369600,
  241. 'upload_date': '20220419',
  242. 'uploader_id': 'now',
  243. 'view_count': int,
  244. },
  245. 'params': {
  246. 'noplaylist': True,
  247. }
  248. }, {
  249. 'url': 'https://now.naver.com/show/4759?shareHightlight=1078#highlight=',
  250. 'md5': '9f6118e398aa0f22b2152f554ea7851b',
  251. 'info_dict': {
  252. 'id': '4759-1078',
  253. 'title': '아이키: 나 리정한테 흔들렸어,,, 질투 폭발하는 노제 여보😾 [아이키의 떰즈업]ㅣ네이버 NOW.',
  254. 'ext': 'mp4',
  255. 'thumbnail': r're:^https?://.*\.jpg',
  256. 'upload_date': '20220504',
  257. 'timestamp': 1651648042,
  258. 'uploader_id': 'now',
  259. 'view_count': int,
  260. },
  261. 'params': {
  262. 'noplaylist': True,
  263. },
  264. }, {
  265. 'url': 'https://now.naver.com/show/4759',
  266. 'info_dict': {
  267. 'id': '4759',
  268. 'title': '아이키의 떰즈업',
  269. },
  270. 'playlist_mincount': 48
  271. }, {
  272. 'url': 'https://now.naver.com/show/4759?shareReplayId=5901#replay',
  273. 'info_dict': {
  274. 'id': '4759',
  275. 'title': '아이키의 떰즈업',
  276. },
  277. 'playlist_mincount': 48,
  278. }, {
  279. 'url': 'https://now.naver.com/show/4759?shareHightlight=1078#highlight=',
  280. 'info_dict': {
  281. 'id': '4759',
  282. 'title': '아이키의 떰즈업',
  283. },
  284. 'playlist_mincount': 48,
  285. }]
  286. def _extract_replay(self, show_id, replay_id):
  287. vod_info = self._download_json(f'{self._API_URL}/shows/{show_id}/vod/{replay_id}', replay_id)
  288. in_key = self._download_json(f'{self._API_URL}/shows/{show_id}/vod/{replay_id}/inkey', replay_id)['inKey']
  289. return merge_dicts({
  290. 'id': f'{show_id}-{replay_id}',
  291. 'title': traverse_obj(vod_info, ('episode', 'title')),
  292. 'timestamp': unified_timestamp(traverse_obj(vod_info, ('episode', 'start_time'))),
  293. 'thumbnail': vod_info.get('thumbnail_image_url'),
  294. }, self._extract_video_info(replay_id, vod_info['video_id'], in_key))
  295. def _extract_show_replays(self, show_id):
  296. page = 0
  297. while True:
  298. show_vod_info = self._download_json(
  299. f'{self._API_URL}/vod-shows/{show_id}', show_id,
  300. query={'offset': page * self._PAGE_SIZE, 'limit': self._PAGE_SIZE},
  301. note=f'Downloading JSON vod list for show {show_id} - page {page}'
  302. )['response']['result']
  303. for v in show_vod_info.get('vod_list') or []:
  304. yield self._extract_replay(show_id, v['id'])
  305. if try_call(lambda: show_vod_info['count'] <= self._PAGE_SIZE * (page + 1)):
  306. break
  307. page += 1
  308. def _extract_show_highlights(self, show_id, highlight_id=None):
  309. page = 0
  310. while True:
  311. highlights_videos = self._download_json(
  312. f'{self._API_URL}/shows/{show_id}/highlights/videos/', show_id,
  313. query={'offset': page * self._PAGE_SIZE, 'limit': self._PAGE_SIZE},
  314. note=f'Downloading JSON highlights for show {show_id} - page {page}')
  315. for highlight in highlights_videos.get('results') or []:
  316. if highlight_id and highlight.get('id') != int(highlight_id):
  317. continue
  318. yield merge_dicts({
  319. 'id': f'{show_id}-{highlight["id"]}',
  320. 'title': highlight.get('title'),
  321. 'timestamp': unified_timestamp(highlight.get('regdate')),
  322. 'thumbnail': highlight.get('thumbnail_url'),
  323. }, self._extract_video_info(highlight['id'], highlight['video_id'], highlight['video_inkey']))
  324. if try_call(lambda: highlights_videos['count'] <= self._PAGE_SIZE * (page + 1)):
  325. break
  326. page += 1
  327. def _extract_highlight(self, show_id, highlight_id):
  328. try:
  329. return next(self._extract_show_highlights(show_id, highlight_id))
  330. except StopIteration:
  331. raise ExtractorError(f'Unable to find highlight {highlight_id} for show {show_id}')
  332. def _real_extract(self, url):
  333. show_id = self._match_id(url)
  334. qs = parse_qs(urlparse(url).query)
  335. if not self._yes_playlist(show_id, qs.get('shareHightlight')):
  336. return self._extract_highlight(show_id, qs['shareHightlight'][0])
  337. elif not self._yes_playlist(show_id, qs.get('shareReplayId')):
  338. return self._extract_replay(show_id, qs['shareReplayId'][0])
  339. show_info = self._download_json(
  340. f'{self._API_URL}/shows/{show_id}', show_id,
  341. note=f'Downloading JSON vod list for show {show_id}')
  342. return self.playlist_result(
  343. itertools.chain(self._extract_show_replays(show_id), self._extract_show_highlights(show_id)),
  344. show_id, show_info.get('title'))