# rokfin.py
  1. import datetime as dt
  2. import itertools
  3. import json
  4. import re
  5. import urllib.parse
  6. from .common import InfoExtractor, SearchInfoExtractor
  7. from ..utils import (
  8. ExtractorError,
  9. determine_ext,
  10. float_or_none,
  11. format_field,
  12. int_or_none,
  13. str_or_none,
  14. traverse_obj,
  15. try_get,
  16. unescapeHTML,
  17. unified_timestamp,
  18. url_or_none,
  19. urlencode_postdata,
  20. )
# Base endpoint of Rokfin's public v2 REST API; all metadata requests below build on it.
_API_BASE_URL = 'https://prod-api-v2.production.rokfin.com/api/v2/public/'
  22. class RokfinIE(InfoExtractor):
  23. _VALID_URL = r'https?://(?:www\.)?rokfin\.com/(?P<id>(?P<type>post|stream)/\d+)'
  24. _NETRC_MACHINE = 'rokfin'
  25. _AUTH_BASE = 'https://secure.rokfin.com/auth/realms/rokfin-web/protocol/openid-connect'
  26. _access_mgmt_tokens = {} # OAuth 2.0: RFC 6749, Sec. 1.4-5
  27. _TESTS = [{
  28. 'url': 'https://www.rokfin.com/post/57548/Mitt-Romneys-Crazy-Solution-To-Climate-Change',
  29. 'info_dict': {
  30. 'id': 'post/57548',
  31. 'ext': 'mp4',
  32. 'title': 'Mitt Romney\'s Crazy Solution To Climate Change',
  33. 'thumbnail': r're:https://img\.production\.rokfin\.com/.+',
  34. 'upload_date': '20211023',
  35. 'timestamp': 1634998029,
  36. 'channel': 'Jimmy Dore',
  37. 'channel_id': '65429',
  38. 'channel_url': 'https://rokfin.com/TheJimmyDoreShow',
  39. 'availability': 'public',
  40. 'live_status': 'not_live',
  41. 'dislike_count': int,
  42. 'like_count': int,
  43. 'duration': 213,
  44. },
  45. }, {
  46. 'url': 'https://rokfin.com/post/223/Julian-Assange-Arrested-Streaming-In-Real-Time',
  47. 'info_dict': {
  48. 'id': 'post/223',
  49. 'ext': 'mp4',
  50. 'title': 'Julian Assange Arrested: Streaming In Real Time',
  51. 'thumbnail': r're:https://img\.production\.rokfin\.com/.+',
  52. 'upload_date': '20190412',
  53. 'timestamp': 1555052644,
  54. 'channel': 'Ron Placone',
  55. 'channel_id': '10',
  56. 'channel_url': 'https://rokfin.com/RonPlacone',
  57. 'availability': 'public',
  58. 'live_status': 'not_live',
  59. 'dislike_count': int,
  60. 'like_count': int,
  61. 'tags': ['FreeThinkingMedia^', 'RealProgressives^'],
  62. },
  63. }, {
  64. 'url': 'https://www.rokfin.com/stream/10543/Its-A-Crazy-Mess-Regional-Director-Blows-Whistle-On-Pfizers-Vaccine-Trial-Data',
  65. 'info_dict': {
  66. 'id': 'stream/10543',
  67. 'ext': 'mp4',
  68. 'title': '"It\'s A Crazy Mess" Regional Director Blows Whistle On Pfizer\'s Vaccine Trial Data',
  69. 'thumbnail': r're:https://img\.production\.rokfin\.com/.+',
  70. 'description': 'md5:324ce2d3e3b62e659506409e458b9d8e',
  71. 'channel': 'TLAVagabond',
  72. 'channel_id': '53856',
  73. 'channel_url': 'https://rokfin.com/TLAVagabond',
  74. 'availability': 'public',
  75. 'is_live': False,
  76. 'was_live': True,
  77. 'live_status': 'was_live',
  78. 'timestamp': 1635874720,
  79. 'release_timestamp': 1635874720,
  80. 'release_date': '20211102',
  81. 'upload_date': '20211102',
  82. 'dislike_count': int,
  83. 'like_count': int,
  84. 'tags': ['FreeThinkingMedia^'],
  85. },
  86. }, {
  87. 'url': 'https://rokfin.com/post/126703/Brave-New-World--Aldous-Huxley-DEEPDIVE--Chpts-13--Quite-Frankly--Jay-Dyer',
  88. 'info_dict': {
  89. 'id': 'post/126703',
  90. 'ext': 'mp4',
  91. 'title': 'Brave New World - Aldous Huxley DEEPDIVE! (Chpts 1-3) - Quite Frankly & Jay Dyer',
  92. 'thumbnail': r're:https://img\.production\.rokfin\.com/.+',
  93. 'channel': 'Jay Dyer',
  94. 'channel_id': '186881',
  95. 'channel_url': 'https://rokfin.com/jaydyer',
  96. 'availability': 'premium_only',
  97. 'live_status': 'not_live',
  98. 'dislike_count': int,
  99. 'like_count': int,
  100. 'timestamp': 1678213357,
  101. 'upload_date': '20230307',
  102. 'tags': ['FreeThinkingMedia^', 'OpenMind^'],
  103. 'description': 'md5:cb04e32e68326c9b2b251b297bacff35',
  104. 'duration': 3100,
  105. },
  106. }, {
  107. 'url': 'https://rokfin.com/stream/31332/The-Grayzone-live-on-Nordstream-blame-game',
  108. 'info_dict': {
  109. 'id': 'stream/31332',
  110. 'ext': 'mp4',
  111. 'title': 'The Grayzone live on Nordstream blame game',
  112. 'thumbnail': r're:https://image\.v\.rokfin\.com/.+',
  113. 'channel': 'Max Blumenthal',
  114. 'channel_id': '248902',
  115. 'channel_url': 'https://rokfin.com/MaxBlumenthal',
  116. 'availability': 'premium_only',
  117. 'live_status': 'was_live',
  118. 'dislike_count': int,
  119. 'like_count': int,
  120. 'timestamp': 1678475166,
  121. 'release_timestamp': 1678475166.0,
  122. 'release_date': '20230310',
  123. 'upload_date': '20230310',
  124. 'tags': ['FreeThinkingMedia^'],
  125. },
  126. }]
  127. def _real_extract(self, url):
  128. video_id, video_type = self._match_valid_url(url).group('id', 'type')
  129. metadata = self._download_json_using_access_token(f'{_API_BASE_URL}{video_id}', video_id)
  130. scheduled = unified_timestamp(metadata.get('scheduledAt'))
  131. live_status = ('was_live' if metadata.get('stoppedAt')
  132. else 'is_upcoming' if scheduled
  133. else 'is_live' if video_type == 'stream'
  134. else 'not_live')
  135. video_url = traverse_obj(metadata, 'url', ('content', 'contentUrl'), expected_type=url_or_none)
  136. if video_url in (None, 'fake.m3u8'):
  137. video_url = format_field(self._search_regex(
  138. r'https?://[^/]+/([^/]+)/storyboard.vtt',
  139. traverse_obj(metadata, 'timelineUrl', ('content', 'timelineUrl'), expected_type=url_or_none),
  140. video_id, default=None), None, 'https://stream.v.rokfin.com/%s.m3u8')
  141. formats, subtitles = [{'url': video_url}] if video_url else [], {}
  142. if determine_ext(video_url) == 'm3u8':
  143. formats, subtitles = self._extract_m3u8_formats_and_subtitles(
  144. video_url, video_id, fatal=False, live=live_status == 'is_live')
  145. if not formats:
  146. if traverse_obj(metadata, 'premiumPlan', 'premium'):
  147. self.raise_login_required('This video is only available to premium users', True, method='cookies')
  148. elif scheduled:
  149. self.raise_no_formats(
  150. f'Stream is offline; scheduled for {dt.datetime.fromtimestamp(scheduled).strftime("%Y-%m-%d %H:%M:%S")}',
  151. video_id=video_id, expected=True)
  152. uploader = traverse_obj(metadata, ('createdBy', 'username'), ('creator', 'username'))
  153. timestamp = (scheduled or float_or_none(metadata.get('postedAtMilli'), 1000)
  154. or unified_timestamp(metadata.get('creationDateTime')))
  155. return {
  156. 'id': video_id,
  157. 'formats': formats,
  158. 'subtitles': subtitles,
  159. 'title': str_or_none(traverse_obj(metadata, 'title', ('content', 'contentTitle'))),
  160. 'duration': float_or_none(traverse_obj(metadata, ('content', 'duration'))),
  161. 'thumbnail': url_or_none(traverse_obj(metadata, 'thumbnail', ('content', 'thumbnailUrl1'))),
  162. 'description': str_or_none(traverse_obj(metadata, 'description', ('content', 'contentDescription'))),
  163. 'like_count': int_or_none(metadata.get('likeCount')),
  164. 'dislike_count': int_or_none(metadata.get('dislikeCount')),
  165. 'channel': str_or_none(traverse_obj(metadata, ('createdBy', 'name'), ('creator', 'name'))),
  166. 'channel_id': str_or_none(traverse_obj(metadata, ('createdBy', 'id'), ('creator', 'id'))),
  167. 'channel_url': url_or_none(f'https://rokfin.com/{uploader}') if uploader else None,
  168. 'timestamp': timestamp,
  169. 'release_timestamp': timestamp if live_status != 'not_live' else None,
  170. 'tags': traverse_obj(metadata, ('tags', ..., 'title'), expected_type=str_or_none),
  171. 'live_status': live_status,
  172. 'availability': self._availability(
  173. needs_premium=bool(traverse_obj(metadata, 'premiumPlan', 'premium')),
  174. is_private=False, needs_subscription=False, needs_auth=False, is_unlisted=False),
  175. # 'comment_count': metadata.get('numComments'), # Data provided by website is wrong
  176. '__post_extractor': self.extract_comments(video_id) if video_type == 'post' else None,
  177. }
  178. def _get_comments(self, video_id):
  179. pages_total = None
  180. for page_n in itertools.count():
  181. raw_comments = self._download_json(
  182. f'{_API_BASE_URL}comment?postId={video_id[5:]}&page={page_n}&size=50',
  183. video_id, note=f'Downloading viewer comments page {page_n + 1}{format_field(pages_total, None, " of %s")}',
  184. fatal=False) or {}
  185. for comment in raw_comments.get('content') or []:
  186. yield {
  187. 'text': str_or_none(comment.get('comment')),
  188. 'author': str_or_none(comment.get('name')),
  189. 'id': comment.get('commentId'),
  190. 'author_id': comment.get('userId'),
  191. 'parent': 'root',
  192. 'like_count': int_or_none(comment.get('numLikes')),
  193. 'dislike_count': int_or_none(comment.get('numDislikes')),
  194. 'timestamp': unified_timestamp(comment.get('postedAt')),
  195. }
  196. pages_total = int_or_none(raw_comments.get('totalPages')) or None
  197. is_last = raw_comments.get('last')
  198. if not raw_comments.get('content') or is_last or (page_n > pages_total if pages_total else is_last is not False):
  199. return
  200. def _perform_login(self, username, password):
  201. # https://openid.net/specs/openid-connect-core-1_0.html#CodeFlowAuth (Sec. 3.1)
  202. login_page = self._download_webpage(
  203. f'{self._AUTH_BASE}/auth?client_id=web&redirect_uri=https%3A%2F%2Frokfin.com%2Ffeed&response_mode=fragment&response_type=code&scope=openid',
  204. None, note='loading login page', errnote='error loading login page')
  205. authentication_point_url = unescapeHTML(self._search_regex(
  206. r'<form\s+[^>]+action\s*=\s*"(https://secure\.rokfin\.com/auth/realms/rokfin-web/login-actions/authenticate\?[^"]+)"',
  207. login_page, name='Authentication URL'))
  208. resp_body = self._download_webpage(
  209. authentication_point_url, None, note='logging in', fatal=False, expected_status=404,
  210. data=urlencode_postdata({'username': username, 'password': password, 'rememberMe': 'off', 'credentialId': ''}))
  211. if not self._authentication_active():
  212. if re.search(r'(?i)(invalid\s+username\s+or\s+password)', resp_body or ''):
  213. raise ExtractorError('invalid username/password', expected=True)
  214. raise ExtractorError('Login failed')
  215. urlh = self._request_webpage(
  216. f'{self._AUTH_BASE}/auth', None,
  217. note='granting user authorization', errnote='user authorization rejected by Rokfin',
  218. query={
  219. 'client_id': 'web',
  220. 'prompt': 'none',
  221. 'redirect_uri': 'https://rokfin.com/silent-check-sso.html',
  222. 'response_mode': 'fragment',
  223. 'response_type': 'code',
  224. 'scope': 'openid',
  225. })
  226. self._access_mgmt_tokens = self._download_json(
  227. f'{self._AUTH_BASE}/token', None,
  228. note='getting access credentials', errnote='error getting access credentials',
  229. data=urlencode_postdata({
  230. 'code': urllib.parse.parse_qs(urllib.parse.urldefrag(urlh.url).fragment).get('code')[0],
  231. 'client_id': 'web',
  232. 'grant_type': 'authorization_code',
  233. 'redirect_uri': 'https://rokfin.com/silent-check-sso.html',
  234. }))
  235. def _authentication_active(self):
  236. return not (
  237. {'KEYCLOAK_IDENTITY', 'KEYCLOAK_IDENTITY_LEGACY', 'KEYCLOAK_SESSION', 'KEYCLOAK_SESSION_LEGACY'}
  238. - set(self._get_cookies(self._AUTH_BASE)))
  239. def _get_auth_token(self):
  240. return try_get(self._access_mgmt_tokens, lambda x: ' '.join([x['token_type'], x['access_token']]))
  241. def _download_json_using_access_token(self, url_or_request, video_id, headers={}, query={}):
  242. assert 'authorization' not in headers
  243. headers = headers.copy()
  244. auth_token = self._get_auth_token()
  245. refresh_token = self._access_mgmt_tokens.get('refresh_token')
  246. if auth_token:
  247. headers['authorization'] = auth_token
  248. json_string, urlh = self._download_webpage_handle(
  249. url_or_request, video_id, headers=headers, query=query, expected_status=401)
  250. if not auth_token or urlh.status != 401 or refresh_token is None:
  251. return self._parse_json(json_string, video_id)
  252. self._access_mgmt_tokens = self._download_json(
  253. f'{self._AUTH_BASE}/token', video_id,
  254. note='User authorization expired or canceled by Rokfin. Re-authorizing ...', errnote='Failed to re-authorize',
  255. data=urlencode_postdata({
  256. 'grant_type': 'refresh_token',
  257. 'refresh_token': refresh_token,
  258. 'client_id': 'web',
  259. }))
  260. headers['authorization'] = self._get_auth_token()
  261. if headers['authorization'] is None:
  262. raise ExtractorError('User authorization lost', expected=True)
  263. return self._download_json(url_or_request, video_id, headers=headers, query=query)
  264. class RokfinPlaylistBaseIE(InfoExtractor):
  265. _TYPES = {
  266. 'video': 'post',
  267. 'audio': 'post',
  268. 'stream': 'stream',
  269. 'dead_stream': 'stream',
  270. 'stack': 'stack',
  271. }
  272. def _get_video_data(self, metadata):
  273. for content in metadata.get('content') or []:
  274. media_type = self._TYPES.get(content.get('mediaType'))
  275. video_id = content.get('id') if media_type == 'post' else content.get('mediaId')
  276. if not media_type or not video_id:
  277. continue
  278. yield self.url_result(f'https://rokfin.com/{media_type}/{video_id}', video_id=f'{media_type}/{video_id}',
  279. video_title=str_or_none(traverse_obj(content, ('content', 'contentTitle'))))
  280. class RokfinStackIE(RokfinPlaylistBaseIE):
  281. IE_NAME = 'rokfin:stack'
  282. IE_DESC = 'Rokfin Stacks'
  283. _VALID_URL = r'https?://(?:www\.)?rokfin\.com/stack/(?P<id>[^/]+)'
  284. _TESTS = [{
  285. 'url': 'https://www.rokfin.com/stack/271/Tulsi-Gabbard-Portsmouth-Townhall-FULL--Feb-9-2020',
  286. 'playlist_count': 8,
  287. 'info_dict': {
  288. 'id': '271',
  289. },
  290. }]
  291. def _real_extract(self, url):
  292. list_id = self._match_id(url)
  293. return self.playlist_result(self._get_video_data(
  294. self._download_json(f'{_API_BASE_URL}stack/{list_id}', list_id)), list_id)
  295. class RokfinChannelIE(RokfinPlaylistBaseIE):
  296. IE_NAME = 'rokfin:channel'
  297. IE_DESC = 'Rokfin Channels'
  298. _VALID_URL = r'https?://(?:www\.)?rokfin\.com/(?!((feed/?)|(discover/?)|(channels/?))$)(?P<id>[^/]+)/?$'
  299. _TESTS = [{
  300. 'url': 'https://rokfin.com/TheConvoCouch',
  301. 'playlist_mincount': 100,
  302. 'info_dict': {
  303. 'id': '12071-new',
  304. 'title': 'TheConvoCouch - New',
  305. 'description': 'md5:bb622b1bca100209b91cd685f7847f06',
  306. },
  307. }]
  308. _TABS = {
  309. 'new': 'posts',
  310. 'top': 'top',
  311. 'videos': 'video',
  312. 'podcasts': 'audio',
  313. 'streams': 'stream',
  314. 'stacks': 'stack',
  315. }
  316. def _real_initialize(self):
  317. self._validate_extractor_args()
  318. def _validate_extractor_args(self):
  319. requested_tabs = self._configuration_arg('tab', None)
  320. if requested_tabs is not None and (len(requested_tabs) > 1 or requested_tabs[0] not in self._TABS):
  321. raise ExtractorError(f'Invalid extractor-arg "tab". Must be one of {", ".join(self._TABS)}', expected=True)
  322. def _entries(self, channel_id, channel_name, tab):
  323. pages_total = None
  324. for page_n in itertools.count(0):
  325. if tab in ('posts', 'top'):
  326. data_url = f'{_API_BASE_URL}user/{channel_name}/{tab}?page={page_n}&size=50'
  327. else:
  328. data_url = f'{_API_BASE_URL}post/search/{tab}?page={page_n}&size=50&creator={channel_id}'
  329. metadata = self._download_json(
  330. data_url, channel_name,
  331. note=f'Downloading video metadata page {page_n + 1}{format_field(pages_total, None, " of %s")}')
  332. yield from self._get_video_data(metadata)
  333. pages_total = int_or_none(metadata.get('totalPages')) or None
  334. is_last = metadata.get('last')
  335. if is_last or (page_n > pages_total if pages_total else is_last is not False):
  336. return
  337. def _real_extract(self, url):
  338. channel_name = self._match_id(url)
  339. channel_info = self._download_json(f'{_API_BASE_URL}user/{channel_name}', channel_name)
  340. channel_id = channel_info['id']
  341. tab = self._configuration_arg('tab', default=['new'])[0]
  342. return self.playlist_result(
  343. self._entries(channel_id, channel_name, self._TABS[tab]),
  344. f'{channel_id}-{tab}', f'{channel_name} - {tab.title()}', str_or_none(channel_info.get('description')))
class RokfinSearchIE(SearchInfoExtractor):
    IE_NAME = 'rokfin:search'
    IE_DESC = 'Rokfin Search'
    _SEARCH_KEY = 'rkfnsearch'
    # content_type -> (traversal path of the numeric ID within a search hit, URL path prefix)
    _TYPES = {
        'video': (('id', 'raw'), 'post'),
        'audio': (('id', 'raw'), 'post'),
        'stream': (('content_id', 'raw'), 'stream'),
        'dead_stream': (('content_id', 'raw'), 'stream'),
        'stack': (('content_id', 'raw'), 'stack'),
    }
    _TESTS = [{
        'url': 'rkfnsearch5:"zelenko"',
        'playlist_count': 5,
        'info_dict': {
            'id': '"zelenko"',
            'title': '"zelenko"',
        },
    }]
    # Search backend endpoint URL and 'Bearer ...' key, scraped from the site's JS bundles
    _db_url = None
    _db_access_key = None

    def _real_initialize(self):
        # Reuse cached search credentials when available; scrape them otherwise
        self._db_url, self._db_access_key = self.cache.load(self.ie_key(), 'auth', default=(None, None))
        if not self._db_url:
            self._get_db_access_credentials()

    def _search_results(self, query):
        """Yield url_result entries for each search hit, 100 per page, until a page is empty."""
        total_pages = None
        for page_number in itertools.count(1):
            search_results = self._run_search_query(
                query, data={'query': query, 'page': {'size': 100, 'current': page_number}},
                note=f'Downloading page {page_number}{format_field(total_pages, None, " of ~%s")}')
            total_pages = traverse_obj(search_results, ('meta', 'page', 'total_pages'), expected_type=int_or_none)

            for result in search_results.get('results') or []:
                video_id_key, video_type = self._TYPES.get(traverse_obj(result, ('content_type', 'raw')), (None, None))
                video_id = traverse_obj(result, video_id_key, expected_type=int_or_none)
                if video_id and video_type:
                    yield self.url_result(url=f'https://rokfin.com/{video_type}/{video_id}')

            if not search_results.get('results'):
                return

    def _run_search_query(self, video_id, data, **kwargs):
        """POST a search query; on failure, refresh the scraped credentials and retry once."""
        data = json.dumps(data).encode()
        for attempt in range(2):
            # Only the second (post-refresh) attempt is fatal
            search_results = self._download_json(
                self._db_url, video_id, data=data, fatal=(attempt == 1),
                headers={'authorization': self._db_access_key}, **kwargs)
            if search_results:
                return search_results
            self.write_debug('Updating access credentials')
            self._get_db_access_credentials(video_id)

    def _get_db_access_credentials(self, video_id=None):
        """Scrape the search endpoint base and key from the site's static JS and cache them."""
        # Both values are embedded as REACT_APP_* constants in one of the JS bundles
        auth_data = {'SEARCH_KEY': None, 'ENDPOINT_BASE': None}
        notfound_err_page = self._download_webpage(
            'https://rokfin.com/discover', video_id, expected_status=404, note='Downloading home page')
        for js_file_path in re.findall(r'<script\b[^>]*\ssrc\s*=\s*"(/static/js/[^">]+)"', notfound_err_page):
            js_content = self._download_webpage(
                f'https://rokfin.com{js_file_path}', video_id, note='Downloading JavaScript file', fatal=False)
            auth_data.update(re.findall(
                rf'REACT_APP_({"|".join(auth_data.keys())})\s*:\s*"([^"]+)"', js_content or ''))
            # Keep scanning script files until both values have been found
            if not all(auth_data.values()):
                continue

            self._db_url = url_or_none(f'{auth_data["ENDPOINT_BASE"]}/api/as/v1/engines/rokfin-search/search.json')
            self._db_access_key = f'Bearer {auth_data["SEARCH_KEY"]}'
            self.cache.store(self.ie_key(), 'auth', (self._db_url, self._db_access_key))
            return
        raise ExtractorError('Unable to extract access credentials')