import hashlib
import itertools
import json
import re
import time

from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
    ExtractorError,
    decode_base_n,
    encode_base_n,
    filter_dict,
    float_or_none,
    format_field,
    get_element_by_attribute,
    int_or_none,
    lowercase_escape,
    str_or_none,
    str_to_int,
    traverse_obj,
    url_or_none,
    urlencode_postdata,
)
  24. _ENCODING_CHARS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_'
  25. def _pk_to_id(media_id):
  26. """Source: https://stackoverflow.com/questions/24437823/getting-instagram-post-url-from-media-id"""
  27. return encode_base_n(int(media_id.split('_')[0]), table=_ENCODING_CHARS)
  28. def _id_to_pk(shortcode):
  29. """Covert a shortcode to a numeric value"""
  30. return decode_base_n(shortcode[:11], table=_ENCODING_CHARS)
class InstagramBaseIE(InfoExtractor):
    """Shared login handling and media-JSON parsing helpers for all Instagram extractors."""

    _NETRC_MACHINE = 'instagram'
    # Class-level flag so every extractor instance shares a single login per process
    _IS_LOGGED_IN = False

    # Private mobile-web API used for metadata/comment requests
    _API_BASE_URL = 'https://i.instagram.com/api/v1'
    _LOGIN_URL = 'https://www.instagram.com/accounts/login'
    # Headers mimicking the official web client (X-IG-App-ID identifies the web app)
    _API_HEADERS = {
        'X-IG-App-ID': '936619743392459',
        'X-ASBD-ID': '198387',
        'X-IG-WWW-Claim': '0',
        'Origin': 'https://www.instagram.com',
        'Accept': '*/*',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36',
    }

    def _perform_login(self, username, password):
        """Log into instagram.com (at most once per process).

        Raises ExtractorError with the most specific reason the API reports
        (server message, wrong password, or unknown username).
        """
        if self._IS_LOGGED_IN:
            return

        login_webpage = self._download_webpage(
            self._LOGIN_URL, None, note='Downloading login webpage', errnote='Failed to download login webpage')
        # The login page embeds the CSRF token and rollout hash in window._sharedData
        shared_data = self._parse_json(self._search_regex(
            r'window\._sharedData\s*=\s*({.+?});', login_webpage, 'shared data', default='{}'), None)

        login = self._download_json(
            f'{self._LOGIN_URL}/ajax/', None, note='Logging in', headers={
                **self._API_HEADERS,
                'X-Requested-With': 'XMLHttpRequest',
                'X-CSRFToken': shared_data['config']['csrf_token'],
                'X-Instagram-AJAX': shared_data['rollout_hash'],
                'Referer': 'https://www.instagram.com/',
            }, data=urlencode_postdata({
                # Plaintext password wrapped in Instagram's browser "enc_password" envelope
                'enc_password': f'#PWD_INSTAGRAM_BROWSER:0:{int(time.time())}:{password}',
                'username': username,
                'queryParams': '{}',
                'optIntoOneTap': 'false',
                'stopDeletionNonce': '',
                'trustedDeviceRecords': '{}',
            }))

        if not login.get('authenticated'):
            if login.get('message'):
                raise ExtractorError(f'Unable to login: {login["message"]}')
            elif login.get('user'):
                # Username exists but authentication failed -> wrong password
                raise ExtractorError('Unable to login: Sorry, your password was incorrect. Please double-check your password.', expected=True)
            elif login.get('user') is False:
                raise ExtractorError('Unable to login: The username you entered doesn\'t belong to an account. Please check your username and try again.', expected=True)
            raise ExtractorError('Unable to login')
        InstagramBaseIE._IS_LOGGED_IN = True

    def _get_count(self, media, kind, *keys):
        # Try the web-API shape ({kind: {count}}), then GraphQL edge_media_<key> variants
        return traverse_obj(
            media, (kind, 'count'), *((f'edge_media_{key}', 'count') for key in keys),
            expected_type=int_or_none)

    def _get_dimension(self, name, media, webpage=None):
        # Prefer dimensions from the media JSON; fall back to og:/video: meta tags
        return (
            traverse_obj(media, ('dimensions', name), expected_type=int_or_none)
            or int_or_none(self._html_search_meta(
                (f'og:video:{name}', f'video:{name}'), webpage or '', default=None)))

    def _extract_nodes(self, nodes, is_direct=False):
        """Yield entries for GraphQL video nodes.

        With is_direct=True, yields playable info dicts (direct video URL);
        otherwise yields url-type results delegating back to InstagramIE.
        Non-video nodes are skipped.
        """
        for idx, node in enumerate(nodes, start=1):
            if node.get('__typename') != 'GraphVideo' and node.get('is_video') is not True:
                continue

            video_id = node.get('shortcode')

            if is_direct:
                info = {
                    'id': video_id or node['id'],
                    'url': node.get('video_url'),
                    'width': self._get_dimension('width', node),
                    'height': self._get_dimension('height', node),
                    'http_headers': {
                        'Referer': 'https://www.instagram.com/',
                    },
                }
            elif not video_id:
                # Cannot build a post URL without a shortcode
                continue
            else:
                info = {
                    '_type': 'url',
                    'ie_key': 'Instagram',
                    'id': video_id,
                    'url': f'https://instagram.com/p/{video_id}',
                }

            yield {
                **info,
                'title': node.get('title') or (f'Video {idx}' if is_direct else None),
                'description': traverse_obj(
                    node, ('edge_media_to_caption', 'edges', 0, 'node', 'text'), expected_type=str),
                'thumbnail': traverse_obj(
                    node, 'display_url', 'thumbnail_src', 'display_src', expected_type=url_or_none),
                'duration': float_or_none(node.get('video_duration')),
                'timestamp': int_or_none(node.get('taken_at_timestamp')),
                'view_count': int_or_none(node.get('video_view_count')),
                'comment_count': self._get_count(node, 'comments', 'preview_comment', 'to_comment', 'to_parent_comment'),
                'comment_count': self._get_count(node, 'comments', 'preview_comment', 'to_comment', 'to_parent_comment'),
            } if False else {
                **info,
                'title': node.get('title') or (f'Video {idx}' if is_direct else None),
                'description': traverse_obj(
                    node, ('edge_media_to_caption', 'edges', 0, 'node', 'text'), expected_type=str),
                'thumbnail': traverse_obj(
                    node, 'display_url', 'thumbnail_src', 'display_src', expected_type=url_or_none),
                'duration': float_or_none(node.get('video_duration')),
                'timestamp': int_or_none(node.get('taken_at_timestamp')),
                'view_count': int_or_none(node.get('video_view_count')),
                'comment_count': self._get_count(node, 'comments', 'preview_comment', 'to_comment', 'to_parent_comment'),
                'like_count': self._get_count(node, 'likes', 'preview_like'),
            }

    def _extract_product_media(self, product_media):
        """Extract formats/thumbnails from one web-API media item; returns {} if it has no video."""
        media_id = product_media.get('code') or _pk_to_id(product_media.get('pk'))
        vcodec = product_media.get('video_codec')
        dash_manifest_raw = product_media.get('video_dash_manifest')
        videos_list = product_media.get('video_versions')
        if not (dash_manifest_raw or videos_list):
            return {}

        formats = [{
            'format_id': fmt.get('id'),
            'url': fmt.get('url'),
            'width': fmt.get('width'),
            'height': fmt.get('height'),
            'vcodec': vcodec,
        } for fmt in videos_list or []]
        if dash_manifest_raw:
            # The API may inline a DASH manifest (XML string) with extra formats
            formats.extend(self._parse_mpd_formats(self._parse_xml(dash_manifest_raw, media_id), mpd_id='dash'))

        thumbnails = [{
            'url': thumbnail.get('url'),
            'width': thumbnail.get('width'),
            'height': thumbnail.get('height'),
        } for thumbnail in traverse_obj(product_media, ('image_versions2', 'candidates')) or []]
        return {
            'id': media_id,
            'duration': float_or_none(product_media.get('video_duration')),
            'formats': formats,
            'thumbnails': thumbnails,
        }

    def _extract_product(self, product_info):
        """Build the full info dict from web-API JSON; carousel posts become a playlist."""
        if isinstance(product_info, list):
            product_info = product_info[0]

        user_info = product_info.get('user') or {}
        info_dict = {
            # Only the first 19 digits of the pk are the media id proper
            'id': _pk_to_id(traverse_obj(product_info, 'pk', 'id', expected_type=str_or_none)[:19]),
            'title': product_info.get('title') or f'Video by {user_info.get("username")}',
            'description': traverse_obj(product_info, ('caption', 'text'), expected_type=str_or_none),
            'timestamp': int_or_none(product_info.get('taken_at')),
            'channel': user_info.get('username'),
            'uploader': user_info.get('full_name'),
            'uploader_id': str_or_none(user_info.get('pk')),
            'view_count': int_or_none(product_info.get('view_count')),
            'like_count': int_or_none(product_info.get('like_count')),
            'comment_count': int_or_none(product_info.get('comment_count')),
            # Comments are fetched lazily only when the user requested them
            '__post_extractor': self.extract_comments(_pk_to_id(product_info.get('pk'))),
            'http_headers': {
                'Referer': 'https://www.instagram.com/',
            },
        }
        carousel_media = product_info.get('carousel_media')
        if carousel_media:
            # Multi-media post: one playlist entry per carousel item, sharing post metadata
            return {
                '_type': 'playlist',
                **info_dict,
                'title': f'Post by {user_info.get("username")}',
                'entries': [{
                    **info_dict,
                    **self._extract_product_media(product_media),
                } for product_media in carousel_media],
            }

        return {
            **info_dict,
            **self._extract_product_media(product_info),
        }

    def _get_comments(self, video_id):
        """Yield comment dicts; handles both GraphQL ('node') and mobile-API shapes."""
        comments_info = self._download_json(
            f'{self._API_BASE_URL}/media/{_id_to_pk(video_id)}/comments/?can_support_threading=true&permalink_enabled=false', video_id,
            fatal=False, errnote='Comments extraction failed', note='Downloading comments info', headers=self._API_HEADERS) or {}

        comment_data = traverse_obj(comments_info, ('edge_media_to_parent_comment', 'edges'), 'comments')
        for comment_dict in comment_data or []:
            yield {
                'author': traverse_obj(comment_dict, ('node', 'owner', 'username'), ('user', 'username')),
                'author_id': traverse_obj(comment_dict, ('node', 'owner', 'id'), ('user', 'pk')),
                'author_thumbnail': traverse_obj(comment_dict, ('node', 'owner', 'profile_pic_url'), ('user', 'profile_pic_url'), expected_type=url_or_none),
                'id': traverse_obj(comment_dict, ('node', 'id'), 'pk'),
                'text': traverse_obj(comment_dict, ('node', 'text'), 'text'),
                'like_count': traverse_obj(comment_dict, ('node', 'edge_liked_by', 'count'), 'comment_like_count', expected_type=int_or_none),
                'timestamp': traverse_obj(comment_dict, ('node', 'created_at'), 'created_at', expected_type=int_or_none),
            }
class InstagramIOSIE(InfoExtractor):
    """Handles instagram:// URLs as produced by the iOS app's share sheet."""
    IE_DESC = 'IOS instagram:// URL'
    _VALID_URL = r'instagram://media\?id=(?P<id>[\d_]+)'
    _TESTS = [{
        'url': 'instagram://media?id=482584233761418119',
        'md5': '0d2da106a9d2631273e192b372806516',
        'info_dict': {
            'id': 'aye83DjauH',
            'ext': 'mp4',
            'title': 'Video by naomipq',
            'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 0,
            'timestamp': 1371748545,
            'upload_date': '20130620',
            'uploader_id': 'naomipq',
            'uploader': 'B E A U T Y F O R A S H E S',
            'like_count': int,
            'comment_count': int,
            'comments': list,
        },
        'add_ie': ['Instagram'],
    }]

    def _real_extract(self, url):
        # Convert the numeric iOS media id to a web shortcode and delegate to InstagramIE
        video_id = _pk_to_id(self._match_id(url))
        return self.url_result(f'http://instagram.com/tv/{video_id}', InstagramIE, video_id)
class InstagramIE(InstagramBaseIE):
    """Single posts, reels and IGTV videos (/p/, /tv/, /reel(s)/ URLs)."""
    _VALID_URL = r'(?P<url>https?://(?:www\.)?instagram\.com(?:/[^/]+)?/(?:p|tv|reels?(?!/audio/))/(?P<id>[^/?#&]+))'
    _EMBED_REGEX = [r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?instagram\.com/p/[^/]+/embed.*?)\1']
    _TESTS = [{
        'url': 'https://instagram.com/p/aye83DjauH/?foo=bar#abc',
        'md5': '0d2da106a9d2631273e192b372806516',
        'info_dict': {
            'id': 'aye83DjauH',
            'ext': 'mp4',
            'title': 'Video by naomipq',
            'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 8.747,
            'timestamp': 1371748545,
            'upload_date': '20130620',
            'uploader_id': '2815873',
            'uploader': 'B E A U T Y F O R A S H E S',
            'channel': 'naomipq',
            'like_count': int,
            'comment_count': int,
            'comments': list,
        },
        'expected_warnings': [
            'General metadata extraction failed',
            'Main webpage is locked behind the login page',
        ],
    }, {
        # reel
        'url': 'https://www.instagram.com/reel/Chunk8-jurw/',
        'md5': 'f6d8277f74515fa3ff9f5791426e42b1',
        'info_dict': {
            'id': 'Chunk8-jurw',
            'ext': 'mp4',
            'title': 'Video by instagram',
            'description': 'md5:c9cde483606ed6f80fbe9283a6a2b290',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 5.016,
            'timestamp': 1661529231,
            'upload_date': '20220826',
            'uploader_id': '25025320',
            'uploader': 'Instagram',
            'channel': 'instagram',
            'like_count': int,
            'comment_count': int,
            'comments': list,
        },
        'expected_warnings': [
            'General metadata extraction failed',
            'Main webpage is locked behind the login page',
        ],
    }, {
        # multi video post
        'url': 'https://www.instagram.com/p/BQ0eAlwhDrw/',
        'playlist': [{
            'info_dict': {
                'id': 'BQ0dSaohpPW',
                'ext': 'mp4',
                'title': 'Video 1',
                'thumbnail': r're:^https?://.*\.jpg',
                'view_count': int,
            },
        }, {
            'info_dict': {
                'id': 'BQ0dTpOhuHT',
                'ext': 'mp4',
                'title': 'Video 2',
                'thumbnail': r're:^https?://.*\.jpg',
                'view_count': int,
            },
        }, {
            'info_dict': {
                'id': 'BQ0dT7RBFeF',
                'ext': 'mp4',
                'title': 'Video 3',
                'thumbnail': r're:^https?://.*\.jpg',
                'view_count': int,
            },
        }],
        'info_dict': {
            'id': 'BQ0eAlwhDrw',
            'title': 'Post by instagram',
            'description': 'md5:0f9203fc6a2ce4d228da5754bcf54957',
        },
        'expected_warnings': [
            'General metadata extraction failed',
            'Main webpage is locked behind the login page',
        ],
    }, {
        # IGTV
        'url': 'https://www.instagram.com/tv/BkfuX9UB-eK/',
        'info_dict': {
            'id': 'BkfuX9UB-eK',
            'ext': 'mp4',
            'title': 'Fingerboarding Tricks with @cass.fb',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 53.83,
            'timestamp': 1530032919,
            'upload_date': '20180626',
            'uploader_id': '25025320',
            'uploader': 'Instagram',
            'channel': 'instagram',
            'like_count': int,
            'comment_count': int,
            'comments': list,
            'description': 'Meet Cass Hirst (@cass.fb), a fingerboarding pro who can perform tiny ollies and kickflips while blindfolded.',
        },
        'expected_warnings': [
            'General metadata extraction failed',
            'Main webpage is locked behind the login page',
        ],
    }, {
        'url': 'https://instagram.com/p/-Cmh1cukG2/',
        'only_matching': True,
    }, {
        'url': 'http://instagram.com/p/9o6LshA7zy/embed/',
        'only_matching': True,
    }, {
        'url': 'https://www.instagram.com/tv/aye83DjauH/',
        'only_matching': True,
    }, {
        'url': 'https://www.instagram.com/reel/CDUMkliABpa/',
        'only_matching': True,
    }, {
        'url': 'https://www.instagram.com/marvelskies.fc/reel/CWqAgUZgCku/',
        'only_matching': True,
    }, {
        'url': 'https://www.instagram.com/reels/Cop84x6u7CP/',
        'only_matching': True,
    }]

    @classmethod
    def _extract_embed_urls(cls, url, webpage):
        """Find embedded Instagram URLs: iframe embeds first, then blockquote-style embeds."""
        res = tuple(super()._extract_embed_urls(url, webpage))
        if res:
            return res

        # Fallback: the official embed blockquote wraps a plain <a href=...> link
        mobj = re.search(r'<a[^>]+href=([\'"])(?P<link>[^\'"]+)\1',
                         get_element_by_attribute('class', 'instagram-media', webpage) or '')
        if mobj:
            return [mobj.group('link')]

    def _real_extract(self, url):
        video_id, url = self._match_valid_url(url).group('id', 'url')
        # `media` accumulates metadata from progressively less reliable sources
        media, webpage = {}, ''

        # 1) Logged-in users get the full mobile-API response directly
        if self._get_cookies(url).get('sessionid'):
            info = traverse_obj(self._download_json(
                f'{self._API_BASE_URL}/media/{_id_to_pk(video_id)}/info/', video_id,
                fatal=False, errnote='Video info extraction failed',
                note='Downloading video info', headers=self._API_HEADERS), ('items', 0))
            if info:
                media.update(info)
                return self._extract_product(media)

        # 2) Anonymous: probe the ruling endpoint to obtain/validate a csrf token
        api_check = self._download_json(
            f'{self._API_BASE_URL}/web/get_ruling_for_content/?content_type=MEDIA&target_id={_id_to_pk(video_id)}',
            video_id, headers=self._API_HEADERS, fatal=False, note='Setting up session', errnote=False) or {}
        csrf_token = self._get_cookies('https://www.instagram.com').get('csrftoken')

        if not csrf_token:
            self.report_warning('No csrf token set by Instagram API', video_id)
        else:
            csrf_token = csrf_token.value if api_check.get('status') == 'ok' else None
            if not csrf_token:
                self.report_warning('Instagram API is not granting access', video_id)

        variables = {
            'shortcode': video_id,
            'child_comment_count': 3,
            'fetch_comment_count': 40,
            'parent_comment_count': 24,
            'has_threaded_comments': True,
        }
        # 3) Anonymous GraphQL query for the post's shortcode_media
        general_info = self._download_json(
            'https://www.instagram.com/graphql/query/', video_id, fatal=False, errnote=False,
            headers={
                **self._API_HEADERS,
                'X-CSRFToken': csrf_token or '',
                'X-Requested-With': 'XMLHttpRequest',
                'Referer': url,
            }, query={
                'query_hash': '9f8827793ef34641b2fb195d4d41151c',
                'variables': json.dumps(variables, separators=(',', ':')),
            })
        media.update(traverse_obj(general_info, ('data', 'shortcode_media')) or {})

        if not general_info:
            # 4) GraphQL failed: scrape the post webpage (may redirect to the login page)
            self.report_warning('General metadata extraction failed (some metadata might be missing).', video_id)
            webpage, urlh = self._download_webpage_handle(url, video_id)
            shared_data = self._search_json(
                r'window\._sharedData\s*=', webpage, 'shared data', video_id, fatal=False) or {}

            if shared_data and self._LOGIN_URL not in urlh.url:
                media.update(traverse_obj(
                    shared_data, ('entry_data', 'PostPage', 0, 'graphql', 'shortcode_media'),
                    ('entry_data', 'PostPage', 0, 'media'), expected_type=dict) or {})
            else:
                # 5) Last resort: the public /embed/ page
                self.report_warning('Main webpage is locked behind the login page. Retrying with embed webpage (some metadata might be missing).')
                webpage = self._download_webpage(
                    f'{url}/embed/', video_id, note='Downloading embed webpage', fatal=False) or ''
                additional_data = self._search_json(
                    r'window\.__additionalDataLoaded\s*\(\s*[^,]+,', webpage, 'additional data', video_id, fatal=False)
                if not additional_data and not media:
                    self.raise_login_required('Requested content is not available, rate-limit reached or login required')

                product_item = traverse_obj(additional_data, ('items', 0), expected_type=dict)
                if product_item:
                    media.update(product_item)
                    return self._extract_product(media)

                media.update(traverse_obj(
                    additional_data, ('graphql', 'shortcode_media'), 'shortcode_media', expected_type=dict) or {})

        username = traverse_obj(media, ('owner', 'username')) or self._search_regex(
            r'"owner"\s*:\s*{\s*"username"\s*:\s*"(.+?)"', webpage, 'username', fatal=False)

        description = (
            traverse_obj(media, ('edge_media_to_caption', 'edges', 0, 'node', 'text'), expected_type=str)
            or media.get('caption'))
        if not description:
            description = self._search_regex(
                r'"caption"\s*:\s*"(.+?)"', webpage, 'description', default=None)
            if description is not None:
                # Raw JSON string from the page still carries \uXXXX escapes
                description = lowercase_escape(description)

        video_url = media.get('video_url')
        if not video_url:
            # Sidecar children present -> multi-media post, return a playlist
            nodes = traverse_obj(media, ('edge_sidecar_to_children', 'edges', ..., 'node'), expected_type=dict) or []
            if nodes:
                return self.playlist_result(
                    self._extract_nodes(nodes, True), video_id,
                    format_field(username, None, 'Post by %s'), description)

            video_url = self._og_search_video_url(webpage, secure=False)

        formats = [{
            'url': video_url,
            'width': self._get_dimension('width', media, webpage),
            'height': self._get_dimension('height', media, webpage),
        }]
        dash = traverse_obj(media, ('dash_info', 'video_dash_manifest'))
        if dash:
            formats.extend(self._parse_mpd_formats(self._parse_xml(dash, video_id), mpd_id='dash'))

        comment_data = traverse_obj(media, ('edge_media_to_parent_comment', 'edges'))
        comments = [{
            'author': traverse_obj(comment_dict, ('node', 'owner', 'username')),
            'author_id': traverse_obj(comment_dict, ('node', 'owner', 'id')),
            'id': traverse_obj(comment_dict, ('node', 'id')),
            'text': traverse_obj(comment_dict, ('node', 'text')),
            'timestamp': traverse_obj(comment_dict, ('node', 'created_at'), expected_type=int_or_none),
        } for comment_dict in comment_data] if comment_data else None

        display_resources = (
            media.get('display_resources')
            or [{'src': media.get(key)} for key in ('display_src', 'display_url')]
            or [{'src': self._og_search_thumbnail(webpage)}])
        thumbnails = [{
            'url': thumbnail['src'],
            'width': thumbnail.get('config_width'),
            'height': thumbnail.get('config_height'),
        } for thumbnail in display_resources if thumbnail.get('src')]

        return {
            'id': video_id,
            'formats': formats,
            'title': media.get('title') or f'Video by {username}',
            'description': description,
            'duration': float_or_none(media.get('video_duration')),
            'timestamp': traverse_obj(media, 'taken_at_timestamp', 'date', expected_type=int_or_none),
            'uploader_id': traverse_obj(media, ('owner', 'id')),
            'uploader': traverse_obj(media, ('owner', 'full_name')),
            'channel': username,
            # Fall back to scraping the rendered like counter when JSON lacks it
            'like_count': self._get_count(media, 'likes', 'preview_like') or str_to_int(self._search_regex(
                r'data-log-event="likeCountClick"[^>]*>[^\d]*([\d,\.]+)', webpage, 'like count', fatal=False)),
            'comment_count': self._get_count(media, 'comments', 'preview_comment', 'to_comment', 'to_parent_comment'),
            'comments': comments,
            'thumbnails': thumbnails,
            'http_headers': {
                'Referer': 'https://www.instagram.com/',
            },
        }
class InstagramPlaylistBaseIE(InstagramBaseIE):
    """Base for paginated GraphQL timelines (user profiles, hashtag pages)."""
    _gis_tmpl = None  # used to cache GIS request type

    def _parse_graphql(self, webpage, item_id):
        # Reads a webpage and returns its GraphQL data.
        return self._parse_json(
            self._search_regex(
                r'sharedData\s*=\s*({.+?})\s*;\s*[<\n]', webpage, 'data'),
            item_id)

    def _extract_graphql(self, data, url):
        # Parses GraphQL queries containing videos and generates a playlist.
        uploader_id = self._match_id(url)
        csrf_token = data['config']['csrf_token']
        # Fallback rhx_gis value when the page doesn't supply one
        rhx_gis = data.get('rhx_gis') or '3c7ca9dcefcf966d11dacf1f151335e8'

        cursor = ''
        for page_num in itertools.count(1):
            variables = {
                'first': 12,
                'after': cursor,
            }
            variables.update(self._query_vars_for(data))
            variables = json.dumps(variables)

            if self._gis_tmpl:
                gis_tmpls = [self._gis_tmpl]
            else:
                # Candidate inputs to the X-Instagram-GIS signature, most common first
                gis_tmpls = [
                    f'{rhx_gis}',
                    '',
                    f'{rhx_gis}:{csrf_token}',
                    '{}:{}:{}'.format(rhx_gis, csrf_token, self.get_param('http_headers')['User-Agent']),
                ]

            # try all of the ways to generate a GIS query, and not only use the
            # first one that works, but cache it for future requests
            for gis_tmpl in gis_tmpls:
                try:
                    json_data = self._download_json(
                        'https://www.instagram.com/graphql/query/', uploader_id,
                        f'Downloading JSON page {page_num}', headers={
                            'X-Requested-With': 'XMLHttpRequest',
                            # GIS signature = md5("<template>:<variables>")
                            'X-Instagram-GIS': hashlib.md5(
                                (f'{gis_tmpl}:{variables}').encode()).hexdigest(),
                        }, query={
                            'query_hash': self._QUERY_HASH,
                            'variables': variables,
                        })
                    media = self._parse_timeline_from(json_data)
                    self._gis_tmpl = gis_tmpl
                    break
                except ExtractorError as e:
                    # if it's an error caused by a bad query, and there are
                    # more GIS templates to try, ignore it and keep trying
                    if isinstance(e.cause, HTTPError) and e.cause.status == 403:
                        if gis_tmpl != gis_tmpls[-1]:
                            continue
                    raise

            nodes = traverse_obj(media, ('edges', ..., 'node'), expected_type=dict) or []
            if not nodes:
                break
            yield from self._extract_nodes(nodes)

            # Advance cursor-based pagination; stop when the API says there's no more
            has_next_page = traverse_obj(media, ('page_info', 'has_next_page'))
            cursor = traverse_obj(media, ('page_info', 'end_cursor'), expected_type=str)
            if not has_next_page or not cursor:
                break

    def _real_extract(self, url):
        user_or_tag = self._match_id(url)
        webpage = self._download_webpage(url, user_or_tag)
        data = self._parse_graphql(webpage, user_or_tag)

        self._set_cookie('instagram.com', 'ig_pr', '1')

        return self.playlist_result(
            self._extract_graphql(data, url), user_or_tag, user_or_tag)
class InstagramUserIE(InstagramPlaylistBaseIE):
    _WORKING = False
    _VALID_URL = r'https?://(?:www\.)?instagram\.com/(?P<id>[^/]{2,})/?(?:$|[?#])'
    IE_DESC = 'Instagram user profile'
    IE_NAME = 'instagram:user'
    _TESTS = [{
        'url': 'https://instagram.com/porsche',
        'info_dict': {
            'id': 'porsche',
            'title': 'porsche',
        },
        'playlist_count': 5,
        'params': {
            'extract_flat': True,
            'skip_download': True,
            'playlistend': 5,
        },
    }]

    _QUERY_HASH = ('42323d64886122307be10013ad2dcc44',)

    @staticmethod
    def _parse_timeline_from(data):
        # extracts the media timeline data from a GraphQL result
        return data['data']['user']['edge_owner_to_timeline_media']

    @staticmethod
    def _query_vars_for(data):
        # returns a dictionary of variables to add to the timeline query based
        # on the GraphQL of the original page
        return {
            'id': data['entry_data']['ProfilePage'][0]['graphql']['user']['id'],
        }
class InstagramTagIE(InstagramPlaylistBaseIE):
    _VALID_URL = r'https?://(?:www\.)?instagram\.com/explore/tags/(?P<id>[^/]+)'
    IE_DESC = 'Instagram hashtag search URLs'
    IE_NAME = 'instagram:tag'
    _TESTS = [{
        'url': 'https://instagram.com/explore/tags/lolcats',
        'info_dict': {
            'id': 'lolcats',
            'title': 'lolcats',
        },
        'playlist_count': 50,
        'params': {
            'extract_flat': True,
            'skip_download': True,
            'playlistend': 50,
        },
    }]

    _QUERY_HASH = ('f92f56d47dc7a55b606908374b43a314',)

    @staticmethod
    def _parse_timeline_from(data):
        # extracts the media timeline data from a GraphQL result
        return data['data']['hashtag']['edge_hashtag_to_media']

    @staticmethod
    def _query_vars_for(data):
        # returns a dictionary of variables to add to the timeline query based
        # on the GraphQL of the original page
        return {
            'tag_name':
                data['entry_data']['TagPage'][0]['graphql']['hashtag']['name'],
        }
class InstagramStoryIE(InstagramBaseIE):
    """Stories and highlight reels (/stories/<user>/<id> URLs)."""
    _VALID_URL = r'https?://(?:www\.)?instagram\.com/stories/(?P<user>[^/]+)/(?P<id>\d+)'
    IE_NAME = 'instagram:story'

    _TESTS = [{
        'url': 'https://www.instagram.com/stories/highlights/18090946048123978/',
        'info_dict': {
            'id': '18090946048123978',
            'title': 'Rare',
        },
        'playlist_mincount': 50,
    }]

    def _real_extract(self, url):
        username, story_id = self._match_valid_url(url).groups()
        story_info = self._download_webpage(url, story_id)
        user_info = self._search_json(r'"user":', story_info, 'user info', story_id, fatal=False)
        if not user_info:
            self.raise_login_required('This content is unreachable')

        user_id = traverse_obj(user_info, 'pk', 'id', expected_type=str)
        # Highlights are addressed by 'highlight:<id>'; regular stories by the user's pk
        story_info_url = user_id if username != 'highlights' else f'highlight:{story_id}'
        if not story_info_url:  # user id is only mandatory for non-highlights
            raise ExtractorError('Unable to extract user id')

        videos = traverse_obj(self._download_json(
            f'{self._API_BASE_URL}/feed/reels_media/?reel_ids={story_info_url}',
            story_id, errnote=False, fatal=False, headers=self._API_HEADERS), 'reels')
        if not videos:
            self.raise_login_required('You need to log in to access this content')

        # The response is keyed either by the highlight id or by the user pk
        full_name = traverse_obj(videos, (f'highlight:{story_id}', 'user', 'full_name'), (user_id, 'user', 'full_name'))
        story_title = traverse_obj(videos, (f'highlight:{story_id}', 'title'))
        if not story_title:
            story_title = f'Story by {username}'

        highlights = traverse_obj(videos, (f'highlight:{story_id}', 'items'), (user_id, 'items'))
        info_data = []
        for highlight in highlights:
            highlight_data = self._extract_product(highlight)
            # Skip image-only story items (no video formats extracted)
            if highlight_data.get('formats'):
                info_data.append({
                    'uploader': full_name,
                    'uploader_id': user_id,
                    **filter_dict(highlight_data),
                })
        return self.playlist_result(info_data, playlist_id=story_id, playlist_title=story_title)