import base64

from .common import InfoExtractor
from ..utils import (
    merge_dicts,
    parse_duration,
    parse_iso8601,
    parse_resolution,
    try_get,
    url_basename,
)
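

# Microsoft Stream videos sit behind an authenticated session: the extractor
# scrapes a bearer token from the logged-in watch page (browser cookies) and
# then talks to the private Stream API gateway.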
class MicrosoftStreamIE(InfoExtractor):
    IE_NAME = 'microsoftstream'
    IE_DESC = 'Microsoft Stream'
    _VALID_URL = r'https?://(?:web|www|msit)\.microsoftstream\.com/video/(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
    _TESTS = [{
        'url': 'https://web.microsoftstream.com/video/6e51d928-4f46-4f1c-b141-369925e37b62?list=user&userId=f5491e02-e8fe-4e34-b67c-ec2e79a6ecc0',
        'only_matching': True,
    }, {
        'url': 'https://msit.microsoftstream.com/video/b60f5987-aabd-4e1c-a42f-c559d138f2ca',
        'only_matching': True,
    }]
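
    # Fetch the video's text tracks from the private API and split them into
    # manual subtitles and auto-generated captions, keyed by language code.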
    def _get_all_subtitles(self, api_url, video_id, headers):
        subtitles = {}
        automatic_captions = {}
        text_tracks = self._download_json(
            f'{api_url}/videos/{video_id}/texttracks', video_id,
            note='Downloading subtitles JSON', fatal=False, headers=headers,
            query={'api-version': '1.4-private'}).get('value') or []
        for track in text_tracks:
            if not track.get('language') or not track.get('url'):
                continue
            sub_dict = automatic_captions if track.get('autoGenerated') else subtitles
            sub_dict.setdefault(track['language'], []).append({
                'ext': 'vtt',
                'url': track.get('url'),
            })
        return {
            'subtitles': subtitles,
            'automatic_captions': automatic_captions,
        }
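
    # Listing the text tracks costs an extra API round-trip, so only do it
    # when subtitles were requested (--write-subs, --write-auto-subs, --list-subs).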
    def extract_all_subtitles(self, *args, **kwargs):
        if (self.get_param('writesubtitles', False)
                or self.get_param('writeautomaticsub', False)
                or self.get_param('listsubtitles')):
            return self._get_all_subtitles(*args, **kwargs)
        return {}
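
    # Main extraction: authenticate via browser cookies, pull the video's
    # metadata from the Stream API, then collect thumbnails, formats and
    # subtitles.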
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        if '<title>Microsoft Stream</title>' not in webpage:
            self.raise_login_required(method='cookies')
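
        # The watch page embeds a bearer token and the API gateway base URL in
        # its inline config; both are needed for every API request below.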
        access_token = self._html_search_regex(r'"AccessToken":"(.+?)"', webpage, 'access token')
        api_url = self._html_search_regex(r'"ApiGatewayUri":"(.+?)"', webpage, 'api url')

        headers = {'Authorization': f'Bearer {access_token}'}

        video_data = self._download_json(
            f'{api_url}/videos/{video_id}', video_id,
            headers=headers, query={
                '$expand': 'creator,tokens,status,liveEvent,extensions',
                'api-version': '1.4-private',
            })
        video_id = video_data.get('id') or video_id
        language = video_data.get('language')
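
        # Poster-image basenames are base64-encoded and contain the image
        # dimensions; decode them so parse_resolution() can read width/height.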
        thumbnails = []
        for thumbnail_id in ('extraSmall', 'small', 'medium', 'large'):
            thumbnail_url = try_get(video_data, lambda x: x['posterImage'][thumbnail_id]['url'], str)
            if not thumbnail_url:
                continue
            thumb = {
                'id': thumbnail_id,
                'url': thumbnail_url,
            }
            thumb_name = url_basename(thumbnail_url)
            thumb_name = str(base64.b64decode(thumb_name + '=' * (-len(thumb_name) % 4)))
            thumb.update(parse_resolution(thumb_name))
            thumbnails.append(thumb)
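
        # Each playbackUrls entry advertises its manifest type; handle HLS,
        # DASH and Smooth Streaming, reusing the same auth headers.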
        formats = []
        for playlist in video_data['playbackUrls']:
            if playlist['mimeType'] == 'application/vnd.apple.mpegurl':
                formats.extend(self._extract_m3u8_formats(
                    playlist['playbackUrl'], video_id,
                    ext='mp4', entry_protocol='m3u8_native', m3u8_id='hls',
                    fatal=False, headers=headers))
            elif playlist['mimeType'] == 'application/dash+xml':
                formats.extend(self._extract_mpd_formats(
                    playlist['playbackUrl'], video_id, mpd_id='dash',
                    fatal=False, headers=headers))
            elif playlist['mimeType'] == 'application/vnd.ms-sstr+xml':
                formats.extend(self._extract_ism_formats(
                    playlist['playbackUrl'], video_id, ism_id='mss',
                    fatal=False, headers=headers))
        formats = [merge_dicts(f, {'language': language}) for f in formats]
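
        # Optional metadata is read defensively (try_get / .get), so missing
        # fields become None instead of raising.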
        return {
            'id': video_id,
            'title': video_data['name'],
            'description': video_data.get('description'),
            'uploader': try_get(video_data, lambda x: x['creator']['name'], str),
            'uploader_id': try_get(video_data, (lambda x: x['creator']['mail'],
                                                lambda x: x['creator']['id']), str),
            'thumbnails': thumbnails,
            **self.extract_all_subtitles(api_url, video_id, headers),
            'timestamp': parse_iso8601(video_data.get('created')),
            'duration': parse_duration(try_get(video_data, lambda x: x['media']['duration'])),
            'webpage_url': f'https://web.microsoftstream.com/video/{video_id}',
            'view_count': try_get(video_data, lambda x: x['metrics']['views'], int),
            'like_count': try_get(video_data, lambda x: x['metrics']['likes'], int),
            'comment_count': try_get(video_data, lambda x: x['metrics']['comments'], int),
            'formats': formats,
        }