
Update to ytdl-commit-2dd6c6e

[YouTube] Avoid crash if uploader_id extraction fails
https://github.com/ytdl-org/youtube-dl/commit/2dd6c6edd8e0fc5e45865b8e6d865e35147de772

Except the following upstream commits:
    * 295736c9cba714fb5de7d1c3dd31d86e50091cf8 [jsinterp] Improve parsing
    * 384f632e8a9b61e864a26678d85b2b39933b9bae [ITV] Overhaul ITV extractor
    * 33db85c571304bbd6863e3407ad8d08764c9e53b [feat]: Add support to external downloader aria2p
pukkandan · 2 years ago · commit 45b2ee6f4f

+ 1 - 1
README.md

@@ -76,7 +76,7 @@ yt-dlp is a [youtube-dl](https://github.com/ytdl-org/youtube-dl) fork based on t
 
 # NEW FEATURES
 
-* Merged with **youtube-dl v2021.12.17+ [commit/195f22f](https://github.com/ytdl-org/youtube-dl/commit/195f22f)** <!--([exceptions](https://github.com/yt-dlp/yt-dlp/issues/21))--> and **youtube-dlc v2020.11.11-3+ [commit/f9401f2](https://github.com/blackjack4494/yt-dlc/commit/f9401f2a91987068139c5f757b12fc711d4c0cee)**: You get all the features and patches of [youtube-dlc](https://github.com/blackjack4494/yt-dlc) in addition to the latest [youtube-dl](https://github.com/ytdl-org/youtube-dl)
+* Merged with **youtube-dl v2021.12.17+ [commit/2dd6c6e](https://github.com/ytdl-org/youtube-dl/commit/2dd6c6e)** ([exceptions](https://github.com/yt-dlp/yt-dlp/issues/21)) and **youtube-dlc v2020.11.11-3+ [commit/f9401f2](https://github.com/blackjack4494/yt-dlc/commit/f9401f2a91987068139c5f757b12fc711d4c0cee)**: You get all the features and patches of [youtube-dlc](https://github.com/blackjack4494/yt-dlc) in addition to the latest [youtube-dl](https://github.com/ytdl-org/youtube-dl)
 
 * **[SponsorBlock Integration](#sponsorblock-options)**: You can mark/remove sponsor sections in YouTube videos by utilizing the [SponsorBlock](https://sponsor.ajay.app) API
 

+ 2 - 0
test/test_InfoExtractor.py

@@ -69,6 +69,7 @@ class TestInfoExtractor(unittest.TestCase):
             <meta name="og:test1" content='foo > < bar'/>
             <meta name="og:test2" content="foo >//< bar"/>
             <meta property=og-test3 content='Ill-formatted opengraph'/>
+            <meta property=og:test4 content=unquoted-value/>
             '''
         self.assertEqual(ie._og_search_title(html), 'Foo')
         self.assertEqual(ie._og_search_description(html), 'Some video\'s description ')
@@ -81,6 +82,7 @@ class TestInfoExtractor(unittest.TestCase):
         self.assertEqual(ie._og_search_property(('test0', 'test1'), html), 'foo > < bar')
         self.assertRaises(RegexNotFoundError, ie._og_search_property, 'test0', html, None, fatal=True)
         self.assertRaises(RegexNotFoundError, ie._og_search_property, ('test0', 'test00'), html, None, fatal=True)
+        self.assertEqual(ie._og_search_property('test4', html), 'unquoted-value')
 
     def test_html_search_meta(self):
         ie = self.ie

+ 12 - 7
test/test_age_restriction.py

@@ -10,6 +10,7 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 from test.helper import is_download_test, try_rm
 from yt_dlp import YoutubeDL
+from yt_dlp.utils import DownloadError
 
 
 def _download_restricted(url, filename, age):
@@ -25,10 +26,14 @@ def _download_restricted(url, filename, age):
     ydl.add_default_info_extractors()
     json_filename = os.path.splitext(filename)[0] + '.info.json'
     try_rm(json_filename)
-    ydl.download([url])
-    res = os.path.exists(json_filename)
-    try_rm(json_filename)
-    return res
+    try:
+        ydl.download([url])
+    except DownloadError:
+        pass
+    else:
+        return os.path.exists(json_filename)
+    finally:
+        try_rm(json_filename)
 
 
 @is_download_test
@@ -38,12 +43,12 @@ class TestAgeRestriction(unittest.TestCase):
         self.assertFalse(_download_restricted(url, filename, age))
 
     def test_youtube(self):
-        self._assert_restricted('07FYdnEawAQ', '07FYdnEawAQ.mp4', 10)
+        self._assert_restricted('HtVdAasjOgU', 'HtVdAasjOgU.mp4', 10)
 
     def test_youporn(self):
         self._assert_restricted(
-            'http://www.youporn.com/watch/505835/sex-ed-is-it-safe-to-masturbate-daily/',
-            '505835.mp4', 2, old_age=25)
+            'https://www.youporn.com/watch/16715086/sex-ed-in-detention-18-asmr/',
+            '16715086.mp4', 2, old_age=25)
 
 
 if __name__ == '__main__':
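
The rewritten _download_restricted relies on Python's try/else/finally ordering: a DownloadError (now expected for age-blocked videos) is swallowed, the .info.json existence check in the else branch runs only when the download succeeded, and the finally branch removes the .info.json in every case. A minimal, self-contained sketch of that control flow (DownloadError here is a stand-in class, not the real yt_dlp.utils exception):

    class DownloadError(Exception):  # stand-in for yt_dlp.utils.DownloadError
        pass

    def probe(should_fail):
        try:
            if should_fail:
                raise DownloadError('age-gated')
        except DownloadError:
            pass          # a blocked download is the expected outcome for restricted videos
        else:
            return True   # reached only when no exception was raised
        finally:
            print('cleanup always runs')  # try_rm(json_filename) in the real helper

    print(probe(False))  # cleanup always runs, then True
    print(probe(True))   # cleanup always runs, then None (falsy, which _assert_restricted expects)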

+ 20 - 10
yt_dlp/compat/_legacy.py

@@ -1,5 +1,6 @@
 """ Do not use! """
 
+import base64
 import collections
 import ctypes
 import getpass
@@ -29,6 +30,7 @@ from asyncio import run as compat_asyncio_run  # noqa: F401
 from re import Pattern as compat_Pattern  # noqa: F401
 from re import match as compat_Match  # noqa: F401
 
+from . import compat_expanduser, compat_HTMLParseError, compat_realpath
 from .compat_utils import passthrough_module
 from ..dependencies import Cryptodome_AES as compat_pycrypto_AES  # noqa: F401
 from ..dependencies import brotli as compat_brotli  # noqa: F401
@@ -47,23 +49,25 @@ def compat_setenv(key, value, env=os.environ):
     env[key] = value
 
 
+compat_base64_b64decode = base64.b64decode
 compat_basestring = str
 compat_casefold = str.casefold
 compat_chr = chr
 compat_collections_abc = collections.abc
-compat_cookiejar = http.cookiejar
-compat_cookiejar_Cookie = http.cookiejar.Cookie
-compat_cookies = http.cookies
-compat_cookies_SimpleCookie = http.cookies.SimpleCookie
-compat_etree_Element = etree.Element
-compat_etree_register_namespace = etree.register_namespace
+compat_cookiejar = compat_http_cookiejar = http.cookiejar
+compat_cookiejar_Cookie = compat_http_cookiejar_Cookie = http.cookiejar.Cookie
+compat_cookies = compat_http_cookies = http.cookies
+compat_cookies_SimpleCookie = compat_http_cookies_SimpleCookie = http.cookies.SimpleCookie
+compat_etree_Element = compat_xml_etree_ElementTree_Element = etree.Element
+compat_etree_register_namespace = compat_xml_etree_register_namespace = etree.register_namespace
 compat_filter = filter
 compat_get_terminal_size = shutil.get_terminal_size
 compat_getenv = os.getenv
-compat_getpass = getpass.getpass
+compat_getpass = compat_getpass_getpass = getpass.getpass
 compat_html_entities = html.entities
 compat_html_entities_html5 = html.entities.html5
-compat_HTMLParser = html.parser.HTMLParser
+compat_html_parser_HTMLParseError = compat_HTMLParseError
+compat_HTMLParser = compat_html_parser_HTMLParser = html.parser.HTMLParser
 compat_http_client = http.client
 compat_http_server = http.server
 compat_input = input
@@ -72,6 +76,8 @@ compat_itertools_count = itertools.count
 compat_kwargs = lambda kwargs: kwargs
 compat_map = map
 compat_numeric_types = (int, float, complex)
+compat_os_path_expanduser = compat_expanduser
+compat_os_path_realpath = compat_realpath
 compat_print = print
 compat_shlex_split = shlex.split
 compat_socket_create_connection = socket.create_connection
@@ -81,7 +87,9 @@ compat_struct_unpack = struct.unpack
 compat_subprocess_get_DEVNULL = lambda: DEVNULL
 compat_tokenize_tokenize = tokenize.tokenize
 compat_urllib_error = urllib.error
+compat_urllib_HTTPError = urllib.error.HTTPError
 compat_urllib_parse = urllib.parse
+compat_urllib_parse_parse_qs = urllib.parse.parse_qs
 compat_urllib_parse_quote = urllib.parse.quote
 compat_urllib_parse_quote_plus = urllib.parse.quote_plus
 compat_urllib_parse_unquote_plus = urllib.parse.unquote_plus
@@ -90,8 +98,10 @@ compat_urllib_parse_urlunparse = urllib.parse.urlunparse
 compat_urllib_request = urllib.request
 compat_urllib_request_DataHandler = urllib.request.DataHandler
 compat_urllib_response = urllib.response
-compat_urlretrieve = urllib.request.urlretrieve
-compat_xml_parse_error = etree.ParseError
+compat_urlretrieve = compat_urllib_request_urlretrieve = urllib.request.urlretrieve
+compat_xml_parse_error = compat_xml_etree_ElementTree_ParseError = etree.ParseError
 compat_xpath = lambda xpath: xpath
 compat_zip = zip
 workaround_optparse_bug9161 = lambda: None
+
+legacy = []
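
Most of the _legacy.py churn consists of chained assignments that bind the historical compat name and a new, fully-qualified alias to the same object, so code importing either spelling keeps working. A minimal sketch of the pattern, using one of the pairs from the diff:

    import http.cookiejar

    # Chained assignment: both names refer to the very same module object,
    # mirroring `compat_cookiejar = compat_http_cookiejar = http.cookiejar` above.
    compat_cookiejar = compat_http_cookiejar = http.cookiejar

    assert compat_cookiejar is compat_http_cookiejar is http.cookiejar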

+ 7 - 0
yt_dlp/extractor/_extractors.py

@@ -239,6 +239,7 @@ from .bleacherreport import (
     BleacherReportIE,
     BleacherReportCMSIE,
 )
+from .blerp import BlerpIE
 from .blogger import BloggerIE
 from .bloomberg import BloombergIE
 from .bokecc import BokeCCIE
@@ -861,6 +862,7 @@ from .kicker import KickerIE
 from .kickstarter import KickStarterIE
 from .kinja import KinjaEmbedIE
 from .kinopoisk import KinoPoiskIE
+from .kommunetv import KommunetvIE
 from .kompas import KompasVideoIE
 from .konserthusetplay import KonserthusetPlayIE
 from .koo import KooIE
@@ -1460,6 +1462,7 @@ from .puhutv import (
     PuhuTVIE,
     PuhuTVSerieIE,
 )
+from .pr0gramm import Pr0grammStaticIE, Pr0grammIE
 from .prankcast import PrankCastIE
 from .premiershiprugby import PremiershipRugbyIE
 from .presstv import PressTVIE
@@ -1521,6 +1524,10 @@ from .raywenderlich import (
     RayWenderlichCourseIE,
 )
 from .rbmaradio import RBMARadioIE
+from .rbgtum import (
+    RbgTumIE,
+    RbgTumCourseIE,
+)
 from .rcs import (
     RCSIE,
     RCSEmbedsIE,

+ 64 - 14
yt_dlp/extractor/americastestkitchen.py

@@ -11,7 +11,7 @@ from ..utils import (
 
 
 class AmericasTestKitchenIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?americastestkitchen\.com/(?:cooks(?:country|illustrated)/)?(?P<resource_type>episode|videos)/(?P<id>\d+)'
+    _VALID_URL = r'https?://(?:www\.)?(?:americastestkitchen|cooks(?:country|illustrated))\.com/(?:cooks(?:country|illustrated)/)?(?P<resource_type>episode|videos)/(?P<id>\d+)'
     _TESTS = [{
         'url': 'https://www.americastestkitchen.com/episode/582-weeknight-japanese-suppers',
         'md5': 'b861c3e365ac38ad319cfd509c30577f',
@@ -72,6 +72,12 @@ class AmericasTestKitchenIE(InfoExtractor):
     }, {
         'url': 'https://www.americastestkitchen.com/cooksillustrated/videos/4478-beef-wellington',
         'only_matching': True,
+    }, {
+        'url': 'https://www.cookscountry.com/episode/564-when-only-chocolate-will-do',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.cooksillustrated.com/videos/4478-beef-wellington',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
@@ -100,7 +106,7 @@ class AmericasTestKitchenIE(InfoExtractor):
 
 
 class AmericasTestKitchenSeasonIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?americastestkitchen\.com(?P<show>/cookscountry)?/episodes/browse/season_(?P<id>\d+)'
+    _VALID_URL = r'https?://(?:www\.)?(?P<show>americastestkitchen|(?P<cooks>cooks(?:country|illustrated)))\.com(?:(?:/(?P<show2>cooks(?:country|illustrated)))?(?:/?$|(?<!ated)(?<!ated\.com)/episodes/browse/season_(?P<season>\d+)))'
     _TESTS = [{
         # ATK Season
         'url': 'https://www.americastestkitchen.com/episodes/browse/season_1',
@@ -117,29 +123,73 @@ class AmericasTestKitchenSeasonIE(InfoExtractor):
             'title': 'Season 12',
         },
         'playlist_count': 13,
+    }, {
+        # America's Test Kitchen Series
+        'url': 'https://www.americastestkitchen.com/',
+        'info_dict': {
+            'id': 'americastestkitchen',
+            'title': 'America\'s Test Kitchen',
+        },
+        'playlist_count': 558,
+    }, {
+        # Cooks Country Series
+        'url': 'https://www.americastestkitchen.com/cookscountry',
+        'info_dict': {
+            'id': 'cookscountry',
+            'title': 'Cook\'s Country',
+        },
+        'playlist_count': 199,
+    }, {
+        'url': 'https://www.americastestkitchen.com/cookscountry/',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.cookscountry.com/episodes/browse/season_12',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.cookscountry.com',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.americastestkitchen.com/cooksillustrated/',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.cooksillustrated.com',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
-        show_path, season_number = self._match_valid_url(url).group('show', 'id')
-        season_number = int(season_number)
+        season_number, show1, show = self._match_valid_url(url).group('season', 'show', 'show2')
+        show_path = ('/' + show) if show else ''
+        show = show or show1
+        season_number = int_or_none(season_number)
+
+        slug, title = {
+            'americastestkitchen': ('atk', 'America\'s Test Kitchen'),
+            'cookscountry': ('cco', 'Cook\'s Country'),
+            'cooksillustrated': ('cio', 'Cook\'s Illustrated'),
+        }[show]
 
-        slug = 'cco' if show_path == '/cookscountry' else 'atk'
+        facet_filters = [
+            'search_document_klass:episode',
+            'search_show_slug:' + slug,
+        ]
 
-        season = 'Season %d' % season_number
+        if season_number:
+            playlist_id = 'season_%d' % season_number
+            playlist_title = 'Season %d' % season_number
+            facet_filters.append('search_season_list:' + playlist_title)
+        else:
+            playlist_id = show
+            playlist_title = title
 
         season_search = self._download_json(
             'https://y1fnzxui30-dsn.algolia.net/1/indexes/everest_search_%s_season_desc_production' % slug,
-            season, headers={
+            playlist_id, headers={
                 'Origin': 'https://www.americastestkitchen.com',
                 'X-Algolia-API-Key': '8d504d0099ed27c1b73708d22871d805',
                 'X-Algolia-Application-Id': 'Y1FNZXUI30',
             }, query={
-                'facetFilters': json.dumps([
-                    'search_season_list:' + season,
-                    'search_document_klass:episode',
-                    'search_show_slug:' + slug,
-                ]),
-                'attributesToRetrieve': 'description,search_%s_episode_number,search_document_date,search_url,title' % slug,
+                'facetFilters': json.dumps(facet_filters),
+                'attributesToRetrieve': 'description,search_%s_episode_number,search_document_date,search_url,title,search_atk_episode_season' % slug,
                 'attributesToHighlight': '',
                 'hitsPerPage': 1000,
             })
@@ -162,4 +212,4 @@ class AmericasTestKitchenSeasonIE(InfoExtractor):
                 }
 
         return self.playlist_result(
-            entries(), 'season_%d' % season_number, season)
+            entries(), playlist_id, playlist_title)
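
The broadened _VALID_URL now also matches the cookscountry.com and cooksillustrated.com domains as well as bare show homepages with no season suffix, which is why season_number is parsed with int_or_none and the playlist id/title fall back to the show. A quick sanity check of the grouping, with the pattern copied verbatim from the diff:

    import re

    _VALID_URL = r'https?://(?:www\.)?(?P<show>americastestkitchen|(?P<cooks>cooks(?:country|illustrated)))\.com(?:(?:/(?P<show2>cooks(?:country|illustrated)))?(?:/?$|(?<!ated)(?<!ated\.com)/episodes/browse/season_(?P<season>\d+)))'

    for url in (
        'https://www.americastestkitchen.com/episodes/browse/season_12',  # ATK season page
        'https://www.americastestkitchen.com/cookscountry',               # show homepage via path
        'https://www.cooksillustrated.com',                               # show homepage via domain
    ):
        print(re.match(_VALID_URL, url).group('show', 'show2', 'season'))
    # ('americastestkitchen', None, '12')
    # ('americastestkitchen', 'cookscountry', None)
    # ('cooksillustrated', None, None)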

+ 167 - 0
yt_dlp/extractor/blerp.py

@@ -0,0 +1,167 @@
+import json
+
+from .common import InfoExtractor
+from ..utils import strip_or_none, traverse_obj
+
+
+class BlerpIE(InfoExtractor):
+    IE_NAME = 'blerp'
+    _VALID_URL = r'https?://(?:www\.)?blerp\.com/soundbites/(?P<id>[0-9a-zA-Z]+)'
+    _TESTS = [{
+        'url': 'https://blerp.com/soundbites/6320fe8745636cb4dd677a5a',
+        'info_dict': {
+            'id': '6320fe8745636cb4dd677a5a',
+            'title': 'Samsung Galaxy S8 Over the Horizon Ringtone 2016',
+            'uploader': 'luminousaj',
+            'uploader_id': '5fb81e51aa66ae000c395478',
+            'ext': 'mp3',
+            'tags': ['samsung', 'galaxy', 's8', 'over the horizon', '2016', 'ringtone'],
+        }
+    }, {
+        'url': 'https://blerp.com/soundbites/5bc94ef4796001000498429f',
+        'info_dict': {
+            'id': '5bc94ef4796001000498429f',
+            'title': 'Yee',
+            'uploader': '179617322678353920',
+            'uploader_id': '5ba99cf71386730004552c42',
+            'ext': 'mp3',
+            'tags': ['YEE', 'YEET', 'wo ha haah catchy tune yee', 'yee']
+        }
+    }]
+
+    _GRAPHQL_OPERATIONNAME = "webBitePageGetBite"
+    _GRAPHQL_QUERY = (
+        '''query webBitePageGetBite($_id: MongoID!) {
+            web {
+                biteById(_id: $_id) {
+                    ...bitePageFrag
+                    __typename
+                }
+                __typename
+            }
+        }
+
+        fragment bitePageFrag on Bite {
+            _id
+            title
+            userKeywords
+            keywords
+            color
+            visibility
+            isPremium
+            owned
+            price
+            extraReview
+            isAudioExists
+            image {
+                filename
+                original {
+                    url
+                    __typename
+                }
+                __typename
+            }
+            userReactions {
+                _id
+                reactions
+                createdAt
+                __typename
+            }
+            topReactions
+            totalSaveCount
+            saved
+            blerpLibraryType
+            license
+            licenseMetaData
+            playCount
+            totalShareCount
+            totalFavoriteCount
+            totalAddedToBoardCount
+            userCategory
+            userAudioQuality
+            audioCreationState
+            transcription
+            userTranscription
+            description
+            createdAt
+            updatedAt
+            author
+            listingType
+            ownerObject {
+                _id
+                username
+                profileImage {
+                    filename
+                    original {
+                        url
+                        __typename
+                    }
+                    __typename
+                }
+                __typename
+            }
+            transcription
+            favorited
+            visibility
+            isCurated
+            sourceUrl
+            audienceRating
+            strictAudienceRating
+            ownerId
+            reportObject {
+                reportedContentStatus
+                __typename
+            }
+            giphy {
+                mp4
+                gif
+                __typename
+            }
+            audio {
+                filename
+                original {
+                    url
+                    __typename
+                }
+                mp3 {
+                    url
+                    __typename
+                }
+                __typename
+            }
+            __typename
+        }
+
+        ''')
+
+    def _real_extract(self, url):
+        audio_id = self._match_id(url)
+
+        data = {
+            'operationName': self._GRAPHQL_OPERATIONNAME,
+            'query': self._GRAPHQL_QUERY,
+            'variables': {
+                '_id': audio_id
+            }
+        }
+
+        headers = {
+            'Content-Type': 'application/json'
+        }
+
+        json_result = self._download_json('https://api.blerp.com/graphql',
+                                          audio_id, data=json.dumps(data).encode('utf-8'), headers=headers)
+
+        bite_json = json_result['data']['web']['biteById']
+
+        info_dict = {
+            'id': bite_json['_id'],
+            'url': bite_json['audio']['mp3']['url'],
+            'title': bite_json['title'],
+            'uploader': traverse_obj(bite_json, ('ownerObject', 'username'), expected_type=strip_or_none),
+            'uploader_id': traverse_obj(bite_json, ('ownerObject', '_id'), expected_type=strip_or_none),
+            'ext': 'mp3',
+            'tags': list(filter(None, map(strip_or_none, (traverse_obj(bite_json, 'userKeywords', expected_type=list) or []))) or None)
+        }
+
+        return info_dict
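
The tail of _real_extract assumes a GraphQL response shaped like data.web.biteById with the fields read above. A small mock (shape inferred from this extractor; the values are illustrative, and the mp3 URL is hypothetical) shows how the info dict fields fall out:

    # Mock of the response slice the extractor consumes.
    mock_response = {'data': {'web': {'biteById': {
        '_id': '6320fe8745636cb4dd677a5a',
        'title': 'Samsung Galaxy S8 Over the Horizon Ringtone 2016',
        'userKeywords': ['samsung', ' galaxy ', '', None],
        'ownerObject': {'_id': '5fb81e51aa66ae000c395478', 'username': 'luminousaj'},
        'audio': {'mp3': {'url': 'https://example.invalid/ringtone.mp3'}},  # hypothetical URL
    }}}}

    bite = mock_response['data']['web']['biteById']
    print(bite['audio']['mp3']['url'])                        # direct mp3 URL used as 'url'
    print([kw.strip() for kw in bite['userKeywords'] if kw])  # cleaned keywords -> ['samsung', 'galaxy']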

+ 50 - 5
yt_dlp/extractor/callin.py

@@ -1,9 +1,5 @@
 from .common import InfoExtractor
-from ..utils import (
-    traverse_obj,
-    float_or_none,
-    int_or_none
-)
+from ..utils import float_or_none, int_or_none, make_archive_id, traverse_obj
 
 
 class CallinIE(InfoExtractor):
@@ -35,6 +31,54 @@ class CallinIE(InfoExtractor):
             'episode_number': 1,
             'episode_id': '218b979630a35ead12c6fd096f2996c56c37e4d0dc1f6dc0feada32dcf7b31cd'
         }
+    }, {
+        'url': 'https://www.callin.com/episode/fcc-commissioner-brendan-carr-on-elons-PrumRdSQJW',
+        'md5': '14ede27ee2c957b7e4db93140fc0745c',
+        'info_dict': {
+            'id': 'c3dab47f237bf953d180d3f243477a84302798be0e0b29bc9ade6d60a69f04f5',
+            'ext': 'ts',
+            'title': 'FCC Commissioner Brendan Carr on Elon’s Starlink',
+            'description': 'Or, why the government doesn’t like SpaceX',
+            'channel': 'The Pull Request',
+            'channel_url': 'https://callin.com/show/the-pull-request-ucnDJmEKAa',
+            'duration': 3182.472,
+            'series_id': '7e9c23156e4aecfdcaef46bfb2ed7ca268509622ec006c0f0f25d90e34496638',
+            'uploader_url': 'http://thepullrequest.com',
+            'upload_date': '20220902',
+            'episode': 'FCC Commissioner Brendan Carr on Elon’s Starlink',
+            'display_id': 'fcc-commissioner-brendan-carr-on-elons-PrumRdSQJW',
+            'series': 'The Pull Request',
+            'channel_id': '7e9c23156e4aecfdcaef46bfb2ed7ca268509622ec006c0f0f25d90e34496638',
+            'view_count': int,
+            'uploader': 'Antonio García Martínez',
+            'thumbnail': 'https://d1z76fhpoqkd01.cloudfront.net/shows/legacy/1ade9142625344045dc17cf523469ced1d93610762f4c886d06aa190a2f979e8.png',
+            'episode_id': 'c3dab47f237bf953d180d3f243477a84302798be0e0b29bc9ade6d60a69f04f5',
+            'timestamp': 1662100688.005,
+        }
+    }, {
+        'url': 'https://www.callin.com/episode/episode-81-elites-melt-down-over-student-debt-lzxMidUnjA',
+        'md5': '16f704ddbf82a27e3930533b12062f07',
+        'info_dict': {
+            'id': '8d06f869798f93a7814e380bceabea72d501417e620180416ff6bd510596e83c',
+            'ext': 'ts',
+            'title': 'Episode 81- Elites MELT DOWN over Student Debt Victory? Rumble in NYC?',
+            'description': 'Let’s talk todays episode about the primary election shake up in NYC and the elites melting down over student debt cancelation.',
+            'channel': 'The DEBRIEF With Briahna Joy Gray',
+            'channel_url': 'https://callin.com/show/the-debrief-with-briahna-joy-gray-siiFDzGegm',
+            'duration': 10043.16,
+            'series_id': '61cea58444465fd26674069703bd8322993bc9e5b4f1a6d0872690554a046ff7',
+            'uploader_url': 'http://patreon.com/badfaithpodcast',
+            'upload_date': '20220826',
+            'episode': 'Episode 81- Elites MELT DOWN over Student Debt Victory? Rumble in NYC?',
+            'display_id': 'episode-',
+            'series': 'The DEBRIEF With Briahna Joy Gray',
+            'channel_id': '61cea58444465fd26674069703bd8322993bc9e5b4f1a6d0872690554a046ff7',
+            'view_count': int,
+            'uploader': 'Briahna Gray',
+            'thumbnail': 'https://d1z76fhpoqkd01.cloudfront.net/shows/legacy/461ea0d86172cb6aff7d6c80fd49259cf5e64bdf737a4650f8bc24cf392ca218.png',
+            'episode_id': '8d06f869798f93a7814e380bceabea72d501417e620180416ff6bd510596e83c',
+            'timestamp': 1661476708.282,
+        }
     }]
 
     def try_get_user_name(self, d):
@@ -86,6 +130,7 @@ class CallinIE(InfoExtractor):
 
         return {
             'id': id,
+            '_old_archive_ids': [make_archive_id(self, display_id.rsplit('-', 1)[-1])],
             'display_id': display_id,
             'title': title,
             'formats': formats,
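
The added _old_archive_ids entry is built from the trailing segment of the display id, presumably the short id that earlier versions recorded, so existing --download-archive files keep matching. The slicing itself, checked against the first test URL above:

    display_id = 'fcc-commissioner-brendan-carr-on-elons-PrumRdSQJW'
    print(display_id.rsplit('-', 1)[-1])  # PrumRdSQJW, the value passed to make_archive_id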

+ 11 - 28
yt_dlp/extractor/cammodels.py

@@ -1,9 +1,5 @@
 from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
-    int_or_none,
-    url_or_none,
-)
+from ..utils import int_or_none, url_or_none
 
 
 class CamModelsIE(InfoExtractor):
@@ -17,32 +13,11 @@ class CamModelsIE(InfoExtractor):
     def _real_extract(self, url):
         user_id = self._match_id(url)
 
-        webpage = self._download_webpage(
-            url, user_id, headers=self.geo_verification_headers())
-
-        manifest_root = self._html_search_regex(
-            r'manifestUrlRoot=([^&\']+)', webpage, 'manifest', default=None)
-
-        if not manifest_root:
-            ERRORS = (
-                ("I'm offline, but let's stay connected", 'This user is currently offline'),
-                ('in a private show', 'This user is in a private show'),
-                ('is currently performing LIVE', 'This model is currently performing live'),
-            )
-            for pattern, message in ERRORS:
-                if pattern in webpage:
-                    error = message
-                    expected = True
-                    break
-            else:
-                error = 'Unable to find manifest URL root'
-                expected = False
-            raise ExtractorError(error, expected=expected)
-
         manifest = self._download_json(
-            '%s%s.json' % (manifest_root, user_id), user_id)
+            'https://manifest-server.naiadsystems.com/live/s:%s.json' % user_id, user_id)
 
         formats = []
+        thumbnails = []
         for format_id, format_dict in manifest['formats'].items():
             if not isinstance(format_dict, dict):
                 continue
@@ -82,12 +57,20 @@ class CamModelsIE(InfoExtractor):
                         'quality': -10,
                     })
                 else:
+                    if format_id == 'jpeg':
+                        thumbnails.append({
+                            'url': f['url'],
+                            'width': f['width'],
+                            'height': f['height'],
+                            'format_id': f['format_id'],
+                        })
                     continue
                 formats.append(f)
 
         return {
             'id': user_id,
             'title': user_id,
+            'thumbnails': thumbnails,
             'is_live': True,
             'formats': formats,
             'age_limit': 18

+ 1 - 1
yt_dlp/extractor/common.py

@@ -1338,7 +1338,7 @@ class InfoExtractor:
     # Helper functions for extracting OpenGraph info
     @staticmethod
     def _og_regexes(prop):
-        content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
+        content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?)(?=\s|/?>))'
         property_re = (r'(?:name|property)=(?:\'og%(sep)s%(prop)s\'|"og%(sep)s%(prop)s"|\s*og%(sep)s%(prop)s\b)'
                        % {'prop': re.escape(prop), 'sep': '(?:&#x3A;|[:-])'})
         template = r'<meta[^>]+?%s[^>]+?%s'
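
The new lookahead ends an unquoted content value at the next whitespace or at the tag's (optionally self-closing) '>', so the og:test4 case added in test_InfoExtractor.py yields the whole token rather than a truncated match. A quick check with the updated sub-pattern copied from the diff:

    import re

    content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?)(?=\s|/?>))'

    tag = '<meta property=og:test4 content=unquoted-value/>'
    m = re.search(content_re, tag)
    print(next(g for g in m.groups() if g))  # unquoted-value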

Some files were not shown because too many files changed in this diff