# yt-dlp downloader/http.py — plain HTTP(S) file downloader
  1. import os
  2. import random
  3. import time
  4. from .common import FileDownloader
  5. from ..networking import Request
  6. from ..networking.exceptions import (
  7. CertificateVerifyError,
  8. HTTPError,
  9. TransportError,
  10. )
  11. from ..utils import (
  12. ContentTooShortError,
  13. RetryManager,
  14. ThrottledDownload,
  15. XAttrMetadataError,
  16. XAttrUnavailableError,
  17. encodeFilename,
  18. int_or_none,
  19. parse_http_range,
  20. try_call,
  21. write_xattr,
  22. )
  23. from ..utils.networking import HTTPHeaderDict
  24. class HttpFD(FileDownloader):
  25. def real_download(self, filename, info_dict):
  26. url = info_dict['url']
  27. request_data = info_dict.get('request_data', None)
  28. class DownloadContext(dict):
  29. __getattr__ = dict.get
  30. __setattr__ = dict.__setitem__
  31. __delattr__ = dict.__delitem__
  32. ctx = DownloadContext()
  33. ctx.filename = filename
  34. ctx.tmpfilename = self.temp_name(filename)
  35. ctx.stream = None
  36. # Disable compression
  37. headers = HTTPHeaderDict({'Accept-Encoding': 'identity'}, info_dict.get('http_headers'))
  38. is_test = self.params.get('test', False)
  39. chunk_size = self._TEST_FILE_SIZE if is_test else (
  40. self.params.get('http_chunk_size')
  41. or info_dict.get('downloader_options', {}).get('http_chunk_size')
  42. or 0)
  43. ctx.open_mode = 'wb'
  44. ctx.resume_len = 0
  45. ctx.block_size = self.params.get('buffersize', 1024)
  46. ctx.start_time = time.time()
  47. # parse given Range
  48. req_start, req_end, _ = parse_http_range(headers.get('Range'))
  49. if self.params.get('continuedl', True):
  50. # Establish possible resume length
  51. if os.path.isfile(encodeFilename(ctx.tmpfilename)):
  52. ctx.resume_len = os.path.getsize(
  53. encodeFilename(ctx.tmpfilename))
  54. ctx.is_resume = ctx.resume_len > 0
  55. class SucceedDownload(Exception):
  56. pass
  57. class RetryDownload(Exception):
  58. def __init__(self, source_error):
  59. self.source_error = source_error
  60. class NextFragment(Exception):
  61. pass
  62. def establish_connection():
  63. ctx.chunk_size = (random.randint(int(chunk_size * 0.95), chunk_size)
  64. if not is_test and chunk_size else chunk_size)
  65. if ctx.resume_len > 0:
  66. range_start = ctx.resume_len
  67. if req_start is not None:
  68. # offset the beginning of Range to be within request
  69. range_start += req_start
  70. if ctx.is_resume:
  71. self.report_resuming_byte(ctx.resume_len)
  72. ctx.open_mode = 'ab'
  73. elif req_start is not None:
  74. range_start = req_start
  75. elif ctx.chunk_size > 0:
  76. range_start = 0
  77. else:
  78. range_start = None
  79. ctx.is_resume = False
  80. if ctx.chunk_size:
  81. chunk_aware_end = range_start + ctx.chunk_size - 1
  82. # we're not allowed to download outside Range
  83. range_end = chunk_aware_end if req_end is None else min(chunk_aware_end, req_end)
  84. elif req_end is not None:
  85. # there's no need for chunked downloads, so download until the end of Range
  86. range_end = req_end
  87. else:
  88. range_end = None
  89. if try_call(lambda: range_start > range_end):
  90. ctx.resume_len = 0
  91. ctx.open_mode = 'wb'
  92. raise RetryDownload(Exception(f'Conflicting range. (start={range_start} > end={range_end})'))
  93. if try_call(lambda: range_end >= ctx.content_len):
  94. range_end = ctx.content_len - 1
  95. request = Request(url, request_data, headers)
  96. has_range = range_start is not None
  97. if has_range:
  98. request.headers['Range'] = f'bytes={int(range_start)}-{int_or_none(range_end) or ""}'
  99. # Establish connection
  100. try:
  101. ctx.data = self.ydl.urlopen(request)
  102. # When trying to resume, Content-Range HTTP header of response has to be checked
  103. # to match the value of requested Range HTTP header. This is due to a webservers
  104. # that don't support resuming and serve a whole file with no Content-Range
  105. # set in response despite of requested Range (see
  106. # https://github.com/ytdl-org/youtube-dl/issues/6057#issuecomment-126129799)
  107. if has_range:
  108. content_range = ctx.data.headers.get('Content-Range')
  109. content_range_start, content_range_end, content_len = parse_http_range(content_range)
  110. # Content-Range is present and matches requested Range, resume is possible
  111. if range_start == content_range_start and (
  112. # Non-chunked download
  113. not ctx.chunk_size
  114. # Chunked download and requested piece or
  115. # its part is promised to be served
  116. or content_range_end == range_end
  117. or content_len < range_end):
  118. ctx.content_len = content_len
  119. if content_len or req_end:
  120. ctx.data_len = min(content_len or req_end, req_end or content_len) - (req_start or 0)
  121. return
  122. # Content-Range is either not present or invalid. Assuming remote webserver is
  123. # trying to send the whole file, resume is not possible, so wiping the local file
  124. # and performing entire redownload
  125. elif range_start > 0:
  126. self.report_unable_to_resume()
  127. ctx.resume_len = 0
  128. ctx.open_mode = 'wb'
  129. ctx.data_len = ctx.content_len = int_or_none(ctx.data.headers.get('Content-length', None))
  130. except HTTPError as err:
  131. if err.status == 416:
  132. # Unable to resume (requested range not satisfiable)
  133. try:
  134. # Open the connection again without the range header
  135. ctx.data = self.ydl.urlopen(
  136. Request(url, request_data, headers))
  137. content_length = ctx.data.headers['Content-Length']
  138. except HTTPError as err:
  139. if err.status < 500 or err.status >= 600:
  140. raise
  141. else:
  142. # Examine the reported length
  143. if (content_length is not None
  144. and (ctx.resume_len - 100 < int(content_length) < ctx.resume_len + 100)):
  145. # The file had already been fully downloaded.
  146. # Explanation to the above condition: in issue #175 it was revealed that
  147. # YouTube sometimes adds or removes a few bytes from the end of the file,
  148. # changing the file size slightly and causing problems for some users. So
  149. # I decided to implement a suggested change and consider the file
  150. # completely downloaded if the file size differs less than 100 bytes from
  151. # the one in the hard drive.
  152. self.report_file_already_downloaded(ctx.filename)
  153. self.try_rename(ctx.tmpfilename, ctx.filename)
  154. self._hook_progress({
  155. 'filename': ctx.filename,
  156. 'status': 'finished',
  157. 'downloaded_bytes': ctx.resume_len,
  158. 'total_bytes': ctx.resume_len,
  159. }, info_dict)
  160. raise SucceedDownload
  161. else:
  162. # The length does not match, we start the download over
  163. self.report_unable_to_resume()
  164. ctx.resume_len = 0
  165. ctx.open_mode = 'wb'
  166. return
  167. elif err.status < 500 or err.status >= 600:
  168. # Unexpected HTTP error
  169. raise
  170. raise RetryDownload(err)
  171. except CertificateVerifyError:
  172. raise
  173. except TransportError as err:
  174. raise RetryDownload(err)
  175. def close_stream():
  176. if ctx.stream is not None:
  177. if ctx.tmpfilename != '-':
  178. ctx.stream.close()
  179. ctx.stream = None
  180. def download():
  181. data_len = ctx.data.headers.get('Content-length')
  182. if ctx.data.headers.get('Content-encoding'):
  183. # Content-encoding is present, Content-length is not reliable anymore as we are
  184. # doing auto decompression. (See: https://github.com/yt-dlp/yt-dlp/pull/6176)
  185. data_len = None
  186. # Range HTTP header may be ignored/unsupported by a webserver
  187. # (e.g. extractor/scivee.py, extractor/bambuser.py).
  188. # However, for a test we still would like to download just a piece of a file.
  189. # To achieve this we limit data_len to _TEST_FILE_SIZE and manually control
  190. # block size when downloading a file.
  191. if is_test and (data_len is None or int(data_len) > self._TEST_FILE_SIZE):
  192. data_len = self._TEST_FILE_SIZE
  193. if data_len is not None:
  194. data_len = int(data_len) + ctx.resume_len
  195. min_data_len = self.params.get('min_filesize')
  196. max_data_len = self.params.get('max_filesize')
  197. if min_data_len is not None and data_len < min_data_len:
  198. self.to_screen(
  199. f'\r[download] File is smaller than min-filesize ({data_len} bytes < {min_data_len} bytes). Aborting.')
  200. return False
  201. if max_data_len is not None and data_len > max_data_len:
  202. self.to_screen(
  203. f'\r[download] File is larger than max-filesize ({data_len} bytes > {max_data_len} bytes). Aborting.')
  204. return False
  205. byte_counter = 0 + ctx.resume_len
  206. block_size = ctx.block_size
  207. start = time.time()
  208. # measure time over whole while-loop, so slow_down() and best_block_size() work together properly
  209. now = None # needed for slow_down() in the first loop run
  210. before = start # start measuring
  211. def retry(e):
  212. close_stream()
  213. if ctx.tmpfilename == '-':
  214. ctx.resume_len = byte_counter
  215. else:
  216. try:
  217. ctx.resume_len = os.path.getsize(encodeFilename(ctx.tmpfilename))
  218. except FileNotFoundError:
  219. ctx.resume_len = 0
  220. raise RetryDownload(e)
  221. while True:
  222. try:
  223. # Download and write
  224. data_block = ctx.data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
  225. except TransportError as err:
  226. retry(err)
  227. byte_counter += len(data_block)
  228. # exit loop when download is finished
  229. if len(data_block) == 0:
  230. break
  231. # Open destination file just in time
  232. if ctx.stream is None:
  233. try:
  234. ctx.stream, ctx.tmpfilename = self.sanitize_open(
  235. ctx.tmpfilename, ctx.open_mode)
  236. assert ctx.stream is not None
  237. ctx.filename = self.undo_temp_name(ctx.tmpfilename)
  238. self.report_destination(ctx.filename)
  239. except OSError as err:
  240. self.report_error(f'unable to open for writing: {err}')
  241. return False
  242. if self.params.get('xattr_set_filesize', False) and data_len is not None:
  243. try:
  244. write_xattr(ctx.tmpfilename, 'user.ytdl.filesize', str(data_len).encode())
  245. except (XAttrUnavailableError, XAttrMetadataError) as err:
  246. self.report_error(f'unable to set filesize xattr: {err}')
  247. try:
  248. ctx.stream.write(data_block)
  249. except OSError as err:
  250. self.to_stderr('\n')
  251. self.report_error(f'unable to write data: {err}')
  252. return False
  253. # Apply rate limit
  254. self.slow_down(start, now, byte_counter - ctx.resume_len)
  255. # end measuring of one loop run
  256. now = time.time()
  257. after = now
  258. # Adjust block size
  259. if not self.params.get('noresizebuffer', False):
  260. block_size = self.best_block_size(after - before, len(data_block))
  261. before = after
  262. # Progress message
  263. speed = self.calc_speed(start, now, byte_counter - ctx.resume_len)
  264. if ctx.data_len is None:
  265. eta = None
  266. else:
  267. eta = self.calc_eta(start, time.time(), ctx.data_len - ctx.resume_len, byte_counter - ctx.resume_len)
  268. self._hook_progress({
  269. 'status': 'downloading',
  270. 'downloaded_bytes': byte_counter,
  271. 'total_bytes': ctx.data_len,
  272. 'tmpfilename': ctx.tmpfilename,
  273. 'filename': ctx.filename,
  274. 'eta': eta,
  275. 'speed': speed,
  276. 'elapsed': now - ctx.start_time,
  277. 'ctx_id': info_dict.get('ctx_id'),
  278. }, info_dict)
  279. if data_len is not None and byte_counter == data_len:
  280. break
  281. if speed and speed < (self.params.get('throttledratelimit') or 0):
  282. # The speed must stay below the limit for 3 seconds
  283. # This prevents raising error when the speed temporarily goes down
  284. if ctx.throttle_start is None:
  285. ctx.throttle_start = now
  286. elif now - ctx.throttle_start > 3:
  287. if ctx.stream is not None and ctx.tmpfilename != '-':
  288. ctx.stream.close()
  289. raise ThrottledDownload
  290. elif speed:
  291. ctx.throttle_start = None
  292. if ctx.stream is None:
  293. self.to_stderr('\n')
  294. self.report_error('Did not get any data blocks')
  295. return False
  296. if not is_test and ctx.chunk_size and ctx.content_len is not None and byte_counter < ctx.content_len:
  297. ctx.resume_len = byte_counter
  298. raise NextFragment
  299. if ctx.tmpfilename != '-':
  300. ctx.stream.close()
  301. if data_len is not None and byte_counter != data_len:
  302. err = ContentTooShortError(byte_counter, int(data_len))
  303. retry(err)
  304. self.try_rename(ctx.tmpfilename, ctx.filename)
  305. # Update file modification time
  306. if self.params.get('updatetime', True):
  307. info_dict['filetime'] = self.try_utime(ctx.filename, ctx.data.headers.get('last-modified', None))
  308. self._hook_progress({
  309. 'downloaded_bytes': byte_counter,
  310. 'total_bytes': byte_counter,
  311. 'filename': ctx.filename,
  312. 'status': 'finished',
  313. 'elapsed': time.time() - ctx.start_time,
  314. 'ctx_id': info_dict.get('ctx_id'),
  315. }, info_dict)
  316. return True
  317. for retry in RetryManager(self.params.get('retries'), self.report_retry):
  318. try:
  319. establish_connection()
  320. return download()
  321. except RetryDownload as err:
  322. retry.error = err.source_error
  323. continue
  324. except NextFragment:
  325. retry.error = None
  326. retry.attempt -= 1
  327. continue
  328. except SucceedDownload:
  329. return True
  330. except: # noqa: E722
  331. close_stream()
  332. raise
  333. return False