4 from __future__ import absolute_import, unicode_literals
29 from string import ascii_letters
34 compat_get_terminal_size,
40 compat_tokenize_tokenize,
42 compat_urllib_request,
43 compat_urllib_request_DataHandler,
71 PerRequestProxyHandler,
76 register_socks_protocols,
87 UnavailableVideoError,
93 YoutubeDLCookieProcessor,
96 from .cache import Cache
97 from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER
98 from .extractor.openload import PhantomJSwrapper
99 from .downloader import get_suitable_downloader
100 from .downloader.rtmp import rtmpdump_version
101 from .postprocessor import (
104 FFmpegFixupStretchedPP,
109 from .version import __version__
111 if compat_os_name == 'nt':
115 class YoutubeDL(object):
118 YoutubeDL objects are the ones responsible for downloading the
119 actual video file and writing it to disk if the user has requested
120 it, among some other tasks. In most cases there should be one per
121 program. Since, given a video URL, the downloader doesn't know how to
122 extract all the needed information (a task that InfoExtractors do), it
123 has to pass the URL to one of them.
125 For this, YoutubeDL objects have a method that allows
126 InfoExtractors to be registered in a given order. When it is passed
127 a URL, the YoutubeDL object hands it to the first InfoExtractor it
128 finds that reports being able to handle it. The InfoExtractor extracts
129 all the information about the video or videos the URL refers to, and
130 YoutubeDL process the extracted information, possibly using a File
131 Downloader to download the video.
133 YoutubeDL objects accept a lot of parameters. In order not to saturate
134 the object constructor with arguments, it receives a dictionary of
135 options instead. These options are available through the params
136 attribute for the InfoExtractors to use. The YoutubeDL also
137 registers itself as the downloader in charge for the InfoExtractors
138 that are added to it, so this is a "mutual registration".
142 username: Username for authentication purposes.
143 password: Password for authentication purposes.
144 videopassword: Password for accessing a video.
145 ap_mso: Adobe Pass multiple-system operator identifier.
146 ap_username: Multiple-system operator account username.
147 ap_password: Multiple-system operator account password.
148 usenetrc: Use netrc for authentication instead.
149 verbose: Print additional info to stdout.
150 quiet: Do not print messages to stdout.
151 no_warnings: Do not print out anything for warnings.
152 forceurl: Force printing final URL.
153 forcetitle: Force printing title.
154 forceid: Force printing ID.
155 forcethumbnail: Force printing thumbnail URL.
156 forcedescription: Force printing description.
157 forcefilename: Force printing final filename.
158 forceduration: Force printing duration.
159 forcejson: Force printing info_dict as JSON.
160 dump_single_json: Force printing the info_dict of the whole playlist
161 (or video) as a single JSON line.
162 simulate: Do not download the video files.
163 format: Video format code. See options.py for more information.
164 outtmpl: Template for output names.
165 restrictfilenames: Do not allow "&" and spaces in file names
166 ignoreerrors: Do not stop on download errors.
167 force_generic_extractor: Force downloader to use the generic extractor
168 nooverwrites: Prevent overwriting files.
169 playliststart: Playlist item to start at.
170 playlistend: Playlist item to end at.
171 playlist_items: Specific indices of playlist to download.
172 playlistreverse: Download playlist items in reverse order.
173 playlistrandom: Download playlist items in random order.
174 matchtitle: Download only matching titles.
175 rejecttitle: Reject downloads for matching titles.
176 logger: Log messages to a logging.Logger instance.
177 logtostderr: Log messages to stderr instead of stdout.
178 writedescription: Write the video description to a .description file
179 writeinfojson: Write the video description to a .info.json file
180 writeannotations: Write the video annotations to a .annotations.xml file
181 writethumbnail: Write the thumbnail image to a file
182 write_all_thumbnails: Write all thumbnail formats to files
183 writesubtitles: Write the video subtitles to a file
184 writeautomaticsub: Write the automatically generated subtitles to a file
185 allsubtitles: Downloads all the subtitles of the video
186 (requires writesubtitles or writeautomaticsub)
187 listsubtitles: Lists all available subtitles for the video
188 subtitlesformat: The format code for subtitles
189 subtitleslangs: List of languages of the subtitles to download
190 keepvideo: Keep the video file after post-processing
191 daterange: A DateRange object, download only if the upload_date is in the range.
192 skip_download: Skip the actual download of the video file
193 cachedir: Location of the cache files in the filesystem.
194 False to disable filesystem cache.
195 noplaylist: Download single video instead of a playlist if in doubt.
196 age_limit: An integer representing the user's age in years.
197 Unsuitable videos for the given age are skipped.
198 min_views: An integer representing the minimum view count the video
199 must have in order to not be skipped.
200 Videos without view count information are always
201 downloaded. None for no limit.
202 max_views: An integer representing the maximum view count.
203 Videos that are more popular than that are not
205 Videos without view count information are always
206 downloaded. None for no limit.
207 download_archive: File name of a file where all downloads are recorded.
208 Videos already present in the file are not downloaded
210 cookiefile: File name where cookies should be read from and dumped to.
211 nocheckcertificate:Do not verify SSL certificates
212 prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
213 At the moment, this is only supported by YouTube.
214 proxy: URL of the proxy server to use
215 geo_verification_proxy: URL of the proxy to use for IP address verification
216 on geo-restricted sites.
217 socket_timeout: Time to wait for unresponsive hosts, in seconds
218 bidi_workaround: Work around buggy terminals without bidirectional text
219 support, using fribidi
220 debug_printtraffic:Print out sent and received HTTP traffic
221 include_ads: Download ads as well
222 default_search: Prepend this string if an input url is not valid.
223 'auto' for elaborate guessing
224 encoding: Use this encoding instead of the system-specified.
225 extract_flat: Do not resolve URLs, return the immediate result.
226 Pass in 'in_playlist' to only show this behavior for
228 postprocessors: A list of dictionaries, each with an entry
229 * key: The name of the postprocessor. See
230 youtube_dl/postprocessor/__init__.py for a list.
231 as well as any further keyword arguments for the
233 progress_hooks: A list of functions that get called on download
234 progress, with a dictionary with the entries
235 * status: One of "downloading", "error", or "finished".
236 Check this first and ignore unknown values.
238 If status is one of "downloading", or "finished", the
239 following properties may also be present:
240 * filename: The final filename (always present)
241 * tmpfilename: The filename we're currently writing to
242 * downloaded_bytes: Bytes on disk
243 * total_bytes: Size of the whole file, None if unknown
244 * total_bytes_estimate: Guess of the eventual file size,
246 * elapsed: The number of seconds since download started.
247 * eta: The estimated time in seconds, None if unknown
248 * speed: The download speed in bytes/second, None if
250 * fragment_index: The counter of the currently
251 downloaded video fragment.
252 * fragment_count: The number of fragments (= individual
253 files that will be merged)
255 Progress hooks are guaranteed to be called at least once
256 (with status "finished") if the download is successful.
257 merge_output_format: Extension to use when merging formats.
258 fixup: Automatically correct known faults of the file.
260 - "never": do nothing
261 - "warn": only emit a warning
262 - "detect_or_warn": check whether we can do anything
263 about it, warn otherwise (default)
264 source_address: Client-side IP address to bind to.
265 call_home: Boolean, true iff we are allowed to contact the
266 youtube-dl servers for debugging.
267 sleep_interval: Number of seconds to sleep before each download when
268 used alone or a lower bound of a range for randomized
269 sleep before each download (minimum possible number
270 of seconds to sleep) when used along with
272 max_sleep_interval:Upper bound of a range for randomized sleep before each
273 download (maximum possible number of seconds to sleep).
274 Must only be used along with sleep_interval.
275 Actual sleep time will be a random float from range
276 [sleep_interval; max_sleep_interval].
277 listformats: Print an overview of available video formats and exit.
278 list_thumbnails: Print a table of all thumbnails and exit.
279 match_filter: A function that gets called with the info_dict of
281 If it returns a message, the video is ignored.
282 If it returns None, the video is downloaded.
283 match_filter_func in utils.py is one example for this.
284 no_color: Do not emit color codes in output.
285 geo_bypass: Bypass geographic restriction via faking X-Forwarded-For
288 Two-letter ISO 3166-2 country code that will be used for
289 explicit geographic restriction bypassing via faking
290 X-Forwarded-For HTTP header
292 IP range in CIDR notation that will be used similarly to
295 The following options determine which downloader is picked:
296 external_downloader: Executable of the external downloader to call.
297 None or unset for standard (built-in) downloader.
298 hls_prefer_native: Use the native HLS downloader instead of ffmpeg/avconv
299 if True, otherwise use ffmpeg/avconv if False, otherwise
300 use downloader suggested by extractor if None.
302 The following parameters are not used by YoutubeDL itself, they are used by
303 the downloader (see youtube_dl/downloader/common.py):
304 nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
305 noresizebuffer, retries, continuedl, noprogress, consoletitle,
306 xattr_set_filesize, external_downloader_args, hls_use_mpegts,
309 The following options are used by the post processors:
310 prefer_ffmpeg: If False, use avconv instead of ffmpeg if both are available,
311 otherwise prefer ffmpeg.
312 postprocessor_args: A list of additional command-line arguments for the
315 The following options are used by the Youtube extractor:
316 youtube_include_dash_manifest: If True (default), DASH manifests and related
317 data will be downloaded and processed by extractor.
318 You can reduce network I/O by disabling it if you don't
322 _NUMERIC_FIELDS = set((
323 'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
324 'timestamp', 'upload_year', 'upload_month', 'upload_day',
325 'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
326 'average_rating', 'comment_count', 'age_limit',
327 'start_time', 'end_time',
328 'chapter_number', 'season_number', 'episode_number',
329 'track_number', 'disc_number', 'release_year',
336 _download_retcode = None
337 _num_downloads = None
340 def __init__(self, params=None, auto_init=True):
341 """Create a FileDownloader object with the given options."""
345 self._ies_instances = {}
347 self._progress_hooks = []
348 self._download_retcode = 0
349 self._num_downloads = 0
350 self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
351 self._err_file = sys.stderr
354 'nocheckcertificate': False,
356 self.params.update(params)
357 self.cache = Cache(self)
359 def check_deprecated(param, option, suggestion):
360 if self.params.get(param) is not None:
362 '%s is deprecated. Use %s instead.' % (option, suggestion))
366 if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
367 if self.params.get('geo_verification_proxy') is None:
368 self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']
370 check_deprecated('autonumber_size', '--autonumber-size', 'output template with %(autonumber)0Nd, where N in the number of digits')
371 check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
372 check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
374 if params.get('bidi_workaround', False):
377 master, slave = pty.openpty()
378 width = compat_get_terminal_size().columns
382 width_args = ['-w', str(width)]
384 stdin=subprocess.PIPE,
386 stderr=self._err_file)
388 self._output_process = subprocess.Popen(
389 ['bidiv'] + width_args, **sp_kwargs
392 self._output_process = subprocess.Popen(
393 ['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
394 self._output_channel = os.fdopen(master, 'rb')
395 except OSError as ose:
396 if ose.errno == errno.ENOENT:
397 self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH.')
401 if (sys.platform != 'win32' and
402 sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968'] and
403 not params.get('restrictfilenames', False)):
404 # Unicode filesystem API will throw errors (#1474, #13027)
406 'Assuming --restrict-filenames since file system encoding '
407 'cannot encode all characters. '
408 'Set the LC_ALL environment variable to fix this.')
409 self.params['restrictfilenames'] = True
411 if isinstance(params.get('outtmpl'), bytes):
413 'Parameter outtmpl is bytes, but should be a unicode string. '
414 'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')
419 self.print_debug_header()
420 self.add_default_info_extractors()
422 for pp_def_raw in self.params.get('postprocessors', []):
423 pp_class = get_postprocessor(pp_def_raw['key'])
424 pp_def = dict(pp_def_raw)
426 pp = pp_class(self, **compat_kwargs(pp_def))
427 self.add_post_processor(pp)
429 for ph in self.params.get('progress_hooks', []):
430 self.add_progress_hook(ph)
432 register_socks_protocols()
434 def warn_if_short_id(self, argv):
435 # short YouTube ID starting with dash?
437 i for i, a in enumerate(argv)
438 if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
442 [a for i, a in enumerate(argv) if i not in idxs] +
443 ['--'] + [argv[i] for i in idxs]
446 'Long argument string detected. '
447 'Use -- to separate parameters and URLs, like this:\n%s\n' %
448 args_to_str(correct_argv))
450 def add_info_extractor(self, ie):
451 """Add an InfoExtractor object to the end of the list."""
453 if not isinstance(ie, type):
454 self._ies_instances[ie.ie_key()] = ie
455 ie.set_downloader(self)
457 def get_info_extractor(self, ie_key):
459 Get an instance of an IE with name ie_key, it will try to get one from
460 the _ies list, if there's no instance it will create a new one and add
461 it to the extractor list.
463 ie = self._ies_instances.get(ie_key)
465 ie = get_info_extractor(ie_key)()
466 self.add_info_extractor(ie)
469 def add_default_info_extractors(self):
471 Add the InfoExtractors returned by gen_extractors to the end of the list
473 for ie in gen_extractor_classes():
474 self.add_info_extractor(ie)
476 def add_post_processor(self, pp):
477 """Add a PostProcessor object to the end of the chain."""
479 pp.set_downloader(self)
481 def add_progress_hook(self, ph):
482 """Add the progress hook (currently only for the file downloader)"""
483 self._progress_hooks.append(ph)
485 def _bidi_workaround(self, message):
486 if not hasattr(self, '_output_channel'):
489 assert hasattr(self, '_output_process')
490 assert isinstance(message, compat_str)
491 line_count = message.count('\n') + 1
492 self._output_process.stdin.write((message + '\n').encode('utf-8'))
493 self._output_process.stdin.flush()
494 res = ''.join(self._output_channel.readline().decode('utf-8')
495 for _ in range(line_count))
496 return res[:-len('\n')]
498 def to_screen(self, message, skip_eol=False):
499 """Print message to stdout if not in quiet mode."""
500 return self.to_stdout(message, skip_eol, check_quiet=True)
502 def _write_string(self, s, out=None):
503 write_string(s, out=out, encoding=self.params.get('encoding'))
505 def to_stdout(self, message, skip_eol=False, check_quiet=False):
506 """Print message to stdout if not in quiet mode."""
507 if self.params.get('logger'):
508 self.params['logger'].debug(message)
509 elif not check_quiet or not self.params.get('quiet', False):
510 message = self._bidi_workaround(message)
511 terminator = ['\n', ''][skip_eol]
512 output = message + terminator
514 self._write_string(output, self._screen_file)
516 def to_stderr(self, message):
517 """Print message to stderr."""
518 assert isinstance(message, compat_str)
519 if self.params.get('logger'):
520 self.params['logger'].error(message)
522 message = self._bidi_workaround(message)
523 output = message + '\n'
524 self._write_string(output, self._err_file)
526 def to_console_title(self, message):
527 if not self.params.get('consoletitle', False):
529 if compat_os_name == 'nt':
530 if ctypes.windll.kernel32.GetConsoleWindow():
531 # c_wchar_p() might not be necessary if `message` is
532 # already of type unicode()
533 ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
534 elif 'TERM' in os.environ:
535 self._write_string('\033]0;%s\007' % message, self._screen_file)
537 def save_console_title(self):
538 if not self.params.get('consoletitle', False):
540 if self.params.get('simulate', False):
542 if compat_os_name != 'nt' and 'TERM' in os.environ:
543 # Save the title on stack
544 self._write_string('\033[22;0t', self._screen_file)
546 def restore_console_title(self):
547 if not self.params.get('consoletitle', False):
549 if self.params.get('simulate', False):
551 if compat_os_name != 'nt' and 'TERM' in os.environ:
552 # Restore the title from stack
553 self._write_string('\033[23;0t', self._screen_file)
556 self.save_console_title()
559 def __exit__(self, *args):
560 self.restore_console_title()
562 if self.params.get('cookiefile') is not None:
563 self.cookiejar.save(ignore_discard=True, ignore_expires=True)
565 def trouble(self, message=None, tb=None):
566 """Determine action to take when a download problem appears.
568 Depending on if the downloader has been configured to ignore
569 download errors or not, this method may throw an exception or
570 not when errors are found, after printing the message.
572 tb, if given, is additional traceback information.
574 if message is not None:
575 self.to_stderr(message)
576 if self.params.get('verbose'):
578 if sys.exc_info()[0]: # if .trouble has been called from an except block
580 if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
581 tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
582 tb += encode_compat_str(traceback.format_exc())
584 tb_data = traceback.format_list(traceback.extract_stack())
585 tb = ''.join(tb_data)
587 if not self.params.get('ignoreerrors', False):
588 if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
589 exc_info = sys.exc_info()[1].exc_info
591 exc_info = sys.exc_info()
592 raise DownloadError(message, exc_info)
593 self._download_retcode = 1
595 def report_warning(self, message):
597 Print the message to stderr, it will be prefixed with 'WARNING:'
598 If stderr is a tty file the 'WARNING:' will be colored
600 if self.params.get('logger') is not None:
601 self.params['logger'].warning(message)
603 if self.params.get('no_warnings'):
605 if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
606 _msg_header = '\033[0;33mWARNING:\033[0m'
608 _msg_header = 'WARNING:'
609 warning_message = '%s %s' % (_msg_header, message)
610 self.to_stderr(warning_message)
612 def report_error(self, message, tb=None):
614 Do the same as trouble, but prefixes the message with 'ERROR:', colored
615 in red if stderr is a tty file.
617 if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
618 _msg_header = '\033[0;31mERROR:\033[0m'
620 _msg_header = 'ERROR:'
621 error_message = '%s %s' % (_msg_header, message)
622 self.trouble(error_message, tb)
624 def report_file_already_downloaded(self, file_name):
625 """Report file has already been fully downloaded."""
627 self.to_screen('[download] %s has already been downloaded' % file_name)
628 except UnicodeEncodeError:
629 self.to_screen('[download] The file has already been downloaded')
631 def prepare_filename(self, info_dict):
632 """Generate the output filename."""
634 template_dict = dict(info_dict)
636 template_dict['epoch'] = int(time.time())
637 autonumber_size = self.params.get('autonumber_size')
638 if autonumber_size is None:
640 template_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads
641 if template_dict.get('resolution') is None:
642 if template_dict.get('width') and template_dict.get('height'):
643 template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height'])
644 elif template_dict.get('height'):
645 template_dict['resolution'] = '%sp' % template_dict['height']
646 elif template_dict.get('width'):
647 template_dict['resolution'] = '%dx?' % template_dict['width']
649 sanitize = lambda k, v: sanitize_filename(
651 restricted=self.params.get('restrictfilenames'),
652 is_id=(k == 'id' or k.endswith('_id')))
653 template_dict = dict((k, v if isinstance(v, compat_numeric_types) else sanitize(k, v))
654 for k, v in template_dict.items()
655 if v is not None and not isinstance(v, (list, tuple, dict)))
656 template_dict = collections.defaultdict(lambda: 'NA', template_dict)
658 outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
660 # For fields playlist_index and autonumber convert all occurrences
661 # of %(field)s to %(field)0Nd for backward compatibility
662 field_size_compat_map = {
663 'playlist_index': len(str(template_dict['n_entries'])),
664 'autonumber': autonumber_size,
666 FIELD_SIZE_COMPAT_RE = r'(?<!%)%\((?P<field>autonumber|playlist_index)\)s'
667 mobj = re.search(FIELD_SIZE_COMPAT_RE, outtmpl)
670 FIELD_SIZE_COMPAT_RE,
671 r'%%(\1)0%dd' % field_size_compat_map[mobj.group('field')],
674 # Missing numeric fields used together with integer presentation types
675 # in format specification will break the argument substitution since
676 # string 'NA' is returned for missing fields. We will patch output
677 # template for missing fields to meet string presentation type.
678 for numeric_field in self._NUMERIC_FIELDS:
679 if numeric_field not in template_dict:
680 # As of [1] format syntax is:
681 # %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
682 # 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
686 \({0}\) # mapping key
687 (?:[#0\-+ ]+)? # conversion flags (optional)
688 (?:\d+)? # minimum field width (optional)
689 (?:\.\d+)? # precision (optional)
690 [hlL]? # length modifier (optional)
691 [diouxXeEfFgGcrs%] # conversion type
694 FORMAT_RE.format(numeric_field),
695 r'%({0})s'.format(numeric_field), outtmpl)
697 # expand_path translates '%%' into '%' and '$$' into '$'
698 # correspondingly that is not what we want since we need to keep
699 # '%%' intact for template dict substitution step. Working around
700 # with boundary-alike separator hack.
701 sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
702 outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))
704 # outtmpl should be expand_path'ed before template dict substitution
705 # because meta fields may contain env variables we don't want to
706 # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
707 # title "Hello $PATH", we don't want `$PATH` to be expanded.
708 filename = expand_path(outtmpl).replace(sep, '') % template_dict
710 # Temporary fix for #4787
711 # 'Treat' all problem characters by passing filename through preferredencoding
712 # to workaround encoding issues with subprocess on python2 @ Windows
713 if sys.version_info < (3, 0) and sys.platform == 'win32':
714 filename = encodeFilename(filename, True).decode(preferredencoding())
715 return sanitize_path(filename)
716 except ValueError as err:
717 self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
720 def _match_entry(self, info_dict, incomplete):
721 """ Returns None iff the file should be downloaded """
723 video_title = info_dict.get('title', info_dict.get('id', 'video'))
724 if 'title' in info_dict:
725 # This can happen when we're just evaluating the playlist
726 title = info_dict['title']
727 matchtitle = self.params.get('matchtitle', False)
729 if not re.search(matchtitle, title, re.IGNORECASE):
730 return '"' + title + '" title did not match pattern "' + matchtitle + '"'
731 rejecttitle = self.params.get('rejecttitle', False)
733 if re.search(rejecttitle, title, re.IGNORECASE):
734 return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
735 date = info_dict.get('upload_date')
737 dateRange = self.params.get('daterange', DateRange())
738 if date not in dateRange:
739 return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
740 view_count = info_dict.get('view_count')
741 if view_count is not None:
742 min_views = self.params.get('min_views')
743 if min_views is not None and view_count < min_views:
744 return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
745 max_views = self.params.get('max_views')
746 if max_views is not None and view_count > max_views:
747 return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
748 if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
749 return 'Skipping "%s" because it is age restricted' % video_title
750 if self.in_download_archive(info_dict):
751 return '%s has already been recorded in archive' % video_title
754 match_filter = self.params.get('match_filter')
755 if match_filter is not None:
756 ret = match_filter(info_dict)
763 def add_extra_info(info_dict, extra_info):
764 '''Set the keys from extra_info in info dict if they are missing'''
765 for key, value in extra_info.items():
766 info_dict.setdefault(key, value)
768 def extract_info(self, url, download=True, ie_key=None, extra_info={},
769 process=True, force_generic_extractor=False):
771 Returns a list with a dictionary for each video we find.
772 If 'download', also downloads the videos.
773 extra_info is a dict containing the extra values to add to each result
776 if not ie_key and force_generic_extractor:
780 ies = [self.get_info_extractor(ie_key)]
785 if not ie.suitable(url):
788 ie = self.get_info_extractor(ie.ie_key())
790 self.report_warning('The program functionality for this site has been marked as broken, '
791 'and will probably not work.')
794 ie_result = ie.extract(url)
795 if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
797 if isinstance(ie_result, list):
798 # Backwards compatibility: old IE result format
800 '_type': 'compat_list',
801 'entries': ie_result,
803 self.add_default_extra_info(ie_result, ie, url)
805 return self.process_ie_result(ie_result, download, extra_info)
808 except GeoRestrictedError as e:
811 msg += '\nThis video is available in %s.' % ', '.join(
812 map(ISO3166Utils.short2full, e.countries))
813 msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
814 self.report_error(msg)
816 except ExtractorError as e: # An error we somewhat expected
817 self.report_error(compat_str(e), e.format_traceback())
819 except MaxDownloadsReached:
821 except Exception as e:
822 if self.params.get('ignoreerrors', False):
823 self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
828 self.report_error('no suitable InfoExtractor for URL %s' % url)
830 def add_default_extra_info(self, ie_result, ie, url):
831 self.add_extra_info(ie_result, {
832 'extractor': ie.IE_NAME,
834 'webpage_url_basename': url_basename(url),
835 'extractor_key': ie.ie_key(),
838 def process_ie_result(self, ie_result, download=True, extra_info={}):
840 Take the result of the ie(may be modified) and resolve all unresolved
841 references (URLs, playlist items).
843 It will also download the videos if 'download'.
844 Returns the resolved ie_result.
846 result_type = ie_result.get('_type', 'video')
848 if result_type in ('url', 'url_transparent'):
849 ie_result['url'] = sanitize_url(ie_result['url'])
850 extract_flat = self.params.get('extract_flat', False)
851 if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or
852 extract_flat is True):
853 if self.params.get('forcejson', False):
854 self.to_stdout(json.dumps(ie_result))
857 if result_type == 'video':
858 self.add_extra_info(ie_result, extra_info)
859 return self.process_video_result(ie_result, download=download)
860 elif result_type == 'url':
861 # We have to add extra_info to the results because it may be
862 # contained in a playlist
863 return self.extract_info(ie_result['url'],
865 ie_key=ie_result.get('ie_key'),
866 extra_info=extra_info)
867 elif result_type == 'url_transparent':
868 # Use the information from the embedding page
869 info = self.extract_info(
870 ie_result['url'], ie_key=ie_result.get('ie_key'),
871 extra_info=extra_info, download=False, process=False)
873 # extract_info may return None when ignoreerrors is enabled and
874 # extraction failed with an error, don't crash and return early
879 force_properties = dict(
880 (k, v) for k, v in ie_result.items() if v is not None)
881 for f in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'):
882 if f in force_properties:
883 del force_properties[f]
884 new_result = info.copy()
885 new_result.update(force_properties)
887 # Extracted info may not be a video result (i.e.
888 # info.get('_type', 'video') != video) but rather an url or
889 # url_transparent. In such cases outer metadata (from ie_result)
890 # should be propagated to inner one (info). For this to happen
891 # _type of info should be overridden with url_transparent. This
892 # fixes issue from https://github.com/rg3/youtube-dl/pull/11163.
893 if new_result.get('_type') == 'url':
894 new_result['_type'] = 'url_transparent'
896 return self.process_ie_result(
897 new_result, download=download, extra_info=extra_info)
898 elif result_type in ('playlist', 'multi_video'):
899 # We process each entry in the playlist
900 playlist = ie_result.get('title') or ie_result.get('id')
901 self.to_screen('[download] Downloading playlist: %s' % playlist)
903 playlist_results = []
905 playliststart = self.params.get('playliststart', 1) - 1
906 playlistend = self.params.get('playlistend')
907 # For backwards compatibility, interpret -1 as whole list
908 if playlistend == -1:
911 playlistitems_str = self.params.get('playlist_items')
913 if playlistitems_str is not None:
914 def iter_playlistitems(format):
915 for string_segment in format.split(','):
916 if '-' in string_segment:
917 start, end = string_segment.split('-')
918 for item in range(int(start), int(end) + 1):
921 yield int(string_segment)
922 playlistitems = orderedSet(iter_playlistitems(playlistitems_str))
924 ie_entries = ie_result['entries']
926 def make_playlistitems_entries(list_ie_entries):
927 num_entries = len(list_ie_entries)
929 list_ie_entries[i - 1] for i in playlistitems
930 if -num_entries <= i - 1 < num_entries]
932 def report_download(num_entries):
934 '[%s] playlist %s: Downloading %d videos' %
935 (ie_result['extractor'], playlist, num_entries))
937 if isinstance(ie_entries, list):
938 n_all_entries = len(ie_entries)
940 entries = make_playlistitems_entries(ie_entries)
942 entries = ie_entries[playliststart:playlistend]
943 n_entries = len(entries)
945 '[%s] playlist %s: Collected %d video ids (downloading %d of them)' %
946 (ie_result['extractor'], playlist, n_all_entries, n_entries))
947 elif isinstance(ie_entries, PagedList):
950 for item in playlistitems:
951 entries.extend(ie_entries.getslice(
955 entries = ie_entries.getslice(
956 playliststart, playlistend)
957 n_entries = len(entries)
958 report_download(n_entries)
961 entries = make_playlistitems_entries(list(itertools.islice(
962 ie_entries, 0, max(playlistitems))))
964 entries = list(itertools.islice(
965 ie_entries, playliststart, playlistend))
966 n_entries = len(entries)
967 report_download(n_entries)
969 if self.params.get('playlistreverse', False):
970 entries = entries[::-1]
972 if self.params.get('playlistrandom', False):
973 random.shuffle(entries)
975 x_forwarded_for = ie_result.get('__x_forwarded_for_ip')
977 for i, entry in enumerate(entries, 1):
978 self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
979 # This __x_forwarded_for_ip thing is a bit ugly but requires
982 entry['__x_forwarded_for_ip'] = x_forwarded_for
984 'n_entries': n_entries,
985 'playlist': playlist,
986 'playlist_id': ie_result.get('id'),
987 'playlist_title': ie_result.get('title'),
988 'playlist_uploader': ie_result.get('uploader'),
989 'playlist_uploader_id': ie_result.get('uploader_id'),
990 'playlist_index': i + playliststart,
991 'extractor': ie_result['extractor'],
992 'webpage_url': ie_result['webpage_url'],
993 'webpage_url_basename': url_basename(ie_result['webpage_url']),
994 'extractor_key': ie_result['extractor_key'],
997 reason = self._match_entry(entry, incomplete=True)
998 if reason is not None:
999 self.to_screen('[download] ' + reason)
1002 entry_result = self.process_ie_result(entry,
1005 playlist_results.append(entry_result)
1006 ie_result['entries'] = playlist_results
1007 self.to_screen('[download] Finished downloading playlist: %s' % playlist)
1009 elif result_type == 'compat_list':
1010 self.report_warning(
1011 'Extractor %s returned a compat_list result. '
1012 'It needs to be updated.' % ie_result.get('extractor'))
1015 self.add_extra_info(
1018 'extractor': ie_result['extractor'],
1019 'webpage_url': ie_result['webpage_url'],
1020 'webpage_url_basename': url_basename(ie_result['webpage_url']),
1021 'extractor_key': ie_result['extractor_key'],
1025 ie_result['entries'] = [
1026 self.process_ie_result(_fixup(r), download, extra_info)
1027 for r in ie_result['entries']
1031 raise Exception('Invalid result type: %s' % result_type)
# Builds a predicate f(format_dict) -> bool from one "[...]" filter
# expression of a format selector (e.g. "height<=480", "ext=mp4",
# "protocol!*=dash").
# NOTE(review): interior lines are elided in this extract (the embedded
# original line numbers jump); comments describe only the visible code.
1033 def _build_format_filter(self, filter_spec):
1034 " Returns a function to filter the formats according to the filter_spec "
# Numeric comparison form: <key><op><value>, where <value> may carry an
# SI/binary size suffix (k/M/G/... with optional 'i' and optional 'B').
# A trailing '?' ("none_inclusive") makes formats lacking the key pass.
1044 operator_rex = re.compile(r'''(?x)\s*
1045 (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)
1046 \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
1047 (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)
1049 ''' % '|'.join(map(re.escape, OPERATORS.keys())))
1050 m = operator_rex.search(filter_spec)
# Try a plain integer first; on failure fall back to parse_filesize for
# suffixed values, then retry with an explicit 'B' so "500K" reads as
# "500KB". Reject the spec if neither interpretation works.
1053 comparison_value = int(m.group('value'))
1055 comparison_value = parse_filesize(m.group('value'))
1056 if comparison_value is None:
1057 comparison_value = parse_filesize(m.group('value') + 'B')
1058 if comparison_value is None:
1060 'Invalid value %r in format specification %r' % (
1061 m.group('value'), filter_spec))
1062 op = OPERATORS[m.group('op')]
# String comparison form for metadata fields; '^=', '$=' and '*=' mean
# startswith / endswith / substring respectively, and a leading '!'
# negates the operator.
1067 '^=': lambda attr, value: attr.startswith(value),
1068 '$=': lambda attr, value: attr.endswith(value),
1069 '*=': lambda attr, value: value in attr,
1071 str_operator_rex = re.compile(r'''(?x)
1072 \s*(?P<key>ext|acodec|vcodec|container|protocol|format_id)
1073 \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?
1074 \s*(?P<value>[a-zA-Z0-9._-]+)
1076 ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
1077 m = str_operator_rex.search(filter_spec)
1079 comparison_value = m.group('value')
1080 str_op = STR_OPERATORS[m.group('op')]
1081 if m.group('negation'):
1082 op = lambda attr, value: not str_op(attr, value)
# Neither the numeric nor the string pattern matched the spec.
1087 raise ValueError('Invalid filter specification %r' % filter_spec)
# Returned predicate: a format missing the key passes only when the
# trailing '?' (none_inclusive) was present in the spec.
1090 actual_value = f.get(m.group('key'))
1091 if actual_value is None:
1092 return m.group('none_inclusive')
1093 return op(actual_value, comparison_value)
# Chooses the default --format spec when the user supplied none.
# NOTE(review): interior lines are elided in this extract (the embedded
# original line numbers jump); comments describe only the visible code.
1096 def _default_format_spec(self, info_dict, download=True):
# Merging separate video+audio streams needs a working ffmpeg/avconv.
1099 merger = FFmpegMergerPP(self)
1100 return merger.available and merger.can_merge()
# Conditions under which merging is avoided: simulate mode, output to
# stdout ('-' template), and live streams (elided bodies follow each).
1103 if self.params.get('simulate', False):
1107 if self.params.get('outtmpl', DEFAULT_OUTTMPL) == '-':
1109 if info_dict.get('is_live'):
# Prefer separate best video + best audio, falling back to 'best'; the
# list is reversed under an elided condition — presumably when merging
# is unavailable, so that plain 'best' wins. TODO(review): confirm.
1115 req_format_list = ['bestvideo+bestaudio', 'best']
1117 req_format_list.reverse()
1118 return '/'.join(req_format_list)
# Compiles a format-selection expression (e.g. "bestvideo+bestaudio/best",
# "mp4[height<=480]") into a callable selector: the string is tokenized
# with the stdlib tokenize module, parsed into a tree of FormatSelector
# nodes, and each node is turned into a generator function over a context
# dict carrying 'formats' and 'incomplete_formats'.
# NOTE(review): interior lines are elided in this extract (the embedded
# original line numbers jump); comments describe only the visible code.
1120 def build_format_selector(self, format_spec):
# Builds a SyntaxError pointing (via a caret line) at the offending
# column of the original spec string.
1121 def syntax_error(note, start):
1123 'Invalid format specification: '
1124 '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1])
1125 return SyntaxError(message)
# Node-type tags for FormatSelector (other tags — SINGLE, GROUP, MERGE —
# are defined on elided lines).
1127 PICKFIRST = 'PICKFIRST'
# type: one of the tags above; selector: payload (string, tuple or list);
# filters: list of "[...]" filter strings applied to the node's output.
1131 FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
# Consumes tokens up to the closing ']' and returns the raw filter text.
1133 def _parse_filter(tokens):
1135 for type, string, start, _, _ in tokens:
1136 if type == tokenize.OP and string == ']':
1137 return ''.join(filter_parts)
1139 filter_parts.append(string)
1141 def _remove_unused_ops(tokens):
1142 # Remove operators that we don't use and join them with the surrounding strings
1143 # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
1144 ALLOWED_OPS = ('/', '+', ',', '(', ')')
1145 last_string, last_start, last_end, last_line = None, None, None, None
1146 for type, string, start, end, line in tokens:
1147 if type == tokenize.OP and string == '[':
# Flush any accumulated NAME before passing the bracket through.
1149 yield tokenize.NAME, last_string, last_start, last_end, last_line
1151 yield type, string, start, end, line
1152 # everything inside brackets will be handled by _parse_filter
1153 for type, string, start, end, line in tokens:
1154 yield type, string, start, end, line
1155 if type == tokenize.OP and string == ']':
1157 elif type == tokenize.OP and string in ALLOWED_OPS:
1159 yield tokenize.NAME, last_string, last_start, last_end, last_line
1161 yield type, string, start, end, line
# Any other NAME/NUMBER/OP token is glued onto the pending NAME.
1162 elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
1164 last_string = string
1168 last_string += string
# Flush the final accumulated NAME, if any.
1170 yield tokenize.NAME, last_string, last_start, last_end, last_line
# Recursive-descent parser over the token stream. The inside_* flags tell
# the recursion which delimiters terminate the current production and
# must be pushed back to the caller via restore_last_token().
1172 def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
1174 current_selector = None
1175 for type, string, start, _, _ in tokens:
1176 # ENCODING is only defined in python 3.x
1177 if type == getattr(tokenize, 'ENCODING', None):
1179 elif type in [tokenize.NAME, tokenize.NUMBER]:
1180 current_selector = FormatSelector(SINGLE, string, [])
1181 elif type == tokenize.OP:
1183 if not inside_group:
1184 # ')' will be handled by the parentheses group
1185 tokens.restore_last_token()
1187 elif inside_merge and string in ['/', ',']:
1188 tokens.restore_last_token()
1190 elif inside_choice and string == ',':
1191 tokens.restore_last_token()
# ',' separates top-level alternatives (download several formats).
1194 if not current_selector:
1195 raise syntax_error('"," must follow a format selector', start)
1196 selectors.append(current_selector)
1197 current_selector = None
# '/' builds a PICKFIRST node: first working choice wins.
1199 if not current_selector:
1200 raise syntax_error('"/" must follow a format selector', start)
1201 first_choice = current_selector
1202 second_choice = _parse_format_selection(tokens, inside_choice=True)
1203 current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
# '[' starts a filter; a bare filter implicitly applies to 'best'.
1205 if not current_selector:
1206 current_selector = FormatSelector(SINGLE, 'best', [])
1207 format_filter = _parse_filter(tokens)
1208 current_selector.filters.append(format_filter)
# '(' starts a parenthesized GROUP.
1210 if current_selector:
1211 raise syntax_error('Unexpected "("', start)
1212 group = _parse_format_selection(tokens, inside_group=True)
1213 current_selector = FormatSelector(GROUP, group, [])
# '+' builds a MERGE node: video selector + audio selector.
1215 video_selector = current_selector
1216 audio_selector = _parse_format_selection(tokens, inside_merge=True)
1217 if not video_selector or not audio_selector:
1218 raise syntax_error('"+" must be between two format selectors', start)
1219 current_selector = FormatSelector(MERGE, (video_selector, audio_selector), [])
1221 raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
1222 elif type == tokenize.ENDMARKER:
1224 if current_selector:
1225 selectors.append(current_selector)
# Turns a FormatSelector tree (or list of trees) into a generator
# function over the selection context.
1228 def _build_selector_function(selector):
# A list of selectors: chain all of their outputs.
1229 if isinstance(selector, list):
1230 fs = [_build_selector_function(s) for s in selector]
1232 def selector_function(ctx):
1234 for format in f(ctx):
1236 return selector_function
1237 elif selector.type == GROUP:
1238 selector_function = _build_selector_function(selector.selector)
# PICKFIRST: yield the first alternative that produced any formats.
1239 elif selector.type == PICKFIRST:
1240 fs = [_build_selector_function(s) for s in selector.selector]
1242 def selector_function(ctx):
1244 picked_formats = list(f(ctx))
1246 return picked_formats
# SINGLE: a named format — 'all', 'best'/'worst', 'bestaudio', ...,
# an extension, or a literal format_id.
1248 elif selector.type == SINGLE:
1249 format_spec = selector.selector
1251 def selector_function(ctx):
1252 formats = list(ctx['formats'])
1255 if format_spec == 'all':
# 'best'/'worst' prefer combined (audio+video) formats; formats are
# assumed sorted worst-to-best, hence index 0 vs -1.
1258 elif format_spec in ['best', 'worst', None]:
1259 format_idx = 0 if format_spec == 'worst' else -1
1260 audiovideo_formats = [
1262 if f.get('vcodec') != 'none' and f.get('acodec') != 'none']
1263 if audiovideo_formats:
1264 yield audiovideo_formats[format_idx]
1265 # for extractors with incomplete formats (audio only (soundcloud)
1266 # or video only (imgur)) we will fallback to best/worst
1267 # {video,audio}-only format
1268 elif ctx['incomplete_formats']:
1269 yield formats[format_idx]
1270 elif format_spec == 'bestaudio':
1273 if f.get('vcodec') == 'none']
1275 yield audio_formats[-1]
1276 elif format_spec == 'worstaudio':
1279 if f.get('vcodec') == 'none']
1281 yield audio_formats[0]
1282 elif format_spec == 'bestvideo':
1285 if f.get('acodec') == 'none']
1287 yield video_formats[-1]
1288 elif format_spec == 'worstvideo':
1291 if f.get('acodec') == 'none']
1293 yield video_formats[0]
# Otherwise match by extension (for known extensions) or format_id.
1295 extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav']
1296 if format_spec in extensions:
1297 filter_f = lambda f: f['ext'] == format_spec
1299 filter_f = lambda f: f['format_id'] == format_spec
1300 matches = list(filter(filter_f, formats))
# MERGE: pair a video-only and an audio-only format into one synthetic
# format dict that the downloader/merger postprocessor understands.
1303 elif selector.type == MERGE:
1304 def _merge(formats_info):
1305 format_1, format_2 = [f['format_id'] for f in formats_info]
1306 # The first format must contain the video and the
1308 if formats_info[0].get('vcodec') == 'none':
1309 self.report_error('The first format must '
1310 'contain the video, try using '
1311 '"-f %s+%s"' % (format_2, format_1))
1313 # Formats must be opposite (video+audio)
1314 if formats_info[0].get('acodec') == 'none' and formats_info[1].get('acodec') == 'none':
1316 'Both formats %s and %s are video-only, you must specify "-f video+audio"'
1317 % (format_1, format_2))
# Container for the merge output: the video's ext unless the user
# forced one via merge_output_format.
1320 formats_info[0]['ext']
1321 if self.params.get('merge_output_format') is None
1322 else self.params['merge_output_format'])
# Video attributes come from formats_info[0], audio attributes
# from formats_info[1].
1324 'requested_formats': formats_info,
1325 'format': '%s+%s' % (formats_info[0].get('format'),
1326 formats_info[1].get('format')),
1327 'format_id': '%s+%s' % (formats_info[0].get('format_id'),
1328 formats_info[1].get('format_id')),
1329 'width': formats_info[0].get('width'),
1330 'height': formats_info[0].get('height'),
1331 'resolution': formats_info[0].get('resolution'),
1332 'fps': formats_info[0].get('fps'),
1333 'vcodec': formats_info[0].get('vcodec'),
1334 'vbr': formats_info[0].get('vbr'),
1335 'stretched_ratio': formats_info[0].get('stretched_ratio'),
1336 'acodec': formats_info[1].get('acodec'),
1337 'abr': formats_info[1].get('abr'),
1340 video_selector, audio_selector = map(_build_selector_function, selector.selector)
# Each side gets its own deep copy of the context so that the two
# sub-selections cannot interfere with each other.
1342 def selector_function(ctx):
1343 for pair in itertools.product(
1344 video_selector(copy.deepcopy(ctx)), audio_selector(copy.deepcopy(ctx))):
# Wrap the node's selector with its "[...]" filters, applied to a deep
# copy of the context so the caller's format list is left untouched.
1347 filters = [self._build_format_filter(f) for f in selector.filters]
1349 def final_selector(ctx):
1350 ctx_copy = copy.deepcopy(ctx)
1351 for _filter in filters:
1352 ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
1353 return selector_function(ctx_copy)
1354 return final_selector
# Tokenize the spec; tokenize.TokenError means unbalanced brackets or
# parentheses somewhere in the string.
1356 stream = io.BytesIO(format_spec.encode('utf-8'))
1358 tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
1359 except tokenize.TokenError:
1360 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
# Iterator over the token list supporting one-token pushback, which the
# recursive parser needs at production boundaries.
1362 class TokenIterator(object):
1363 def __init__(self, tokens):
1364 self.tokens = tokens
1371 if self.counter >= len(self.tokens):
1372 raise StopIteration()
1373 value = self.tokens[self.counter]
1379 def restore_last_token(self):
1382 parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
1383 return _build_selector_function(parsed_selector)
# Computes the HTTP headers to attach to a format dict: start from the
# global std_headers, overlay the extractor-provided 'http_headers',
# then add matching cookies and any internal X-Forwarded-For IP.
# NOTE(review): interior lines (including the final 'return res') are
# elided in this extract; comments describe only the visible code.
1385 def _calc_headers(self, info_dict):
1386 res = std_headers.copy()
1388 add_headers = info_dict.get('http_headers')
1390 res.update(add_headers)
# Cookies from self.cookiejar that apply to the format's URL.
1392 cookies = self._calc_cookies(info_dict)
1394 res['Cookie'] = cookies
# __x_forwarded_for_ip is internal plumbing propagated through playlist
# entries (see process_ie_result) — presumably for geo restriction
# bypass; only used when the extractor did not set the header itself.
1396 if 'X-Forwarded-For' not in res:
1397 x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
1398 if x_forwarded_for_ip:
1399 res['X-Forwarded-For'] = x_forwarded_for_ip
def _calc_cookies(self, info_dict):
    """Return the Cookie header value that self.cookiejar would send
    for the URL in info_dict (None when no stored cookie matches)."""
    request = sanitized_Request(info_dict['url'])
    self.cookiejar.add_cookie_header(request)
    return request.get_header('Cookie')
# Takes the raw metadata dict an extractor returned for a single video,
# sanitizes and normalizes it in place (ids, numeric fields, thumbnails,
# upload date, subtitles, formats), runs format selection, and hands each
# chosen format to process_info() for download.
# NOTE(review): interior lines are elided in this extract (the embedded
# original line numbers jump); comments describe only the visible code.
1408 def process_video_result(self, info_dict, download=True):
1409 assert info_dict.get('_type', 'video') == 'video'
1411 if 'id' not in info_dict:
1412 raise ExtractorError('Missing "id" field in extractor result')
1413 if 'title' not in info_dict:
1414 raise ExtractorError('Missing "title" field in extractor result')
# Helpers that coerce wrongly-typed extractor fields in place, warning
# that the extractor itself is at fault.
1416 def report_force_conversion(field, field_not, conversion):
1417 self.report_warning(
1418 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
1419 % (field, field_not, conversion))
1421 def sanitize_string_field(info, string_field):
1422 field = info.get(string_field)
1423 if field is None or isinstance(field, compat_str):
1425 report_force_conversion(string_field, 'a string', 'string')
1426 info[string_field] = compat_str(field)
1428 def sanitize_numeric_fields(info):
1429 for numeric_field in self._NUMERIC_FIELDS:
1430 field = info.get(numeric_field)
1431 if field is None or isinstance(field, compat_numeric_types):
1433 report_force_conversion(numeric_field, 'numeric', 'int')
1434 info[numeric_field] = int_or_none(field)
1436 sanitize_string_field(info_dict, 'id')
1437 sanitize_numeric_fields(info_dict)
1439 if 'playlist' not in info_dict:
1440 # It isn't part of a playlist
1441 info_dict['playlist'] = None
1442 info_dict['playlist_index'] = None
# Normalize thumbnails: synthesize the list from a lone 'thumbnail'
# field if needed, sort worst-to-best by preference/size, sanitize URLs
# and derive 'resolution' strings.
1444 thumbnails = info_dict.get('thumbnails')
1445 if thumbnails is None:
1446 thumbnail = info_dict.get('thumbnail')
1448 info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
1450 thumbnails.sort(key=lambda t: (
1451 t.get('preference') if t.get('preference') is not None else -1,
1452 t.get('width') if t.get('width') is not None else -1,
1453 t.get('height') if t.get('height') is not None else -1,
1454 t.get('id') if t.get('id') is not None else '', t.get('url')))
1455 for i, t in enumerate(thumbnails):
1456 t['url'] = sanitize_url(t['url'])
1457 if t.get('width') and t.get('height'):
1458 t['resolution'] = '%dx%d' % (t['width'], t['height'])
1459 if t.get('id') is None:
1462 if self.params.get('list_thumbnails'):
1463 self.list_thumbnails(info_dict)
# Backfill the single 'thumbnail' field from the best (last) entry.
1466 thumbnail = info_dict.get('thumbnail')
1468 info_dict['thumbnail'] = sanitize_url(thumbnail)
1470 info_dict['thumbnail'] = thumbnails[-1]['url']
1472 if 'display_id' not in info_dict and 'id' in info_dict:
1473 info_dict['display_id'] = info_dict['id']
# Derive upload_date (YYYYMMDD) from a UNIX timestamp when absent.
1475 if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None:
1476 # Working around out-of-range timestamp values (e.g. negative ones on Windows,
1477 # see http://bugs.python.org/issue1646728)
1479 upload_date = datetime.datetime.utcfromtimestamp(info_dict['timestamp'])
1480 info_dict['upload_date'] = upload_date.strftime('%Y%m%d')
1481 except (ValueError, OverflowError, OSError):
1484 # Auto generate title fields corresponding to the *_number fields when missing
1485 # in order to always have clean titles. This is very common for TV series.
1486 for field in ('chapter', 'season', 'episode'):
1487 if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
1488 info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
# Sanitize subtitle/caption URLs and infer missing subtitle extensions.
1490 for cc_kind in ('subtitles', 'automatic_captions'):
1491 cc = info_dict.get(cc_kind)
1493 for _, subtitle in cc.items():
1494 for subtitle_format in subtitle:
1495 if subtitle_format.get('url'):
1496 subtitle_format['url'] = sanitize_url(subtitle_format['url'])
1497 if subtitle_format.get('ext') is None:
1498 subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
1500 automatic_captions = info_dict.get('automatic_captions')
1501 subtitles = info_dict.get('subtitles')
1503 if self.params.get('listsubtitles', False):
1504 if 'automatic_captions' in info_dict:
1505 self.list_subtitles(
1506 info_dict['id'], automatic_captions, 'automatic captions')
1507 self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
1510 info_dict['requested_subtitles'] = self.process_subtitles(
1511 info_dict['id'], subtitles, automatic_captions)
1513 # We now pick which formats have to be downloaded
1514 if info_dict.get('formats') is None:
1515 # There's only one format available
1516 formats = [info_dict]
1518 formats = info_dict['formats']
1521 raise ExtractorError('No video formats found!')
# Drop formats whose 'url' is missing/empty rather than failing later.
1523 def is_wellformed(f):
1526 self.report_warning(
1527 '"url" field is missing or empty - skipping format, '
1528 'there is an error in extractor')
1530 if isinstance(url, bytes):
1531 sanitize_string_field(f, 'url')
1534 # Filter out malformed formats for better extraction robustness
1535 formats = list(filter(is_wellformed, formats))
1539 # We check that all the formats have the format and format_id fields
1540 for i, format in enumerate(formats):
1541 sanitize_string_field(format, 'format_id')
1542 sanitize_numeric_fields(format)
1543 format['url'] = sanitize_url(format['url'])
# A missing format_id falls back to the format's list index.
1544 if not format.get('format_id'):
1545 format['format_id'] = compat_str(i)
1547 # Sanitize format_id from characters used in format selector expression
1548 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
# Group formats by id so duplicates can be disambiguated below.
1549 format_id = format['format_id']
1550 if format_id not in formats_dict:
1551 formats_dict[format_id] = []
1552 formats_dict[format_id].append(format)
1554 # Make sure all formats have unique format_id
1555 for format_id, ambiguous_formats in formats_dict.items():
1556 if len(ambiguous_formats) > 1:
1557 for i, format in enumerate(ambiguous_formats):
1558 format['format_id'] = '%s-%d' % (format_id, i)
# Fill in derived per-format fields: human-readable 'format' string,
# extension, protocol, and per-format HTTP headers.
1560 for i, format in enumerate(formats):
1561 if format.get('format') is None:
1562 format['format'] = '{id} - {res}{note}'.format(
1563 id=format['format_id'],
1564 res=self.format_resolution(format),
1565 note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
1567 # Automatically determine file extension if missing
1568 if format.get('ext') is None:
1569 format['ext'] = determine_ext(format['url']).lower()
1570 # Automatically determine protocol if missing (useful for format
1571 # selection purposes)
1572 if format.get('protocol') is None:
1573 format['protocol'] = determine_protocol(format)
1574 # Add HTTP headers, so that external programs can use them from the
1576 full_format_info = info_dict.copy()
1577 full_format_info.update(format)
1578 format['http_headers'] = self._calc_headers(full_format_info)
1579 # Remove private housekeeping stuff
1580 if '__x_forwarded_for_ip' in info_dict:
1581 del info_dict['__x_forwarded_for_ip']
1583 # TODO Central sorting goes here
1585 if formats[0] is not info_dict:
1586 # only set the 'formats' fields if the original info_dict list them
1587 # otherwise we end up with a circular reference, the first (and unique)
1588 # element in the 'formats' field in info_dict is info_dict itself,
1589 # which can't be exported to json
1590 info_dict['formats'] = formats
1591 if self.params.get('listformats'):
1592 self.list_formats(info_dict)
# Resolve the format spec (user-supplied or default) into a selector.
1595 req_format = self.params.get('format')
1596 if req_format is None:
1597 req_format = self._default_format_spec(info_dict, download=download)
1598 if self.params.get('verbose'):
1599 self.to_stdout('[debug] Default format spec: %s' % req_format)
1601 format_selector = self.build_format_selector(req_format)
1603 # While in format selection we may need to have an access to the original
1604 # format set in order to calculate some metrics or do some processing.
1605 # For now we need to be able to guess whether original formats provided
1606 # by extractor are incomplete or not (i.e. whether extractor provides only
1607 # video-only or audio-only formats) for proper formats selection for
1608 # extractors with such incomplete formats (see
1609 # https://github.com/rg3/youtube-dl/pull/5556).
1610 # Since formats may be filtered during format selection and may not match
1611 # the original formats the results may be incorrect. Thus original formats
1612 # or pre-calculated metrics should be passed to format selection routines
1614 # We will pass a context object containing all necessary additional data
1615 # instead of just formats.
1616 # This fixes incorrect format selection issue (see
1617 # https://github.com/rg3/youtube-dl/issues/10083).
1618 incomplete_formats = (
1619 # All formats are video-only or
1620 all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats) or
1621 # all formats are audio-only
1622 all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))
1626 'incomplete_formats': incomplete_formats,
1629 formats_to_download = list(format_selector(ctx))
1630 if not formats_to_download:
1631 raise ExtractorError('requested format not available',
# Each selected format is processed (and downloaded) independently.
1635 if len(formats_to_download) > 1:
1636 self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
1637 for format in formats_to_download:
1638 new_info = dict(info_dict)
1639 new_info.update(format)
1640 self.process_info(new_info)
1641 # We update the info dict with the best quality format (backwards compatibility)
1642 info_dict.update(formats_to_download[-1])
# NOTE(review): interior lines are elided in this extract (the embedded
# original line numbers jump); comments describe only the visible code.
1645 def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
1646 """Select the requested subtitles and their format"""
# Pool the available subtitles according to the write* options; manual
# subtitles take precedence over automatic captions for the same lang.
1648 if normal_subtitles and self.params.get('writesubtitles'):
1649 available_subs.update(normal_subtitles)
1650 if automatic_captions and self.params.get('writeautomaticsub'):
1651 for lang, cap_info in automatic_captions.items():
1652 if lang not in available_subs:
1653 available_subs[lang] = cap_info
# Nothing requested (or nothing available) — bail out (elided body).
1655 if (not self.params.get('writesubtitles') and not
1656 self.params.get('writeautomaticsub') or not
# Decide which languages to fetch: all, the user's explicit list,
# English when present, or an arbitrary first available language.
1660 if self.params.get('allsubtitles', False):
1661 requested_langs = available_subs.keys()
1663 if self.params.get('subtitleslangs', False):
1664 requested_langs = self.params.get('subtitleslangs')
1665 elif 'en' in available_subs:
1666 requested_langs = ['en']
1668 requested_langs = [list(available_subs.keys())[0]]
# subtitlesformat is a '/'-separated preference list, e.g. 'srt/ass'.
1670 formats_query = self.params.get('subtitlesformat', 'best')
1671 formats_preference = formats_query.split('/') if formats_query else []
1673 for lang in requested_langs:
1674 formats = available_subs.get(lang)
1676 self.report_warning('%s subtitles not available for %s' % (lang, video_id))
# Pick the first preferred ext that the language actually has; warn
# and fall back (elided) when nothing matches the query.
1678 for ext in formats_preference:
1682 matches = list(filter(lambda f: f['ext'] == ext, formats))
1688 self.report_warning(
1689 'No subtitle format found matching "%s" for language %s, '
1690 'using %s' % (formats_query, lang, f['ext']))
1694 def process_info(self, info_dict):
1695 """Process a single resolved IE result."""
1697 assert info_dict.get('_type', 'video') == 'video'
1699 max_downloads = self.params.get('max_downloads')
1700 if max_downloads is not None:
1701 if self._num_downloads >= int(max_downloads):
1702 raise MaxDownloadsReached()
1704 info_dict['fulltitle'] = info_dict['title']
1705 if len(info_dict['title']) > 200:
1706 info_dict['title'] = info_dict['title'][:197] + '...'
1708 if 'format' not in info_dict:
1709 info_dict['format'] = info_dict['ext']
1711 reason = self._match_entry(info_dict, incomplete=False)
1712 if reason is not None:
1713 self.to_screen('[download] ' + reason)
1716 self._num_downloads += 1
1718 info_dict['_filename'] = filename = self.prepare_filename(info_dict)
1721 if self.params.get('forcetitle', False):
1722 self.to_stdout(info_dict['fulltitle'])
1723 if self.params.get('forceid', False):
1724 self.to_stdout(info_dict['id'])
1725 if self.params.get('forceurl', False):
1726 if info_dict.get('requested_formats') is not None:
1727 for f in info_dict['requested_formats']:
1728 self.to_stdout(f['url'] + f.get('play_path', ''))
1730 # For RTMP URLs, also include the playpath
1731 self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
1732 if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None:
1733 self.to_stdout(info_dict['thumbnail'])
1734 if self.params.get('forcedescription', False) and info_dict.get('description') is not None:
1735 self.to_stdout(info_dict['description'])
1736 if self.params.get('forcefilename', False) and filename is not None:
1737 self.to_stdout(filename)
1738 if self.params.get('forceduration', False) and info_dict.get('duration') is not None:
1739 self.to_stdout(formatSeconds(info_dict['duration']))
1740 if self.params.get('forceformat', False):
1741 self.to_stdout(info_dict['format'])
1742 if self.params.get('forcejson', False):
1743 self.to_stdout(json.dumps(info_dict))
1745 # Do nothing else if in simulate mode
1746 if self.params.get('simulate', False):
1749 if filename is None:
1752 def ensure_dir_exists(path):
1754 dn = os.path.dirname(path)
1755 if dn and not os.path.exists(dn):
1758 except (OSError, IOError) as err:
1759 self.report_error('unable to create directory ' + error_to_compat_str(err))
1762 if not ensure_dir_exists(sanitize_path(encodeFilename(filename))):
1765 if self.params.get('writedescription', False):
1766 descfn = replace_extension(filename, 'description', info_dict.get('ext'))
1767 if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)):
1768 self.to_screen('[info] Video description is already present')
1769 elif info_dict.get('description') is None:
1770 self.report_warning('There\'s no description to write.')
1773 self.to_screen('[info] Writing video description to: ' + descfn)
1774 with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
1775 descfile.write(info_dict['description'])
1776 except (OSError, IOError):
1777 self.report_error('Cannot write description file ' + descfn)
1780 if self.params.get('writeannotations', False):
1781 annofn = replace_extension(filename, 'annotations.xml', info_dict.get('ext'))
1782 if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)):
1783 self.to_screen('[info] Video annotations are already present')
1786 self.to_screen('[info] Writing video annotations to: ' + annofn)
1787 with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
1788 annofile.write(info_dict['annotations'])
1789 except (KeyError, TypeError):
1790 self.report_warning('There are no annotations to write.')
1791 except (OSError, IOError):
1792 self.report_error('Cannot write annotations file: ' + annofn)
1795 subtitles_are_requested = any([self.params.get('writesubtitles', False),
1796 self.params.get('writeautomaticsub')])
1798 if subtitles_are_requested and info_dict.get('requested_subtitles'):
1799 # subtitles download errors are already managed as troubles in relevant IE
1800 # that way it will silently go on when used with unsupporting IE
1801 subtitles = info_dict['requested_subtitles']
1802 ie = self.get_info_extractor(info_dict['extractor_key'])
1803 for sub_lang, sub_info in subtitles.items():
1804 sub_format = sub_info['ext']
1805 sub_filename = subtitles_filename(filename, sub_lang, sub_format)
1806 if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
1807 self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format))
1809 self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
1810 if sub_info.get('data') is not None:
1812 # Use newline='' to prevent conversion of newline characters
1813 # See https://github.com/rg3/youtube-dl/issues/10268
1814 with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
1815 subfile.write(sub_info['data'])
1816 except (OSError, IOError):
1817 self.report_error('Cannot write subtitles file ' + sub_filename)
1821 sub_data = ie._request_webpage(
1822 sub_info['url'], info_dict['id'], note=False).read()
1823 with io.open(encodeFilename(sub_filename), 'wb') as subfile:
1824 subfile.write(sub_data)
1825 except (ExtractorError, IOError, OSError, ValueError) as err:
1826 self.report_warning('Unable to download subtitle for "%s": %s' %
1827 (sub_lang, error_to_compat_str(err)))
1830 if self.params.get('writeinfojson', False):
1831 infofn = replace_extension(filename, 'info.json', info_dict.get('ext'))
1832 if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
1833 self.to_screen('[info] Video description metadata is already present')
1835 self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
1837 write_json_file(self.filter_requested_info(info_dict), infofn)
1838 except (OSError, IOError):
1839 self.report_error('Cannot write metadata to JSON file ' + infofn)
1842 self._write_thumbnails(info_dict, filename)
1844 if not self.params.get('skip_download', False):
1847 fd = get_suitable_downloader(info, self.params)(self, self.params)
1848 for ph in self._progress_hooks:
1849 fd.add_progress_hook(ph)
1850 if self.params.get('verbose'):
1851 self.to_stdout('[debug] Invoking downloader on %r' % info.get('url'))
1852 return fd.download(name, info)
1854 if info_dict.get('requested_formats') is not None:
1857 merger = FFmpegMergerPP(self)
1858 if not merger.available:
1860 self.report_warning('You have requested multiple '
1861 'formats but ffmpeg or avconv are not installed.'
1862 ' The formats won\'t be merged.')
1864 postprocessors = [merger]
1866 def compatible_formats(formats):
1867 video, audio = formats
1869 video_ext, audio_ext = video.get('ext'), audio.get('ext')
1870 if video_ext and audio_ext:
1872 ('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'),
1875 for exts in COMPATIBLE_EXTS:
1876 if video_ext in exts and audio_ext in exts:
1878 # TODO: Check acodec/vcodec
1881 filename_real_ext = os.path.splitext(filename)[1][1:]
1883 os.path.splitext(filename)[0]
1884 if filename_real_ext == info_dict['ext']
1886 requested_formats = info_dict['requested_formats']
1887 if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats):
1888 info_dict['ext'] = 'mkv'
1889 self.report_warning(
1890 'Requested formats are incompatible for merge and will be merged into mkv.')
1891 # Ensure filename always has a correct extension for successful merge
1892 filename = '%s.%s' % (filename_wo_ext, info_dict['ext'])
1893 if os.path.exists(encodeFilename(filename)):
1895 '[download] %s has already been downloaded and '
1896 'merged' % filename)
1898 for f in requested_formats:
1899 new_info = dict(info_dict)
1901 fname = prepend_extension(
1902 self.prepare_filename(new_info),
1903 'f%s' % f['format_id'], new_info['ext'])
1904 if not ensure_dir_exists(fname):
1906 downloaded.append(fname)
1907 partial_success = dl(fname, new_info)
1908 success = success and partial_success
1909 info_dict['__postprocessors'] = postprocessors
1910 info_dict['__files_to_merge'] = downloaded
1912 # Just a single file
1913 success = dl(filename, info_dict)
1914 except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
1915 self.report_error('unable to download video data: %s' % error_to_compat_str(err))
1917 except (OSError, IOError) as err:
1918 raise UnavailableVideoError(err)
1919 except (ContentTooShortError, ) as err:
1920 self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
1923 if success and filename != '-':
1925 fixup_policy = self.params.get('fixup')
1926 if fixup_policy is None:
1927 fixup_policy = 'detect_or_warn'
1929 INSTALL_FFMPEG_MESSAGE = 'Install ffmpeg or avconv to fix this automatically.'
1931 stretched_ratio = info_dict.get('stretched_ratio')
1932 if stretched_ratio is not None and stretched_ratio != 1:
1933 if fixup_policy == 'warn':
1934 self.report_warning('%s: Non-uniform pixel ratio (%s)' % (
1935 info_dict['id'], stretched_ratio))
1936 elif fixup_policy == 'detect_or_warn':
1937 stretched_pp = FFmpegFixupStretchedPP(self)
1938 if stretched_pp.available:
1939 info_dict.setdefault('__postprocessors', [])
1940 info_dict['__postprocessors'].append(stretched_pp)
1942 self.report_warning(
1943 '%s: Non-uniform pixel ratio (%s). %s'
1944 % (info_dict['id'], stretched_ratio, INSTALL_FFMPEG_MESSAGE))
1946 assert fixup_policy in ('ignore', 'never')
1948 if (info_dict.get('requested_formats') is None and
1949 info_dict.get('container') == 'm4a_dash'):
1950 if fixup_policy == 'warn':
1951 self.report_warning(
1952 '%s: writing DASH m4a. '
1953 'Only some players support this container.'
1955 elif fixup_policy == 'detect_or_warn':
1956 fixup_pp = FFmpegFixupM4aPP(self)
1957 if fixup_pp.available:
1958 info_dict.setdefault('__postprocessors', [])
1959 info_dict['__postprocessors'].append(fixup_pp)
1961 self.report_warning(
1962 '%s: writing DASH m4a. '
1963 'Only some players support this container. %s'
1964 % (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
1966 assert fixup_policy in ('ignore', 'never')
1968 if (info_dict.get('protocol') == 'm3u8_native' or
1969 info_dict.get('protocol') == 'm3u8' and
1970 self.params.get('hls_prefer_native')):
1971 if fixup_policy == 'warn':
1972 self.report_warning('%s: malformed AAC bitstream detected.' % (
1974 elif fixup_policy == 'detect_or_warn':
1975 fixup_pp = FFmpegFixupM3u8PP(self)
1976 if fixup_pp.available:
1977 info_dict.setdefault('__postprocessors', [])
1978 info_dict['__postprocessors'].append(fixup_pp)
1980 self.report_warning(
1981 '%s: malformed AAC bitstream detected. %s'
1982 % (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
1984 assert fixup_policy in ('ignore', 'never')
1987 self.post_process(filename, info_dict)
1988 except (PostProcessingError) as err:
1989 self.report_error('postprocessing: %s' % str(err))
1991 self.record_download_archive(info_dict)
def download(self, url_list):
    """Download every URL in url_list and return the accumulated retcode.

    Raises SameFileError when several URLs would all be written to one
    fixed (non-template) output file name.
    """
    outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
    # A fixed template (not stdout, no '%' fields) can only hold one
    # download, unless at most one download is permitted anyway.
    fixed_template = (
        outtmpl != '-' and
        '%' not in outtmpl and
        self.params.get('max_downloads') != 1)
    if len(url_list) > 1 and fixed_template:
        raise SameFileError(outtmpl)

    force_generic = self.params.get('force_generic_extractor', False)
    for url in url_list:
        try:
            # extract_info also performs the actual download
            res = self.extract_info(url, force_generic_extractor=force_generic)
        except UnavailableVideoError:
            self.report_error('unable to download video')
        except MaxDownloadsReached:
            self.to_screen('[info] Maximum number of downloaded files reached.')
            raise
        else:
            if self.params.get('dump_single_json', False):
                self.to_stdout(json.dumps(res))

    return self._download_retcode
def download_with_info_file(self, info_filename):
    """Re-run a download from a previously dumped info-JSON file."""
    with contextlib.closing(fileinput.FileInput(
            [info_filename], mode='r',
            openhook=fileinput.hook_encoded('utf-8'))) as f:
        # FileInput has no read(); join its lines before JSON-decoding
        info = self.filter_requested_info(json.loads('\n'.join(f)))
    try:
        self.process_ie_result(info, download=True)
    except DownloadError:
        webpage_url = info.get('webpage_url')
        if webpage_url is None:
            raise
        # The stored info may be stale; retry from the original page URL.
        self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
        return self.download([webpage_url])
    return self._download_retcode
def filter_requested_info(info_dict):
    """Return a copy of info_dict without the per-run 'requested_*' keys."""
    dropped = ('requested_formats', 'requested_subtitles')
    return {k: v for k, v in info_dict.items() if k not in dropped}
def post_process(self, filename, ie_info):
    """Run all the postprocessors on the given file."""
    info = dict(ie_info)
    info['filepath'] = filename
    # Video-specific postprocessors run before the globally registered ones.
    chain = list(ie_info.get('__postprocessors') or [])
    chain.extend(self._pps)
    for pp in chain:
        files_to_delete = []
        try:
            files_to_delete, info = pp.run(info)
        except PostProcessingError as e:
            self.report_error(e.msg)
        if not files_to_delete or self.params.get('keepvideo', False):
            continue
        for old_filename in files_to_delete:
            self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
            try:
                os.remove(encodeFilename(old_filename))
            except (IOError, OSError):
                self.report_warning('Unable to remove downloaded original file')
2063 def _make_archive_id(self, info_dict):
2064 video_id = info_dict.get('id')
2067 # Future-proof against any change in case
2068 # and backwards compatibility with prior versions
2069 extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
2070 if extractor is None:
2071 url = str_or_none(info_dict.get('url'))
2074 # Try to find matching extractor for the URL and take its ie_key
2075 for ie in self._ies:
2076 if ie.suitable(url):
2077 extractor = ie.ie_key()
2081 return extractor.lower() + ' ' + video_id
def in_download_archive(self, info_dict):
    """Return True if this video is already recorded in the archive file."""
    fn = self.params.get('download_archive')
    if fn is None:
        return False

    vid_id = self._make_archive_id(info_dict)
    if not vid_id:
        return False  # Incomplete video information

    try:
        with locked_file(fn, 'r', encoding='utf-8') as archive_file:
            return any(line.strip() == vid_id for line in archive_file)
    except IOError as ioe:
        # A missing archive file just means nothing was recorded yet.
        if ioe.errno != errno.ENOENT:
            raise
    return False
def record_download_archive(self, info_dict):
    """Append this video's archive id to the download-archive file."""
    fn = self.params.get('download_archive')
    if fn is None:
        return
    vid_id = self._make_archive_id(info_dict)
    # By this point the video must have complete identifying information.
    assert vid_id
    with locked_file(fn, 'a', encoding='utf-8') as archive_file:
        archive_file.write('%s\n' % vid_id)
def format_resolution(format, default='unknown'):
    """Return a human-readable resolution string for a format dict."""
    if format.get('vcodec') == 'none':
        return 'audio only'
    if format.get('resolution') is not None:
        return format['resolution']
    width, height = format.get('width'), format.get('height')
    if height is not None:
        # 'WxH' when both dimensions are known, 'Hp' otherwise
        return '%sx%s' % (width, height) if width is not None else '%sp' % height
    if width is not None:
        return '%dx?' % width
    return default
2128 def _format_note(self, fdict):
2130 if fdict.get('ext') in ['f4f', 'f4m']:
2131 res += '(unsupported) '
2132 if fdict.get('language'):
2135 res += '[%s] ' % fdict['language']
2136 if fdict.get('format_note') is not None:
2137 res += fdict['format_note'] + ' '
2138 if fdict.get('tbr') is not None:
2139 res += '%4dk ' % fdict['tbr']
2140 if fdict.get('container') is not None:
2143 res += '%s container' % fdict['container']
2144 if (fdict.get('vcodec') is not None and
2145 fdict.get('vcodec') != 'none'):
2148 res += fdict['vcodec']
2149 if fdict.get('vbr') is not None:
2151 elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
2153 if fdict.get('vbr') is not None:
2154 res += '%4dk' % fdict['vbr']
2155 if fdict.get('fps') is not None:
2158 res += '%sfps' % fdict['fps']
2159 if fdict.get('acodec') is not None:
2162 if fdict['acodec'] == 'none':
2165 res += '%-5s' % fdict['acodec']
2166 elif fdict.get('abr') is not None:
2170 if fdict.get('abr') is not None:
2171 res += '@%3dk' % fdict['abr']
2172 if fdict.get('asr') is not None:
2173 res += ' (%5dHz)' % fdict['asr']
2174 if fdict.get('filesize') is not None:
2177 res += format_bytes(fdict['filesize'])
2178 elif fdict.get('filesize_approx') is not None:
2181 res += '~' + format_bytes(fdict['filesize_approx'])
def list_formats(self, info_dict):
    """Print a table of all the formats available for this video."""
    formats = info_dict.get('formats', [info_dict])
    rows = [
        [f['format_id'], f['ext'], self.format_resolution(f), self._format_note(f)]
        for f in formats
        if f.get('preference') is None or f['preference'] >= -1000]
    if len(formats) > 1:
        # Tag the last listed row; separator only when a note already exists.
        sep = ' ' if rows[-1][-1] else ''
        rows[-1][-1] += sep + '(best)'
    self.to_screen(
        '[info] Available formats for %s:\n%s' %
        (info_dict['id'],
         render_table(['format code', 'extension', 'resolution', 'note'], rows)))
def list_thumbnails(self, info_dict):
    """Print a table of the video's available thumbnails."""
    thumbnails = info_dict.get('thumbnails')
    if not thumbnails:
        self.to_screen('[info] No thumbnails present for %s' % info_dict['id'])
        return
    self.to_screen('[info] Thumbnails for %s:' % info_dict['id'])
    rows = [
        [t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']]
        for t in thumbnails]
    self.to_screen(render_table(['ID', 'width', 'height', 'URL'], rows))
def list_subtitles(self, video_id, subtitles, name='subtitles'):
    """Print the available subtitle languages and their formats."""
    if not subtitles:
        self.to_screen('%s has no %s' % (video_id, name))
        return
    self.to_screen('Available %s for %s:' % (name, video_id))
    rows = [
        [lang, ', '.join(f['ext'] for f in reversed(formats))]
        for lang, formats in subtitles.items()]
    self.to_screen(render_table(['Language', 'formats'], rows))
def urlopen(self, req):
    """ Start an HTTP download """
    if isinstance(req, compat_basestring):
        # Bare URL strings get wrapped so sanitization applies uniformly.
        req = sanitized_Request(req)
    return self._opener.open(req, timeout=self._socket_timeout)
def print_debug_header(self):
    """Emit verbose diagnostics: versions, encodings, exe versions, proxies."""
    if not self.params.get('verbose'):
        return

    if type('') is not compat_str:
        # Python 2.6 on SLES11 SP1 (https://github.com/rg3/youtube-dl/issues/3326)
        self.report_warning(
            'Your Python is broken! Update to a newer and supported version')

    stdout_encoding = getattr(
        sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
    encoding_str = (
        '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
            locale.getpreferredencoding(),
            sys.getfilesystemencoding(),
            stdout_encoding,
            self.get_encoding()))
    write_string(encoding_str, encoding=None)

    self._write_string('[debug] youtube-dl version ' + __version__ + '\n')
    if _LAZY_LOADER:
        self._write_string('[debug] Lazy loading extractors enabled' + '\n')
    try:
        # Best effort: report the git revision when running from a checkout.
        sp = subprocess.Popen(
            ['git', 'rev-parse', '--short', 'HEAD'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            cwd=os.path.dirname(os.path.abspath(__file__)))
        out, _ = sp.communicate()
        out = out.decode().strip()
        if re.match('[0-9a-f]+', out):
            self._write_string('[debug] Git HEAD: ' + out + '\n')
    except Exception:
        try:
            sys.exc_clear()  # Python 2 only; absent on Python 3
        except Exception:
            pass

    def python_implementation():
        impl_name = platform.python_implementation()
        if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
            return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
        return impl_name

    self._write_string('[debug] Python version %s (%s) - %s\n' % (
        platform.python_version(), python_implementation(),
        platform_name()))

    exe_versions = FFmpegPostProcessor.get_versions(self)
    exe_versions['rtmpdump'] = rtmpdump_version()
    exe_versions['phantomjs'] = PhantomJSwrapper._version()
    exe_str = ', '.join(
        '%s %s' % (exe, v)
        for exe, v in sorted(exe_versions.items())
        if v
    ) or 'none'
    self._write_string('[debug] exe versions: %s\n' % exe_str)

    proxy_map = {}
    for handler in self._opener.handlers:
        if hasattr(handler, 'proxies'):
            proxy_map.update(handler.proxies)
    self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')

    if self.params.get('call_home', False):
        ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
        self._write_string('[debug] Public IP address: %s\n' % ipaddr)
        latest_version = self.urlopen(
            'https://yt-dl.org/latest/version').read().decode('utf-8')
        if version_tuple(latest_version) > version_tuple(__version__):
            self.report_warning(
                'You are using an outdated version (newest version: %s)! '
                'See https://yt-dl.org/update if you need help updating.' %
                latest_version)
def _setup_opener(self):
    """Build self._opener (cookies, proxies, HTTPS, data: URIs) and the
    socket timeout used for all requests."""
    timeout_val = self.params.get('socket_timeout')
    self._socket_timeout = 600 if timeout_val is None else float(timeout_val)

    opts_cookiefile = self.params.get('cookiefile')
    opts_proxy = self.params.get('proxy')

    if opts_cookiefile is None:
        self.cookiejar = compat_cookiejar.CookieJar()
    else:
        opts_cookiefile = expand_path(opts_cookiefile)
        self.cookiejar = YoutubeDLCookieJar(opts_cookiefile)
        if os.access(opts_cookiefile, os.R_OK):
            self.cookiejar.load(ignore_discard=True, ignore_expires=True)
    cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)

    if opts_proxy is not None:
        # An explicitly empty --proxy disables proxying entirely.
        proxies = {} if opts_proxy == '' else {'http': opts_proxy, 'https': opts_proxy}
    else:
        proxies = compat_urllib_request.getproxies()
        # Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805)
        if 'http' in proxies and 'https' not in proxies:
            proxies['https'] = proxies['http']
    proxy_handler = PerRequestProxyHandler(proxies)

    debuglevel = 1 if self.params.get('debug_printtraffic') else 0
    https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
    ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
    data_handler = compat_urllib_request_DataHandler()

    # When passing our own FileHandler instance, build_opener won't add the
    # default FileHandler and allows us to disable the file protocol, which
    # can be used for malicious purposes (see
    # https://github.com/rg3/youtube-dl/issues/8227)
    file_handler = compat_urllib_request.FileHandler()

    def file_open(*args, **kwargs):
        raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in youtube-dl for security reasons')
    file_handler.file_open = file_open

    opener = compat_urllib_request.build_opener(
        proxy_handler, https_handler, cookie_processor, ydlh, data_handler, file_handler)

    # Delete the default user-agent header, which would otherwise apply in
    # cases where our custom HTTP handler doesn't come into play
    # (See https://github.com/rg3/youtube-dl/issues/1309 for details)
    opener.addheaders = []
    self._opener = opener
def encode(self, s):
    """Encode text s using the configured output encoding.

    Byte strings pass through untouched.
    """
    if isinstance(s, bytes):
        return s  # Already encoded
    try:
        return s.encode(self.get_encoding())
    except UnicodeEncodeError as err:
        # Enrich the error before letting it propagate.
        err.reason += '. Check your system encoding configuration or use the --encoding option.'
        raise
def get_encoding(self):
    """Return the output encoding: the user's choice, else the platform default."""
    encoding = self.params.get('encoding')
    return encoding if encoding is not None else preferredencoding()
def _write_thumbnails(self, info_dict, filename):
    """Save thumbnail image(s) next to the video file, honoring the
    writethumbnail / write_all_thumbnails / nooverwrites options."""
    if self.params.get('writethumbnail', False):
        thumbnails = info_dict.get('thumbnails')
        # Only keep the last thumbnail in the list.
        if thumbnails:
            thumbnails = [thumbnails[-1]]
    elif self.params.get('write_all_thumbnails', False):
        thumbnails = info_dict.get('thumbnails')
    else:
        return

    if not thumbnails:
        # No thumbnails present, so return immediately
        return

    multiple = len(thumbnails) > 1
    for t in thumbnails:
        thumb_ext = determine_ext(t['url'], 'jpg')
        suffix = '_%s' % t['id'] if multiple else ''
        thumb_display_id = '%s ' % t['id'] if multiple else ''
        t['filename'] = thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext

        if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
            self.to_screen('[%s] %s: Thumbnail %sis already present' %
                           (info_dict['extractor'], info_dict['id'], thumb_display_id))
            continue
        self.to_screen('[%s] %s: Downloading thumbnail %s...' %
                       (info_dict['extractor'], info_dict['id'], thumb_display_id))
        try:
            uf = self.urlopen(t['url'])
            with open(encodeFilename(thumb_filename), 'wb') as thumbf:
                shutil.copyfileobj(uf, thumbf)
            self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
                           (info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self.report_warning('Unable to download thumbnail "%s": %s' %
                                (t['url'], error_to_compat_str(err)))