Compare commits: 5e9fe86cd6...develop
44 commits
| SHA1 |
|---|
| d55bc3ae97 |
| 5c808c579a |
| f58bc53c24 |
| 21120cd61e |
| c74f97aca7 |
| 5e9354ba58 |
| 6a3bfbeb13 |
| ad06ea50ad |
| 9932f34782 |
| 76a4b2e0e2 |
| 0942af249b |
| 28fefafa6f |
| d96ace1504 |
| e0723e7b9e |
| d68d9ce4f9 |
| 3a2e8eeb08 |
| 35456f2bca |
| 9d3a5b9f3b |
| ce00970171 |
| 2f954f4c79 |
| a362039e73 |
| 02c31719d1 |
| d65ea8dec8 |
| fec0d26ab7 |
| a47bd23e78 |
| 44ef6ea2bb |
| be7be00f78 |
| 8e5ae4824c |
| 37e3265be5 |
| 4cafb7ff9f |
| 9336df2afa |
| d936b17429 |
| e6739c3087 |
| 2de4de6b22 |
| 39959dc947 |
| ce0f45e168 |
| 8862a80eea |
| 6fe4db7c63 |
| 153cc450d9 |
| 457bf09b52 |
| 43f56b283c |
| 39aa0eeea4 |
| 8046fec794 |
| fd282a6595 |
.gitignore (vendored, 2 additions)
@@ -1,3 +1,5 @@
+cookies.txt
 docs/
 .idea/
 *.zip
README.md (11 changed lines)
@@ -2,22 +2,20 @@
 SolarFM is a Gtk+ Python file manager.

 # Notes
 <b>Still Work in progress! Use at own risk!</b>

-Additionally, if not building a .deb then just move the contents of user_config to their respective folders.
+If not building a .deb then just move the contents of user_config to their respective folders.
 Copy the share/solarfm folder to your user .config/ directory too.

 `pyrightconfig.json`
-<p>The pyrightconfig file needs to stay on same level as the .git folders in order to have settings detected when using pyright with lsp functionality.</p>
+<p>The pyrightconfig file needs to stay on the same level as the .git folders in order to have settings detected when using pyright with LSP functionality. "pyrightconfig.json" can prompt IDEs such as Zed on which settings to use and where imports are located; see venvPath and venv. "venvPath" is the parent path of "venv", where "venv" is just the name of the folder under that parent path holding the Python-created venv.</p>

 <h6>Install Setup</h6>
 ```
-sudo apt-get install python3.8 python3-setproctitle python3-gi wget ffmpegthumbnailer steamcmd
+sudo apt-get install xclip python3.8 python3-setproctitle python3-gi wget ffmpegthumbnailer steamcmd
 ```

 # Known Issues
 <ul>
-<li>There's a memory leak. Still analyzing where exactly.</li>
+<li>There is a memory leak that has been slowed down, but it can reach 2GB over a long enough time period OR while actively accessing image-heavy directories.</li>
 <li>Doing Ctrl+D when in Terminator (maybe other terminals too) somehow propagates the signal to SolarFM too.
 A selected file in the active quad-pane will move to trash since it is the default key-binding for that action.</li>
 </ul>

@@ -25,7 +23,6 @@ A selected file in the active quad-pane will move to trash since it is the default key-binding for that action.</li>
 # TODO
 <ul>
 <li>Add simpleish preview plugin for various file types.</li>
 <li>Add simpleish bulk-renamer.</li>
 </ul>

 # Images
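For reference, a minimal `pyrightconfig.json` matching the venvPath/venv description above might look like the following; the folder name `venv` and its location are illustrative assumptions, not values taken from this diff:

```json
{
    "venvPath": ".",
    "venv": "venv"
}
```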
BIN  images/pic1.png (Size: 504 KiB -> 1.0 MiB)
BIN  images/pic2.png (Size: 316 KiB -> 1.1 MiB)
BIN  images/pic3.png (Size: 307 KiB -> 1.2 MiB)
BIN  images/pic4.png (Size: 464 KiB -> 1.3 MiB)
@@ -14,6 +14,7 @@ class Manifest:
         'ui_target': "plugin_control_list",
         'pass_fm_events': "true"
     }
+    pre_launch: bool = False
 ```
@@ -1,12 +1,10 @@
 {
-    "manifest": {
-        "name": "Archiver",
-        "author": "ITDominator",
-        "version": "0.0.1",
-        "support": "",
-        "requests": {
-            "ui_target": "context_menu_plugins",
-            "pass_fm_events": "true"
-        }
+    "name": "Archiver",
+    "author": "ITDominator",
+    "version": "0.0.1",
+    "support": "",
+    "requests": {
+        "ui_target": "context_menu_plugins",
+        "pass_fm_events": true
     }
 }
@@ -1,12 +1,10 @@
 {
-    "manifest": {
-        "name": "Disk Usage",
-        "author": "ITDominator",
-        "version": "0.0.1",
-        "support": "",
-        "requests": {
-            "ui_target": "context_menu_plugins",
-            "pass_fm_events": "true"
-        }
+    "name": "Disk Usage",
+    "author": "ITDominator",
+    "version": "0.0.1",
+    "support": "",
+    "requests": {
+        "ui_target": "context_menu_plugins",
+        "pass_fm_events": true
     }
 }
@@ -1,14 +1,12 @@
 {
-    "manifest": {
-        "name": "Favorites",
-        "author": "ITDominator",
-        "version": "0.0.1",
-        "support": "",
-        "requests": {
-            "ui_target": "main_menu_bttn_box_bar",
-            "pass_fm_events": "true",
-            "pass_ui_objects": ["path_entry"],
-            "bind_keys": ["Favorites||show_favorites_menu:<Control>f"]
-        }
+    "name": "Favorites",
+    "author": "ITDominator",
+    "version": "0.0.1",
+    "support": "",
+    "requests": {
+        "ui_target": "main_menu_bttn_box_bar",
+        "pass_fm_events": true,
+        "pass_ui_objects": ["path_entry"],
+        "bind_keys": ["Favorites||show_favorites_menu:<Control>f"]
     }
 }
@@ -1,12 +1,10 @@
 {
-    "manifest": {
-        "name": "Properties",
-        "author": "ITDominator",
-        "version": "0.0.1",
-        "support": "",
-        "requests": {
-            "ui_target": "context_menu",
-            "pass_fm_events": "true"
-        }
+    "name": "Properties",
+    "author": "ITDominator",
+    "version": "0.0.1",
+    "support": "",
+    "requests": {
+        "ui_target": "context_menu",
+        "pass_fm_events": true
     }
 }
@@ -122,7 +122,6 @@ class Plugin(PluginBase):
         uri  = state.uris[0]
         path = state.tab.get_current_directory()
-

         properties = self._set_ui_data(uri, path)
         response   = self._properties_dialog.run()
         if response in [Gtk.ResponseType.CANCEL, Gtk.ResponseType.DELETE_EVENT]:
@@ -168,13 +167,13 @@ class Plugin(PluginBase):

     def _set_ui_data(self, uri, path):
         properties = Properties()
-        file_info  = Gio.File.new_for_path(uri).query_info(attributes="standard::*,owner::*,time::access,time::changed",
-                                                           flags=Gio.FileQueryInfoFlags.NONE,
-                                                           cancellable=None)
+        file_info  = Gio.File.new_for_path(uri).query_info(attributes = "standard::*,owner::*,time::access,time::changed",
+                                                           flags = Gio.FileQueryInfoFlags.NONE,
+                                                           cancellable = None)

         is_symlink = file_info.get_attribute_as_string("standard::is-symlink")
         properties.file_uri = uri
-        properties.file_target = file_info.get_attribute_as_string("standard::symlink-target") if is_symlink else ""
+        properties.file_target = file_info.get_attribute_as_string("standard::symlink-target") if is_symlink in [True, "TRUE"] else ""
         properties.file_name = file_info.get_display_name()
         properties.file_location = path
         properties.mime_type = file_info.get_content_type()

@@ -186,7 +185,7 @@ class Plugin(PluginBase):

         # NOTE: Read = 4, Write = 2, Exec = 1
         command = ["stat", "-c", "%a", uri]
-        with subprocess.Popen(command, stdout=subprocess.PIPE) as proc:
+        with subprocess.Popen(command, stdout = subprocess.PIPE) as proc:
             properties.chmod_stat = list(proc.stdout.read().decode("UTF-8").strip())
             owner = self._chmod_map[f"{properties.chmod_stat[0]}"]
             group = self._chmod_map[f"{properties.chmod_stat[1]}"]
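The `_chmod_map` used above is not shown in this diff; a plausible sketch of the octal-digit-to-permission mapping it implies follows. This is an assumption for illustration, not the plugin's actual table:

```python
# Hypothetical sketch: maps one octal digit from `stat -c %a`
# (Read = 4, Write = 2, Exec = 1) to its rwx string.
_chmod_map = {
    "0": "---", "1": "--x", "2": "-w-", "3": "-wx",
    "4": "r--", "5": "r-x", "6": "rw-", "7": "rwx",
}

assert _chmod_map["7"] == "rwx"  # 4 + 2 + 1
assert _chmod_map["5"] == "r-x"  # 4 + 1
```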
@@ -1,12 +1,10 @@
 {
-    "manifest": {
-        "name": "Git Clone",
-        "author": "ITDominator",
-        "version": "0.0.1",
-        "support": "",
-        "requests": {
-            "ui_target": "plugin_control_list",
-            "pass_fm_events": "true"
-        }
+    "name": "Git Clone",
+    "author": "ITDominator",
+    "version": "0.0.1",
+    "support": "",
+    "requests": {
+        "ui_target": "plugin_control_list",
+        "pass_fm_events": true
     }
 }
@@ -1,12 +1,10 @@
 {
-    "manifest": {
-        "name": "Movie/TV Info",
-        "author": "ITDominator",
-        "version": "0.0.1",
-        "support": "",
-        "requests": {
-            "ui_target": "context_menu_plugins",
-            "pass_fm_events": "true"
-        }
+    "name": "Movie/TV Info",
+    "author": "ITDominator",
+    "version": "0.0.1",
+    "support": "",
+    "requests": {
+        "ui_target": "context_menu_plugins",
+        "pass_fm_events": true
     }
 }
3  plugins/movie_tv_info/tmdbscraper/lib/__init__.py (new file)
@@ -0,0 +1,3 @@
"""
Plugin Module
"""
@@ -0,0 +1,17 @@

def get_imdb_id(uniqueids):
    imdb_id = uniqueids.get('imdb')
    if not imdb_id or not imdb_id.startswith('tt'):
        return None
    return imdb_id


# example format for scraper results
_ScraperResults = {
    'info',
    'ratings',
    'uniqueids',
    'cast',
    'available_art',
    'error',
    'warning'  # not handled
}
@@ -0,0 +1,75 @@
# coding: utf-8
#
# Copyright (C) 2020, Team Kodi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

"""Functions to interact with various web site APIs."""

from __future__ import absolute_import, unicode_literals

import json
# from pprint import pformat

try:  # PY2 / PY3
    from urllib2 import Request, urlopen
    from urllib2 import URLError
    from urllib import urlencode
except ImportError:
    from urllib.request import Request, urlopen
    from urllib.error import URLError
    from urllib.parse import urlencode

try:
    from typing import Text, Optional, Union, List, Dict, Any  # pylint: disable=unused-import
    InfoType = Dict[Text, Any]  # pylint: disable=invalid-name
except ImportError:
    pass


HEADERS = {}


def set_headers(headers):
    HEADERS.update(headers)


def load_info(url, params=None, default=None, resp_type = 'json'):
    # type: (Text, Optional[Dict[Text, Union[Text, List[Text]]]]) -> Union[dict, list]
    """
    Load info from external api

    :param url: API endpoint URL
    :param params: URL query params
    :default: object to return if there is an error
    :resp_type: what to return to the calling function
    :return: API response or default on error
    """
    theerror = ''
    if params:
        url = url + '?' + urlencode(params)
    req = Request(url, headers=HEADERS)
    try:
        response = urlopen(req)
    except URLError as e:
        if hasattr(e, 'reason'):
            theerror = {'error': 'failed to reach the remote site\nReason: {}'.format(e.reason)}
        elif hasattr(e, 'code'):
            theerror = {'error': 'remote site unable to fulfill the request\nError code: {}'.format(e.code)}
        if default is not None:
            return default
        else:
            return theerror
    if resp_type.lower() == 'json':
        resp = json.loads(response.read().decode('utf-8'))
    else:
        resp = response.read().decode('utf-8')
    return resp
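A quick usage sketch of `load_info` as defined above; the URL and query are illustrative:

```python
set_headers({'Accept': 'application/json'})

# JSON response parsed into a dict; returns {} instead of an error dict on failure
data = load_info('https://api.example.com/v1/items',
                 params={'q': 'solarfm'},
                 default={})

# Raw text response
page = load_info('https://example.com/page.html', resp_type='text')
```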
@@ -0,0 +1,87 @@
from . import api_utils
try:
    from urllib import quote
except ImportError:  # py2 / py3
    from urllib.parse import quote

API_KEY = '384afe262ee0962545a752ff340e3ce4'
API_URL = 'https://webservice.fanart.tv/v3/movies/{}'

ARTMAP = {
    'movielogo': 'clearlogo',
    'hdmovielogo': 'clearlogo',
    'hdmovieclearart': 'clearart',
    'movieart': 'clearart',
    'moviedisc': 'discart',
    'moviebanner': 'banner',
    'moviethumb': 'landscape',
    'moviebackground': 'fanart',
    'movieposter': 'poster'
}

def get_details(uniqueids, clientkey, language, set_tmdbid):
    media_id = _get_mediaid(uniqueids)
    if not media_id:
        return {}

    movie_data = _get_data(media_id, clientkey)
    movieset_data = _get_data(set_tmdbid, clientkey)
    if not movie_data and not movieset_data:
        return {}

    movie_art = {}
    movieset_art = {}
    if movie_data:
        movie_art = _parse_data(movie_data, language)
    if movieset_data:
        movieset_art = _parse_data(movieset_data, language)
        movieset_art = {'set.' + key: value for key, value in movieset_art.items()}

    available_art = movie_art
    available_art.update(movieset_art)

    return {'available_art': available_art}

def _get_mediaid(uniqueids):
    for source in ('tmdb', 'imdb', 'unknown'):
        if source in uniqueids:
            return uniqueids[source]

def _get_data(media_id, clientkey):
    headers = {'api-key': API_KEY}
    if clientkey:
        headers['client-key'] = clientkey
    api_utils.set_headers(headers)
    fanarttv_url = API_URL.format(media_id)
    return api_utils.load_info(fanarttv_url, default={})

def _parse_data(data, language):
    result = {}
    for arttype, artlist in data.items():
        if arttype not in ARTMAP:
            continue
        for image in artlist:
            image_lang = _get_imagelanguage(arttype, image)
            if image_lang and image_lang != language:
                continue

            generaltype = ARTMAP[arttype]
            if generaltype == 'poster' and not image_lang:
                generaltype = 'keyart'
            if artlist and generaltype not in result:
                result[generaltype] = []

            url = quote(image['url'], safe="%/:=&?~#+!$,;'@()*[]")
            resultimage = {'url': url, 'preview': url.replace('.fanart.tv/fanart/', '.fanart.tv/preview/')}
            result[generaltype].append(resultimage)

    return result

def _get_imagelanguage(arttype, image):
    if 'lang' not in image or arttype == 'moviebackground':
        return None
    if arttype in ('movielogo', 'hdmovielogo', 'hdmovieclearart', 'movieart', 'moviebanner',
                   'moviethumb', 'moviedisc'):
        return image['lang'] if image['lang'] not in ('', '00') else 'en'
    # movieposter may or may not have a title and thus need a language
    return image['lang'] if image['lang'] not in ('', '00') else None
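A usage sketch of the fanart.tv `get_details` defined above; the TMDb ID is illustrative and no personal client key is assumed:

```python
art = get_details({'tmdb': '603'}, clientkey=None, language='en', set_tmdbid=None)
for poster in art.get('available_art', {}).get('poster', []):
    print(poster['preview'])
```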
@@ -0,0 +1,72 @@
# -*- coding: UTF-8 -*-
#
# Copyright (C) 2020, Team Kodi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# IMDb ratings based on code in metadata.themoviedb.org.python by Team Kodi
# pylint: disable=missing-docstring

import re
from . import api_utils
from . import get_imdb_id

IMDB_RATINGS_URL = 'https://www.imdb.com/title/{}/'
IMDB_RATING_REGEX = re.compile(r'itemprop="ratingValue".*?>.*?([\d.]+).*?<')
IMDB_VOTES_REGEX = re.compile(r'itemprop="ratingCount".*?>.*?([\d,]+).*?<')
IMDB_TOP250_REGEX = re.compile(r'Top Rated Movies #(\d+)')

def get_details(uniqueids):
    imdb_id = get_imdb_id(uniqueids)
    if not imdb_id:
        return {}
    votes, rating, top250 = _get_ratinginfo(imdb_id)
    return _assemble_imdb_result(votes, rating, top250)

def _get_ratinginfo(imdb_id):
    response = api_utils.load_info(IMDB_RATINGS_URL.format(imdb_id), default = '', resp_type='text')
    return _parse_imdb_result(response)

def _assemble_imdb_result(votes, rating, top250):
    result = {}
    if top250:
        result['info'] = {'top250': top250}
    if votes and rating:
        result['ratings'] = {'imdb': {'votes': votes, 'rating': rating}}
    return result

def _parse_imdb_result(input_html):
    rating = _parse_imdb_rating(input_html)
    votes = _parse_imdb_votes(input_html)
    top250 = _parse_imdb_top250(input_html)

    return votes, rating, top250

def _parse_imdb_rating(input_html):
    match = re.search(IMDB_RATING_REGEX, input_html)
    if (match):
        return float(match.group(1))
    return None

def _parse_imdb_votes(input_html):
    match = re.search(IMDB_VOTES_REGEX, input_html)
    if (match):
        return int(match.group(1).replace(',', ''))
    return None

def _parse_imdb_top250(input_html):
    match = re.search(IMDB_TOP250_REGEX, input_html)
    if (match):
        return int(match.group(1))
    return None
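A usage sketch of this module's `get_details`; the IMDb ID is illustrative:

```python
result = get_details({'imdb': 'tt0133093'})
# On success: {'ratings': {'imdb': {'votes': ..., 'rating': ...}}},
# plus {'info': {'top250': ...}} when the title is in IMDb's Top 250.
```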
249  plugins/movie_tv_info/tmdbscraper/lib/tmdbscraper/tmdb.py (new file)
@@ -0,0 +1,249 @@
from datetime import datetime, timedelta
from . import tmdbapi


class TMDBMovieScraper(object):
    def __init__(self, url_settings, language, certification_country):
        self.url_settings = url_settings
        self.language = language
        self.certification_country = certification_country
        self._urls = None
        self.tmdbapi = tmdbapi

    @property
    def urls(self):
        if not self._urls:
            self._urls = _load_base_urls(self.url_settings)
        return self._urls

    def search(self, title, year=None):
        search_media_id = _parse_media_id(title)
        if search_media_id:
            if search_media_id['type'] == 'tmdb':
                result = _get_movie(search_media_id['id'], self.language, True)
                result = [result]
            else:
                response = tmdbapi.find_movie_by_external_id(search_media_id['id'], language=self.language)
                theerror = response.get('error')
                if theerror:
                    return 'error: {}'.format(theerror)
                result = response.get('movie_results')
            if 'error' in result:
                return result
        else:
            response = tmdbapi.search_movie(query=title, year=year, language=self.language)
            theerror = response.get('error')
            if theerror:
                return 'error: {}'.format(theerror)
            result = response['results']
        urls = self.urls

        def is_best(item):
            return item['title'].lower() == title and (
                not year or item.get('release_date', '').startswith(year))
        if result and not is_best(result[0]):
            best_first = next((item for item in result if is_best(item)), None)
            if best_first:
                result = [best_first] + [item for item in result if item is not best_first]

        for item in result:
            if item.get('poster_path'):
                item['poster_path'] = urls['preview'] + item['poster_path']
            if item.get('backdrop_path'):
                item['backdrop_path'] = urls['preview'] + item['backdrop_path']
        return result

    def get_details(self, uniqueids):
        media_id = uniqueids.get('tmdb') or uniqueids.get('imdb')
        details = self._gather_details(media_id)
        if not details:
            return None
        if details.get('error'):
            return details
        return self._assemble_details(**details)

    def _gather_details(self, media_id):
        movie = _get_movie(media_id, self.language)
        if not movie or movie.get('error'):
            return movie

        # Don't specify language to get English text for fallback
        movie_fallback = _get_movie(media_id)

        collection = _get_moviecollection(movie['belongs_to_collection'].get('id'), self.language) if \
            movie['belongs_to_collection'] else None
        collection_fallback = _get_moviecollection(movie['belongs_to_collection'].get('id')) if \
            movie['belongs_to_collection'] else None

        return {'movie': movie, 'movie_fallback': movie_fallback, 'collection': collection,
                'collection_fallback': collection_fallback}

    def _assemble_details(self, movie, movie_fallback, collection, collection_fallback):
        info = {
            'title': movie['title'],
            'originaltitle': movie['original_title'],
            'plot': movie.get('overview') or movie_fallback.get('overview'),
            'tagline': movie.get('tagline') or movie_fallback.get('tagline'),
            'studio': _get_names(movie['production_companies']),
            'genre': _get_names(movie['genres']),
            'country': _get_names(movie['production_countries']),
            'credits': _get_cast_members(movie['casts'], 'crew', 'Writing', ['Screenplay', 'Writer', 'Author']),
            'director': _get_cast_members(movie['casts'], 'crew', 'Directing', ['Director']),
            'premiered': movie['release_date'],
            'tag': _get_names(movie['keywords']['keywords'])
        }

        if 'countries' in movie['releases']:
            certcountry = self.certification_country.upper()
            for country in movie['releases']['countries']:
                if country['iso_3166_1'] == certcountry and country['certification']:
                    info['mpaa'] = country['certification']
                    break

        trailer = _parse_trailer(movie.get('trailers', {}), movie_fallback.get('trailers', {}))
        if trailer:
            info['trailer'] = trailer
        if collection:
            info['set'] = collection.get('name') or collection_fallback.get('name')
            info['setoverview'] = collection.get('overview') or collection_fallback.get('overview')
        if movie.get('runtime'):
            info['duration'] = movie['runtime'] * 60

        ratings = {'themoviedb': {'rating': float(movie['vote_average']), 'votes': int(movie['vote_count'])}}
        uniqueids = {'tmdb': movie['id'], 'imdb': movie['imdb_id']}
        cast = [{
                'name': actor['name'],
                'role': actor['character'],
                'thumbnail': self.urls['original'] + actor['profile_path']
                    if actor['profile_path'] else "",
                'order': actor['order']
            }
            for actor in movie['casts'].get('cast', [])
        ]
        available_art = _parse_artwork(movie, collection, self.urls, self.language)

        _info = {'set_tmdbid': movie['belongs_to_collection'].get('id')
                 if movie['belongs_to_collection'] else None}

        return {'info': info, 'ratings': ratings, 'uniqueids': uniqueids, 'cast': cast,
                'available_art': available_art, '_info': _info}

def _parse_media_id(title):
    if title.startswith('tt') and title[2:].isdigit():
        return {'type': 'imdb', 'id': title}  # IMDB ID works alone because it is clear
    title = title.lower()
    if title.startswith('tmdb/') and title[5:].isdigit():  # TMDB ID
        return {'type': 'tmdb', 'id': title[5:]}
    elif title.startswith('imdb/tt') and title[7:].isdigit():  # IMDB ID with prefix to match
        return {'type': 'imdb', 'id': title[5:]}
    return None

def _get_movie(mid, language=None, search=False):
    details = None if search else \
        'trailers,images,releases,casts,keywords' if language is not None else \
        'trailers'
    response = tmdbapi.get_movie(mid, language=language, append_to_response=details)
    theerror = response.get('error')
    if theerror:
        return 'error: {}'.format(theerror)
    else:
        return response

def _get_moviecollection(collection_id, language=None):
    if not collection_id:
        return None
    details = 'images'
    response = tmdbapi.get_collection(collection_id, language=language, append_to_response=details)
    theerror = response.get('error')
    if theerror:
        return 'error: {}'.format(theerror)
    else:
        return response

def _parse_artwork(movie, collection, urlbases, language):
    if language:
        # Image languages don't have regional variants
        language = language.split('-')[0]
    posters = []
    landscape = []
    fanart = []
    if 'images' in movie:
        posters = _get_images_with_fallback(movie['images']['posters'], urlbases, language)
        landscape = _get_images(movie['images']['backdrops'], urlbases, language)
        fanart = _get_images(movie['images']['backdrops'], urlbases, None)

    setposters = []
    setlandscape = []
    setfanart = []
    if collection and 'images' in collection:
        setposters = _get_images_with_fallback(collection['images']['posters'], urlbases, language)
        setlandscape = _get_images(collection['images']['backdrops'], urlbases, language)
        setfanart = _get_images(collection['images']['backdrops'], urlbases, None)

    return {'poster': posters, 'landscape': landscape, 'fanart': fanart,
            'set.poster': setposters, 'set.landscape': setlandscape, 'set.fanart': setfanart}

def _get_images_with_fallback(imagelist, urlbases, language, language_fallback='en'):
    images = _get_images(imagelist, urlbases, language)

    # Add backup images
    if language != language_fallback:
        images.extend(_get_images(imagelist, urlbases, language_fallback))

    # Add any images if nothing set so far
    if not images:
        images = _get_images(imagelist, urlbases)

    return images

def _get_images(imagelist, urlbases, language='_any'):
    result = []
    for img in imagelist:
        if language != '_any' and img['iso_639_1'] != language:
            continue
        result.append({
            'url': urlbases['original'] + img['file_path'],
            'preview': urlbases['preview'] + img['file_path'],
        })
    return result

def _get_date_numeric(datetime_):
    return (datetime_ - datetime(1970, 1, 1)).total_seconds()

def _load_base_urls(url_settings):
    urls = {}
    # urls['original'] = url_settings.getSettingString('originalUrl')
    # urls['preview'] = url_settings.getSettingString('previewUrl')
    # last_updated = url_settings.getSettingString('lastUpdated')
    urls['original'] = ""
    urls['preview'] = ""
    last_updated = "0"

    if not urls['original'] or not urls['preview'] or not last_updated or \
       float(last_updated) < _get_date_numeric(datetime.now() - timedelta(days=30)):
        conf = tmdbapi.get_configuration()
        if conf:
            urls['original'] = conf['images']['secure_base_url'] + 'original'
            urls['preview'] = conf['images']['secure_base_url'] + 'w780'
            # url_settings.setSetting('originalUrl', urls['original'])
            # url_settings.setSetting('previewUrl', urls['preview'])
            # url_settings.setSetting('lastUpdated', str(_get_date_numeric(datetime.now())))
    return urls

def _parse_trailer(trailers, fallback):
    if trailers.get('youtube'):
        return 'plugin://plugin.video.youtube/?action=play_video&videoid=' + trailers['youtube'][0]['source']
    if fallback.get('youtube'):
        return 'plugin://plugin.video.youtube/?action=play_video&videoid=' + fallback['youtube'][0]['source']
    return None

def _get_names(items):
    return [item['name'] for item in items] if items else []

def _get_cast_members(casts, casttype, department, jobs):
    result = []
    if casttype in casts:
        for cast in casts[casttype]:
            if cast['department'] == department and cast['job'] in jobs and cast['name'] not in result:
                result.append(cast['name'])
    return result
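A usage sketch of `TMDBMovieScraper.search` exercising the title prefixes `_parse_media_id` understands; the IDs are illustrative, and per the code above `url_settings` is only referenced in commented-out lines, so `None` suffices here:

```python
scraper = TMDBMovieScraper(url_settings=None, language='en-US', certification_country='us')

# Plain title search (titles are matched lowercased)
results = scraper.search('the matrix', year='1999')

# Direct-ID lookups
by_tmdb = scraper.search('tmdb/603')        # TMDb ID
by_imdb = scraper.search('imdb/tt0133093')  # IMDb ID (or bare 'tt0133093')
```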
129  plugins/movie_tv_info/tmdbscraper/lib/tmdbscraper/tmdbapi.py (new file)
@@ -0,0 +1,129 @@
# -*- coding: UTF-8 -*-
#
# Copyright (C) 2020, Team Kodi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# pylint: disable=missing-docstring

"""Functions to interact with TMDb API."""

from . import api_utils
try:
    from typing import Optional, Text, Dict, List, Any  # pylint: disable=unused-import
    InfoType = Dict[Text, Any]  # pylint: disable=invalid-name
except ImportError:
    pass


HEADERS = (
    ('User-Agent', 'Kodi Movie scraper by Team Kodi'),
    ('Accept', 'application/json'),
)
api_utils.set_headers(dict(HEADERS))

TMDB_PARAMS = {'api_key': 'f090bb54758cabf231fb605d3e3e0468'}
BASE_URL = 'https://api.themoviedb.org/3/{}'
SEARCH_URL = BASE_URL.format('search/movie')
FIND_URL = BASE_URL.format('find/{}')
MOVIE_URL = BASE_URL.format('movie/{}')
COLLECTION_URL = BASE_URL.format('collection/{}')
CONFIG_URL = BASE_URL.format('configuration')


def search_movie(query, year=None, language=None):
    # type: (Text) -> List[InfoType]
    """
    Search for a movie

    :param title: movie title to search
    :param year: the year to search (optional)
    :param language: the language filter for TMDb (optional)
    :return: a list with found movies
    """
    theurl = SEARCH_URL
    params = _set_params(None, language)
    params['query'] = query
    if year is not None:
        params['year'] = str(year)
    return api_utils.load_info(theurl, params=params)


def find_movie_by_external_id(external_id, language=None):
    # type: (Text) -> List[InfoType]
    """
    Find movie based on external ID

    :param mid: external ID
    :param language: the language filter for TMDb (optional)
    :return: the movie or error
    """
    theurl = FIND_URL.format(external_id)
    params = _set_params(None, language)
    params['external_source'] = 'imdb_id'
    return api_utils.load_info(theurl, params=params)


def get_movie(mid, language=None, append_to_response=None):
    # type: (Text) -> List[InfoType]
    """
    Get movie details

    :param mid: TMDb movie ID
    :param language: the language filter for TMDb (optional)
    :append_to_response: the additional data to get from TMDb (optional)
    :return: the movie or error
    """
    try:
        theurl = MOVIE_URL.format(mid)
        return api_utils.load_info(theurl, params=_set_params(append_to_response, language))
    except Exception as e:
        print(repr(e))


def get_collection(collection_id, language=None, append_to_response=None):
    # type: (Text) -> List[InfoType]
    """
    Get movie collection information

    :param collection_id: TMDb collection ID
    :param language: the language filter for TMDb (optional)
    :append_to_response: the additional data to get from TMDb (optional)
    :return: the movie or error
    """
    theurl = COLLECTION_URL.format(collection_id)
    return api_utils.load_info(theurl, params=_set_params(append_to_response, language))


def get_configuration():
    # type: (Text) -> List[InfoType]
    """
    Get configuration information

    :return: configuration details or error
    """
    return api_utils.load_info(CONFIG_URL, params=TMDB_PARAMS.copy())


def _set_params(append_to_response, language):
    params = TMDB_PARAMS.copy()
    img_lang = 'en,null'
    if language is not None:
        params['language'] = language
        img_lang = '%s,en,null' % language[0:2]
    if append_to_response is not None:
        params['append_to_response'] = append_to_response
        if 'images' in append_to_response:
            params['include_image_language'] = img_lang
    return params
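Following the `_set_params` logic above, a language of `de-DE` yields a two-letter image-language prefix:

```python
params = _set_params('trailers,images', 'de-DE')
# -> {'api_key': '...', 'language': 'de-DE',
#     'append_to_response': 'trailers,images',
#     'include_image_language': 'de,en,null'}
```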
@@ -0,0 +1,55 @@
# -*- coding: UTF-8 -*-
#
# Copyright (C) 2020, Team Kodi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# pylint: disable=missing-docstring

"""Functions to interact with Trakt API."""

from __future__ import absolute_import, unicode_literals

from . import api_utils
from . import get_imdb_id
try:
    from typing import Optional, Text, Dict, List, Any  # pylint: disable=unused-import
    InfoType = Dict[Text, Any]  # pylint: disable=invalid-name
except ImportError:
    pass


HEADERS = (
    ('User-Agent', 'Kodi Movie scraper by Team Kodi'),
    ('Accept', 'application/json'),
    ('trakt-api-key', '5f2dc73b6b11c2ac212f5d8b4ec8f3dc4b727bb3f026cd254d89eda997fe64ae'),
    ('trakt-api-version', '2'),
    ('Content-Type', 'application/json'),
)
api_utils.set_headers(dict(HEADERS))

MOVIE_URL = 'https://api.trakt.tv/movies/{}'


def get_trakt_ratinginfo(uniqueids):
    imdb_id = get_imdb_id(uniqueids)
    result = {}
    url = MOVIE_URL.format(imdb_id)
    params = {'extended': 'full'}
    movie_info = api_utils.load_info(url, params=params, default={})
    if(movie_info):
        if 'votes' in movie_info and 'rating' in movie_info:
            result['ratings'] = {'trakt': {'votes': int(movie_info['votes']), 'rating': float(movie_info['rating'])}}
        elif 'rating' in movie_info:
            result['ratings'] = {'trakt': {'rating': float(movie_info['rating'])}}
    return result
@@ -1,13 +1,11 @@
 {
-    "manifest": {
-        "name": "PyRun",
-        "author": "ITDominator",
-        "version": "0.0.1",
-        "support": "",
-        "requests": {
-            "ui_target": "plugin_control_list",
-            "pass_fm_events": "true",
-            "bind_keys": ["PyRun||send_message:<Shift><Control>r"]
-        }
+    "name": "PyRun",
+    "author": "ITDominator",
+    "version": "0.0.1",
+    "support": "",
+    "requests": {
+        "ui_target": "plugin_control_list",
+        "pass_fm_events": true,
+        "bind_keys": ["PyRun||send_message:<Shift><Control>r"]
     }
 }
@@ -1,13 +1,11 @@
 {
-    "manifest": {
-        "name": "Search",
-        "author": "ITDominator",
-        "version": "0.0.1",
-        "support": "",
-        "requests": {
-            "ui_target": "context_menu",
-            "pass_fm_events": "true",
-            "bind_keys": ["Search||show_search_page:<Control>s"]
-        }
+    "name": "Search",
+    "author": "ITDominator",
+    "version": "0.0.1",
+    "support": "",
+    "requests": {
+        "ui_target": "context_menu",
+        "pass_fm_events": true,
+        "bind_keys": ["Search||show_search_page:<Control>s"]
     }
 }
@@ -18,22 +18,22 @@ from ..widgets.file_preview_widget import FilePreviewWidget


-# NOTE: Threads WILL NOT die with parent's destruction.
-def threaded(fn):
-    def wrapper(*args, **kwargs):
-        threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=False).start()
-
-    return wrapper
-
+# def threaded(fn):
+#     def wrapper(*args, **kwargs):
+#         threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=False).start()
+#
+#     return wrapper
+#
 # NOTE: Threads WILL die with parent's destruction.
-def daemon_threaded(fn):
-    def wrapper(*args, **kwargs):
-        threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=True).start()
-
-    return wrapper
+# def daemon_threaded(fn):
+#     def wrapper(*args, **kwargs):
+#         threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=True).start()
+#
+#     return wrapper


 class FileSearchMixin:
-    def _run_find_file_query(self, widget=None, eve=None):
+    def _run_find_file_query(self, widget = None, eve = None):
         self._queue_search = True

         if not self._search_watcher_running:
@@ -43,6 +43,50 @@
         self.reset_file_list_box()
         self.run_fsearch_watcher(query=widget)


+    # Need to implement this over the threaded stuffs....
+    #
+    # def cancel_timer(self):
+    #     if self.timer:
+    #         self.timer.cancel()
+    #         GLib.idle_remove_by_data(None)
+    #
+    # def delay_search_glib(self, query):
+    #     GLib.idle_add(self._exec_find_file_query, *(query,))
+    #
+    # def delay_search(self):
+    #     wait_time = self.search_time / len(self.search_text)
+    #     wait_time = max(wait_time, 0.05)
+    #
+    #     self.timer = threading.Timer(wait_time, self.delay_search_glib, *(query,))
+    #     self.timer.daemon = True
+    #     self.timer.start()
+
     @daemon_threaded
     def run_fsearch_watcher(self, query):
         while True:
@@ -48,7 +48,7 @@ class GrepPreviewWidget(Gtk.Box):
         return bytes(f"\n<span foreground='{color}'>{target}</span>", "utf-8").decode("utf-8")

     def make_utf8_line_highlight(self, buffer, itr, i, color, target, query):
-        parts = re.split(r"(" + query + ")(?i)", target.replace("\n", ""))
+        parts = re.split(r"(?i)(" + query + ")", target.replace("\n", ""))
         for part in parts:
             itr = buffer.get_end_iter()
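The regex change above matters because Python 3.11 and later reject inline flags that are not at the start of the pattern. A minimal illustration with a hypothetical query:

```python
import re

# Python 3.11+ raises re.error when a global inline flag like (?i) is not at
# the start of the pattern, which is why the flag moved to the front here.
parts = re.split(r"(?i)(foo)", "A Foo b FOO c")
# -> ['A ', 'Foo', ' b ', 'FOO', ' c']  (the capturing group keeps the matches)
```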
@@ -1,13 +1,12 @@
 {
-    "manifest": {
-        "name": "Example Plugin",
-        "author": "John Doe",
-        "version": "0.0.1",
-        "support": "",
-        "requests": {
-            "ui_target": "plugin_control_list",
-            "pass_fm_events": "true",
-            "bind_keys": ["Example Plugin||send_message:<Control>f"]
-        }
-    }
+    "name": "Example Plugin",
+    "author": "John Doe",
+    "version": "0.0.1",
+    "support": "",
+    "requests": {
+        "ui_target": "plugin_control_list",
+        "pass_fm_events": true,
+        "bind_keys": ["Example Plugin||send_message:<Control>f"]
+    },
+    "pre_launch": false
 }
3  plugins/thumbnailer/__init__.py (new file)
@@ -0,0 +1,3 @@
"""
Plugin Module
"""

3  plugins/thumbnailer/__main__.py (new file)
@@ -0,0 +1,3 @@
"""
Plugin Package
"""
75  plugins/thumbnailer/icons/controller.py (new file)
@@ -0,0 +1,75 @@
# Python imports
import json
import os
from os import path

# Lib imports
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk

# Application imports
from .icon import Icon



class IconController(Icon):
    def __init__(self):
        CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))

        # NOTE: app_name should be defined using python 'builtins' and so too must be logger used in the various classes
        app_name_exists = False
        try:
            app_name
            app_name_exists = True
        except Exception as e:
            ...

        APP_CONTEXT     = f"{app_name.lower()}" if app_name_exists else "shellfm"
        USR_APP_CONTEXT = f"/usr/share/{APP_CONTEXT}"
        USER_HOME       = path.expanduser('~')
        CONFIG_PATH     = f"{USER_HOME}/.config/{APP_CONTEXT}"
        self.DEFAULT_ICONS    = f"{CONFIG_PATH}/icons"
        self.DEFAULT_ICON     = f"{self.DEFAULT_ICONS}/text.png"
        self.FFMPG_THUMBNLR   = f"{CONFIG_PATH}/ffmpegthumbnailer"    # Thumbnail generator binary
        self.BLENDER_THUMBNLR = f"{CONFIG_PATH}/blender-thumbnailer"  # Blender thumbnail generator binary

        self.ICON_DIRS       = ["/usr/share/icons", f"{USER_HOME}/.icons", "/usr/share/pixmaps"]
        self.BASE_THUMBS_PTH = f"{USER_HOME}/.thumbnails"
        self.ABS_THUMBS_PTH  = f"{self.BASE_THUMBS_PTH}/normal"
        self.STEAM_ICONS_PTH = f"{self.BASE_THUMBS_PTH}/steam_icons"
        self.STEAM_CDN_URL   = ""

        if not path.isdir(self.BASE_THUMBS_PTH):
            os.mkdir(self.BASE_THUMBS_PTH)

        if not path.isdir(self.ABS_THUMBS_PTH):
            os.mkdir(self.ABS_THUMBS_PTH)

        if not path.isdir(self.STEAM_ICONS_PTH):
            os.mkdir(self.STEAM_ICONS_PTH)

        if not os.path.exists(self.DEFAULT_ICONS):
            self.DEFAULT_ICONS = f"{USR_APP_CONTEXT}/icons"
            self.DEFAULT_ICON  = f"{self.DEFAULT_ICONS}/text.png"

        CONFIG_FILE = f"{CURRENT_PATH}/../settings.json"
        with open(CONFIG_FILE) as f:
            settings = json.load(f)
            config   = settings["config"]

            self.container_icon_wh = config["container_icon_wh"]
            self.video_icon_wh     = config["video_icon_wh"]
            self.sys_icon_wh       = config["sys_icon_wh"]
            self.STEAM_CDN_URL     = config["steam_cdn_url"]

            # Filters
            filters      = settings["filters"]
            self.fmeshs  = tuple(filters["meshs"])
            self.fcode   = tuple(filters["code"])
            self.fvideos = tuple(filters["videos"])
            self.foffice = tuple(filters["office"])
            self.fimages = tuple(filters["images"])
            self.ftext   = tuple(filters["text"])
            self.fmusic  = tuple(filters["music"])
            self.fpdf    = tuple(filters["pdf"])
@@ -29,13 +29,17 @@ class IconException(Exception):


 class Icon(DesktopIconMixin, VideoIconMixin, MeshsIconMixin):
+    cache = {}
+
     def create_icon(self, dir, file):
         full_path = f"{dir}/{file}"
         return self.get_icon_image(dir, file, full_path)

     def get_icon_image(self, dir, file, full_path):
         try:
-            thumbnl = None
+            thumbnl = self.cache.get(full_path)
+            if thumbnl:
+                return thumbnl

             if file.lower().endswith(self.fmeshs):    # 3D Mesh icon
                 ...

@@ -55,7 +59,7 @@ class Icon(DesktopIconMixin, VideoIconMixin, MeshsIconMixin):
             if not thumbnl:
                 raise IconException("No known icons found.")

-
+            self.cache[full_path] = thumbnl
             return thumbnl
         except IconException:
             ...

@@ -138,11 +142,14 @@ class Icon(DesktopIconMixin, VideoIconMixin, MeshsIconMixin):
         def _call_gtk_thread(event, result):
             result.append( self.get_system_thumbnail(full_path, size) )
             event.set()
+            return False

         result = []
         event  = threading.Event()
         GLib.idle_add(_call_gtk_thread, event, result)
         event.wait()
+
+        event = None
         return result[0]
@@ -151,11 +158,12 @@ class Icon(DesktopIconMixin, VideoIconMixin, MeshsIconMixin):
             gio_file = Gio.File.new_for_path(full_path)
             info     = gio_file.query_info('standard::icon' , 0, None)
             icon     = info.get_icon().get_names()[0]
-            data     = settings_manager.get_icon_theme().lookup_icon(icon , size , 0)
+            data     = settings_manager.get_icon_theme().lookup_icon(icon , size, 0)

             if data:
                 icon_path = data.get_filename()
-                return GdkPixbuf.Pixbuf.new_from_file(icon_path)
+                return GdkPixbuf.Pixbuf.new_from_file_at_size(icon_path, width = size, height = size)
+
             raise IconException("No system icon found...")
         except IconException:
@@ -174,14 +182,15 @@ class Icon(DesktopIconMixin, VideoIconMixin, MeshsIconMixin):
         return path_exists, img_hash, hash_img_path


-    def fast_hash(self, filename, hash_factory=hashlib.md5, chunk_num_blocks=128, i=1):
+    def fast_hash(self, filename: str, hash_factory: callable = hashlib.md5, chunk_num_blocks: int = 128, i: int = 1) -> str:
         h = hash_factory()
         with open(filename,'rb') as f:
            # NOTE: Jump to middle of file
            f.seek(0, 2)
            mid = int(f.tell() / 2)
            f.seek(mid, 0)

-           while chunk := f.read(chunk_num_blocks*h.block_size):
+           while chunk := f.read(chunk_num_blocks * h.block_size):
                h.update(chunk)
                if (i == 12):
                    break
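A usage sketch of `fast_hash` as typed above; the video path is illustrative and the sketch assumes the elided tail of the method returns the digest string (e.g. `h.hexdigest()`):

```python
controller = IconController()  # loads settings.json; assumes it initializes cleanly

# Hashes a bounded number of mid-file chunks instead of the whole file,
# trading collision resistance for speed on large videos.
img_hash      = controller.fast_hash("/tmp/example.mkv")
hash_img_path = f"{controller.ABS_THUMBS_PTH}/{img_hash}.jpg"
```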
@@ -14,4 +14,4 @@ class MeshsIconMixin:
             proc = subprocess.Popen([self.BLENDER_THUMBNLR, full_path, hash_img_path])
             proc.wait()
         except Exception as e:
-            self.logger.debug(repr(e))
+            logger.debug(repr(e))
@@ -14,7 +14,7 @@ class VideoIconMixin:
             proc = subprocess.Popen([self.FFMPG_THUMBNLR, "-t", scrub_percent, "-s", "300", "-c", "jpg", "-i", full_path, "-o", hash_img_path])
             proc.wait()
         except Exception as e:
-            self.logger.debug(repr(e))
+            logger.info(e)
             self.ffprobe_generate_video_thumbnail(full_path, hash_img_path)
@@ -51,5 +51,4 @@ class VideoIconMixin:
             proc.wait()
         except Exception as e:
             print("Video thumbnail generation issue in thread:")
-            print( repr(e) )
-            self.logger.debug(repr(e))
+            logger.info(repr(e))
10  plugins/thumbnailer/manifest.json (new file)
@@ -0,0 +1,10 @@
{
    "name": "Thumbnailer",
    "author": "ITDominator",
    "version": "0.0.1",
    "support": "",
    "pre_launch": true,
    "requests": {
        "pass_fm_events": true
    }
}
75  plugins/thumbnailer/plugin.py (new file)
@@ -0,0 +1,75 @@
# Python imports
import os

# Lib imports

# Application imports
from plugins.plugin_base import PluginBase
from .icons.controller import IconController



class Plugin(PluginBase):
    def __init__(self):
        super().__init__()

        self.name = "Thumbnailer"  # NOTE: Need to remove after establishing private bidirectional 1-1 message bus
                                   #       where self.name should not be needed for message comms
        # self.path = os.path.dirname(os.path.realpath(__file__))


    def run(self):
        self.icon_controller = IconController()
        self._event_system.subscribe("create-thumbnail", self.create_thumbnail)
        self._event_system.subscribe("create-video-thumbnail", self.create_video_thumbnail)
        self._event_system.subscribe("create-scaled-image", self.create_scaled_image)
        self._event_system.subscribe("get-thumbnail-hash", self.get_thumbnail_hash)
        self._event_system.subscribe("get-thumbnails-path", self.get_thumbnails_path)

    def generate_reference_ui_element(self):
        ...

    def create_thumbnail(self, dir, file) -> str:
        return self.icon_controller.create_icon(dir, file)

    def create_video_thumbnail(self, file, scrub_percent, replace):
        return self.icon_controller.create_video_thumbnail(file, scrub_percent, replace)

    def create_scaled_image(self, hash_img_pth):
        return self.icon_controller.create_scaled_image(hash_img_pth)

    def get_thumbnail_hash(self, file):
        return self.icon_controller.generate_hash_and_path(file)

    def get_thumbnails_path(self) -> str:
        return self.icon_controller.ABS_THUMBS_PTH

    def get_video_icons(self, dir) -> list:
        data    = []
        fvideos = self.icon_controller.fvideos
        vids    = [ file for file in os.listdir(dir) if file.lower().endswith(fvideos) ]

        for file in vids:
            img_hash, hash_img_path = self.create_video_thumbnail(full_path = f"{dir}/{file}", returnHashInstead = True)
            data.append([img_hash, hash_img_path])

        return data

    def get_pixbuf_icon_str_combo(self, dir) -> list:
        data = []
        for file in os.listdir(dir):
            icon = self.icon_controller.create_icon(dir, file).get_pixbuf()
            data.append([icon, file])

        return data

    def get_gtk_icon_str_combo(self, dir) -> list:
        data = []
        for file in os.listdir(dir):
            icon = self.icon_controller.create_icon(dir, file)
            data.append([icon, file[0]])

        return data
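The subscriptions registered in `run()` pair with `emit_and_await` calls from other plugins (see the VOD Thumbnailer changes below). A minimal sketch of the round trip, assuming the event system exposes `emit_and_await` exactly as used elsewhere in this diff and with an illustrative file path:

```python
# Inside another plugin that was passed fm events:
thumbs_path = self._event_system.emit_and_await("get-thumbnails-path")

path_exists, img_hash, hash_img_path = self._event_system.emit_and_await(
    "get-thumbnail-hash", ("/home/user/videos/clip.mp4",))
```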
101  plugins/thumbnailer/settings.json (new file)
@@ -0,0 +1,101 @@
{
    "config": {
        "thumbnailer_path": "ffmpegthumbnailer",
        "blender_thumbnailer_path": "",
        "container_icon_wh": [
            128,
            128
        ],
        "video_icon_wh": [
            128,
            64
        ],
        "sys_icon_wh": [
            56,
            56
        ],
        "steam_cdn_url": "https://steamcdn-a.akamaihd.net/steam/apps/",
        "remux_folder_max_disk_usage": "8589934592"
    },
    "filters": {
        "meshs": [
            ".dae",
            ".fbx",
            ".gltf",
            ".obj",
            ".stl"
        ],
        "code": [
            ".cpp",
            ".css",
            ".c",
            ".go",
            ".html",
            ".htm",
            ".java",
            ".js",
            ".json",
            ".lua",
            ".md",
            ".py",
            ".rs",
            ".toml",
            ".xml",
            ".pom"
        ],
        "videos": [
            ".mkv",
            ".mp4",
            ".webm",
            ".avi",
            ".mov",
            ".m4v",
            ".mpg",
            ".mpeg",
            ".wmv",
            ".flv"
        ],
        "office": [
            ".doc",
            ".docx",
            ".xls",
            ".xlsx",
            ".xlt",
            ".xltx",
            ".xlm",
            ".ppt",
            ".pptx",
            ".pps",
            ".ppsx",
            ".odt",
            ".rtf"
        ],
        "images": [
            ".png",
            ".jpg",
            ".jpeg",
            ".gif",
            ".ico",
            ".tga",
            ".webp"
        ],
        "text": [
            ".txt",
            ".text",
            ".sh",
            ".cfg",
            ".conf",
            ".log"
        ],
        "music": [
            ".psf",
            ".mp3",
            ".ogg",
            ".flac",
            ".m4a"
        ],
        "pdf": [
            ".pdf"
        ]
    }
}
BIN  plugins/translate/brotli/_brotli.abi3.so (executable file)
@@ -1,12 +1,10 @@
 {
-    "manifest": {
-        "name": "Translate",
-        "author": "ITDominator",
-        "version": "0.0.1",
-        "support": "",
-        "requests": {
-            "ui_target": "plugin_control_list",
-            "pass_fm_events": "true"
-        }
+    "name": "Translate",
+    "author": "ITDominator",
+    "version": "0.0.1",
+    "support": "",
+    "requests": {
+        "ui_target": "plugin_control_list",
+        "pass_fm_events": true
     }
 }
@@ -184,8 +184,8 @@ class Plugin(PluginBase):
         response = requests.post(self.vqd_link, headers=self.vqd_headers, data=self.vqd_data, timeout=2)
         if response.status_code == 200:
             data = response.content
-            vqd_start_index = data.index(b"vqd='") + 5
-            vqd_end_index   = data.index(b"'", vqd_start_index)
+            vqd_start_index = data.index(b"vqd=\"") + 5
+            vqd_end_index   = data.index(b"\"", vqd_start_index)
             self._vqd_attrib = data[vqd_start_index:vqd_end_index].decode("utf-8")

             print(f"Translation VQD: {self._vqd_attrib}")
@@ -1,16 +1,14 @@
 {
-    "manifest": {
-        "name": "Trasher",
-        "author": "ITDominator",
-        "version": "0.0.1",
-        "support": "",
-        "requests": {
-            "ui_target": "context_menu",
-            "pass_fm_events": "true",
-            "bind_keys": [
-                "Trasher||delete_files:Delete",
-                "Trasher||trash_files:<Control>d"
-            ]
-        }
+    "name": "Trasher",
+    "author": "ITDominator",
+    "version": "0.0.1",
+    "support": "",
+    "requests": {
+        "ui_target": "context_menu",
+        "pass_fm_events": true,
+        "bind_keys": [
+            "Trasher||delete_files:Delete",
+            "Trasher||trash_files:<Control>d"
+        ]
     }
 }
@@ -111,6 +111,8 @@ class Plugin(PluginBase):
         for uri in state.uris:
             self.trashman.trash(uri, verbocity)

+        self.trashman.regenerate()
+
     def restore_trash_files(self, widget = None, eve = None, verbocity = False):
         self._event_system.emit("get_current_state")
         state = self._fm_state
@@ -43,4 +43,4 @@ class Trash(object):

     def restore(self, filename, verbose):
         """Restore a file from trash."""
-        raise NotImplementedError(_('Backend didn’t \ implement this functionality'))
+        raise NotImplementedError(_('Backend didn’t implement this functionality'))
@@ -127,7 +127,7 @@ DeletionDate={}
             f.write(infofile)
             f.close()

-        self.regenerate()
+        # self.regenerate()

         if verbose:
             sys.stderr.write(_('trashed \'{}\'\n').format(filename))
@@ -1,12 +1,10 @@
 {
-    "manifest": {
-        "name": "VOD Thumbnailer",
-        "author": "ITDominator",
-        "version": "0.0.1",
-        "support": "",
-        "requests": {
-            "ui_target": "context_menu_plugins",
-            "pass_fm_events": "true"
-        }
+    "name": "VOD Thumbnailer",
+    "author": "ITDominator",
+    "version": "0.0.1",
+    "support": "",
+    "requests": {
+        "ui_target": "context_menu_plugins",
+        "pass_fm_events": true
     }
 }
@@ -26,6 +26,8 @@ def threaded(fn):
     return wrapper


+class VODThumbnailerException(Exception):
+    ...

 class Plugin(PluginBase):
@@ -94,32 +96,39 @@ class Plugin(PluginBase):
         file      = self._file_name.get_text()
         dir       = self._file_location.get_text()
         file_hash = self._file_hash.get_text()
-        hash_img_pth = f"{self._fm_state.tab.ABS_THUMBS_PTH}/{file_hash}.jpg"
+        hash_img_pth = f"{self.ABS_THUMBS_PTH}/{file_hash}.jpg"

         try:
-            self._fm_state.tab.create_video_thumbnail(f"{dir}/{file}", f"{scrub_percent}%", True)
+            self._event_system.emit_and_await("create-video-thumbnail", (f"{dir}/{file}", f"{scrub_percent}%", True,))
             preview_pixbuf = GdkPixbuf.Pixbuf.new_from_file(hash_img_pth)
             self._thumbnail_preview_img.set_from_pixbuf(preview_pixbuf)

-            img_pixbuf = self._fm_state.tab.create_scaled_image(hash_img_pth)
+            img_pixbuf = self._event_system.emit_and_await("create-scaled-image", (hash_img_pth,))
             tree_pth   = self._fm_state.icon_grid.get_selected_items()[0]
             itr        = self._fm_state.store.get_iter(tree_pth)
             pixbuff    = self._fm_state.store.get(itr, 0)[0]
             self._fm_state.store.set(itr, 0, img_pixbuf)
         except Exception as e:
-            print(repr(e))
+            print("Couldn't regenerate thumbnail!")
+            print(repr(e))


     def _set_ui_data(self):
         uri   = self._fm_state.uris[0]
         path  = self._fm_state.tab.get_current_directory()
         parts = uri.split("/")
-        file_hash    = self._fm_state.tab.fast_hash(uri)
-        hash_img_pth = f"{self._fm_state.tab.ABS_THUMBS_PTH}/{file_hash}.jpg"
+        path_exists, \
+        img_hash, \
+        hash_img_pth = self._event_system.emit_and_await("get-thumbnail-hash", (uri,))
+
+        if not path_exists:
+            raise VODThumbnailerException(f"Could not generate file_hash from: {uri}")
+
+        self.ABS_THUMBS_PTH = self._event_system.emit_and_await("get-thumbnails-path")
         preview_pixbuf = GdkPixbuf.Pixbuf.new_from_file(hash_img_pth)

         self._thumbnail_preview_img.set_from_pixbuf(preview_pixbuf)
         self._file_name.set_text(parts[ len(parts) - 1 ])
         self._file_location.set_text(path)
-        self._file_hash.set_text(file_hash)
+        self._file_hash.set_text(img_hash)
@@ -8,12 +8,29 @@


 function main() {
-    cd "$(dirname "")"
-    echo "Working Dir: " $(pwd)
+    _STARGET="${1}"
+    _SPATH="${HOME}/.config/solarfm/plugins/youtube_download"
     LINK=`xclip -selection clipboard -o`

-    python "${HOME}/.config/solarfm/plugins/youtube_download/yt_dlp/__main__.py" \
-    --write-sub --embed-sub --sub-langs en \
-    -o "${1}/%(title)s.%(ext)s" "${LINK}"
+    cd "${_SPATH}"
+    echo "Working Dir: " $(pwd)
+
+    rm "${_SPATH}/../../cookies.txt"
+
+    # Note: Export cookies to file
+    python "${_SPATH}/yt_dlp/__main__.py" \
+        --cookies-from-browser firefox --cookies "${_SPATH}/../../cookies.txt"
+
+    # Note: Use cookies from browser directly
+    # python "${_SPATH}/yt_dlp/__main__.py" \
+    #     --cookies-from-browser firefox --write-sub --embed-sub --sub-langs en \
+    #     -o "${_STARGET}/%(title)s.%(ext)s" "${LINK}"
+
+    # Note: Download video
+    python "${_SPATH}/yt_dlp/__main__.py" \
+        -f "bestvideo[height<=1080][ext=mp4][vcodec^=av]+bestaudio[ext=m4a]/best[ext=mp4]/best" \
+        --cookies "${_SPATH}/../../cookies.txt" --write-sub --embed-sub --sub-langs en \
+        -o "${_STARGET}/%(title)s.%(ext)s" "${LINK}"
 }
 main "$@";
@@ -1,12 +1,10 @@
 {
-    "manifest": {
-        "name": "Youtube Download",
-        "author": "ITDominator",
-        "version": "0.0.1",
-        "support": "",
-        "requests": {
-            "ui_target": "plugin_control_list",
-            "pass_fm_events": "true"
-        }
+    "name": "Youtube Download",
+    "author": "ITDominator",
+    "version": "0.0.1",
+    "support": "",
+    "requests": {
+        "ui_target": "plugin_control_list",
+        "pass_fm_events": true
     }
 }
@@ -47,4 +47,4 @@ class Plugin(PluginBase):
|
||||
|
||||
@threaded
|
||||
def _download(self, dir):
|
||||
subprocess.Popen([f'{self.path}/download.sh', dir])
|
||||
subprocess.Popen([f'{self.path}/download.sh', dir], start_new_session=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, close_fds=True)
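
A sketch of what the new, detached `Popen` call buys (POSIX semantics assumed): `start_new_session=True` puts the child in its own session, so terminal signals aimed at SolarFM do not propagate to the downloader, and `DEVNULL` keeps its output off the GUI's pipes.

```
import subprocess

def launch_detached(script: str, target_dir: str) -> subprocess.Popen:
    return subprocess.Popen(
        [script, target_dir],
        start_new_session=True,     # own session: survives the parent's terminal
        stdout=subprocess.DEVNULL,  # discard output instead of inheriting GUI fds
        stderr=subprocess.DEVNULL,
        close_fds=True,             # do not leak the parent's open descriptors
    )
```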
@@ -1,10 +1,10 @@
try:
    import contextvars  # noqa: F401
except Exception:
    raise Exception(
        f'You are using an unsupported version of Python. Only Python versions 3.7 and above are supported by yt-dlp')  # noqa: F541
import sys

__license__ = 'Public Domain'
if sys.version_info < (3, 10):
    raise ImportError(
        f'You are using an unsupported version of Python. Only Python versions 3.10 and above are supported by yt-dlp')  # noqa: F541

__license__ = 'The Unlicense'

import collections
import getpass
@@ -12,15 +12,16 @@ import itertools
import optparse
import os
import re
import sys
import traceback

from .compat import compat_shlex_quote
from .cookies import SUPPORTED_BROWSERS, SUPPORTED_KEYRINGS
from .cookies import SUPPORTED_BROWSERS, SUPPORTED_KEYRINGS, CookieLoadError
from .downloader.external import get_external_downloader
from .extractor import list_extractor_classes
from .extractor.adobepass import MSO_INFO
from .networking.impersonate import ImpersonateTarget
from .globals import IN_CLI, plugin_dirs
from .options import parseOpts
from .plugins import load_all_plugins as _load_all_plugins
from .postprocessor import (
    FFmpegExtractAudioPP,
    FFmpegMergerPP,
@@ -43,12 +44,12 @@ from .utils import (
    GeoUtils,
    PlaylistEntries,
    SameFileError,
    decodeOption,
    download_range_func,
    expand_path,
    float_or_none,
    format_field,
    int_or_none,
    join_nonempty,
    match_filter_func,
    parse_bytes,
    parse_duration,
@@ -57,14 +58,19 @@ from .utils import (
    read_stdin,
    render_table,
    setproctitle,
    traverse_obj,
    shell_quote,
    variadic,
    write_string,
)
from .utils.networking import std_headers
from .YoutubeDL import YoutubeDL

_IN_CLI = False
)
from .utils._utils import _UnsafeExtensionError
from .utils._jsruntime import (
    BunJsRuntime as _BunJsRuntime,
    DenoJsRuntime as _DenoJsRuntime,
    NodeJsRuntime as _NodeJsRuntime,
    QuickJsRuntime as _QuickJsRuntime,
)
from .YoutubeDL import YoutubeDL


def _exit(status=0, *args):
@@ -74,14 +80,16 @@ def _exit(status=0, *args):


def get_urls(urls, batchfile, verbose):
    # Batch file verification
    """
    @param verbose -1: quiet, 0: normal, 1: verbose
    """
    batch_urls = []
    if batchfile is not None:
        try:
            batch_urls = read_batch_urls(
                read_stdin('URLs') if batchfile == '-'
                read_stdin(None if verbose == -1 else 'URLs') if batchfile == '-'
                else open(expand_path(batchfile), encoding='utf-8', errors='ignore'))
            if verbose:
            if verbose == 1:
                write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n')
        except OSError:
            _exit(f'ERROR: batch file {batchfile} could not be read')
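
A small sketch (a hypothetical helper, not yt-dlp code) of the quiet-mode change above: with `verbose == -1` the stdin reader gets no prompt label, so nothing is printed before URLs are read from a pipe.

```
import sys

def read_urls_from_stdin(prompt: str | None) -> list[str]:
    # When prompt is None (quiet mode), stay silent even on a TTY
    if prompt is not None and sys.stdin.isatty():
        print(f'Reading {prompt} from stdin...', file=sys.stderr)
    return [line.strip() for line in sys.stdin if line.strip() and not line.startswith('#')]
```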
@@ -112,9 +120,9 @@ def print_extractor_information(opts, urls):
            ie.description(markdown=False, search_examples=_SEARCHES)
            for ie in list_extractor_classes(opts.age_limit) if ie.working() and ie.IE_DESC is not False)
    elif opts.ap_list_mso:
        out = 'Supported TV Providers:\n%s\n' % render_table(
        out = 'Supported TV Providers:\n{}\n'.format(render_table(
            ['mso', 'mso name'],
            [[mso_id, mso_info['name']] for mso_id, mso_info in MSO_INFO.items()])
            [[mso_id, mso_info['name']] for mso_id, mso_info in MSO_INFO.items()]))
    else:
        return False
    write_string(out, out=sys.stdout)
@@ -126,7 +134,7 @@ def set_compat_opts(opts):
        if name not in opts.compat_opts:
            return False
        opts.compat_opts.discard(name)
        opts.compat_opts.update(['*%s' % name])
        opts.compat_opts.update([f'*{name}'])
        return True

    def set_default_compat(compat_name, opt_name, default=True, remove_compat=True):
@@ -153,6 +161,15 @@ def set_compat_opts(opts):
        opts.embed_infojson = False
    if 'format-sort' in opts.compat_opts:
        opts.format_sort.extend(FormatSorter.ytdl_default)
    elif 'prefer-vp9-sort' in opts.compat_opts:
        FormatSorter.default = FormatSorter._prefer_vp9_sort

    if 'mtime-by-default' in opts.compat_opts:
        if opts.updatetime is None:
            opts.updatetime = True
        else:
            _unused_compat_opt('mtime-by-default')

    _video_multistreams_set = set_default_compat('multistreams', 'allow_multiple_video_streams', False, remove_compat=False)
    _audio_multistreams_set = set_default_compat('multistreams', 'allow_multiple_audio_streams', False, remove_compat=False)
    if _video_multistreams_set is False and _audio_multistreams_set is False:
@@ -219,7 +236,7 @@ def validate_options(opts):
    validate_minmax(opts.sleep_interval, opts.max_sleep_interval, 'sleep interval')

    if opts.wait_for_video is not None:
        min_wait, max_wait, *_ = map(parse_duration, opts.wait_for_video.split('-', 1) + [None])
        min_wait, max_wait, *_ = map(parse_duration, [*opts.wait_for_video.split('-', 1), None])
        validate(min_wait is not None and not (max_wait is None and '-' in opts.wait_for_video),
                 'time range to wait for video', opts.wait_for_video)
        validate_minmax(min_wait, max_wait, 'time range to wait for video')
@@ -230,6 +247,11 @@ def validate_options(opts):
        validate_regex('format sorting', f, FormatSorter.regex)

    # Postprocessor formats
    if opts.convertsubtitles == 'none':
        opts.convertsubtitles = None
    if opts.convertthumbnails == 'none':
        opts.convertthumbnails = None

    validate_regex('merge output format', opts.merge_output_format,
                   r'({0})(/({0}))*'.format('|'.join(map(re.escape, FFmpegMergerPP.SUPPORTED_EXTS))))
    validate_regex('audio format', opts.audioformat, FFmpegExtractAudioPP.FORMAT_RE)
@@ -249,9 +271,11 @@ def validate_options(opts):
        elif value in ('inf', 'infinite'):
            return float('inf')
        try:
            return int(value)
            int_value = int(value)
        except (TypeError, ValueError):
            validate(False, f'{name} retry count', value)
        validate_positive(f'{name} retry count', int_value)
        return int_value
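
A simplified sketch of the retry-count parsing just above: `'inf'`/`'infinite'` map to `float('inf')`, and anything else must be a non-negative integer.

```
def parse_retry_count(value: str) -> float | int:
    if value in ('inf', 'infinite'):
        return float('inf')
    count = int(value)  # raises ValueError on junk, mirroring validate(False, ...)
    if count < 0:
        raise ValueError(f'retry count must be non-negative, got {count}')
    return count

assert parse_retry_count('10') == 10
assert parse_retry_count('infinite') == float('inf')
```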

    opts.retries = parse_retries('download', opts.retries)
    opts.fragment_retries = parse_retries('fragment', opts.fragment_retries)
@@ -261,9 +285,9 @@ def validate_options(opts):
    # Retry sleep function
    def parse_sleep_func(expr):
        NUMBER_RE = r'\d+(?:\.\d+)?'
        op, start, limit, step, *_ = tuple(re.fullmatch(
        op, start, limit, step, *_ = (*tuple(re.fullmatch(
            rf'(?:(linear|exp)=)?({NUMBER_RE})(?::({NUMBER_RE})?)?(?::({NUMBER_RE}))?',
            expr.strip()).groups()) + (None, None)
            expr.strip()).groups()), None, None)

        if op == 'exp':
            return lambda n: min(float(start) * (float(step or 2) ** n), float(limit or 'inf'))
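
A worked example of the sleep expressions parsed above, assuming the documented `exp=START[:LIMIT[:STEP]]` form: `exp=1:30:2` yields 1, 2, 4, 8, ... seconds, capped at 30.

```
def exp_sleep(start: float, limit: float = float('inf'), step: float = 2):
    return lambda n: min(start * (step ** n), limit)

sleep = exp_sleep(1, 30)
assert [sleep(n) for n in range(6)] == [1, 2, 4, 8, 16, 30]
```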
@@ -281,18 +305,20 @@ def validate_options(opts):
            raise ValueError(f'invalid {key} retry sleep expression {expr!r}')

    # Bytes
    def validate_bytes(name, value):
    def validate_bytes(name, value, strict_positive=False):
        if value is None:
            return None
        numeric_limit = parse_bytes(value)
        validate(numeric_limit is not None, 'rate limit', value)
        validate(numeric_limit is not None, name, value)
        if strict_positive:
            validate_positive(name, numeric_limit, True)
        return numeric_limit

    opts.ratelimit = validate_bytes('rate limit', opts.ratelimit)
    opts.ratelimit = validate_bytes('rate limit', opts.ratelimit, True)
    opts.throttledratelimit = validate_bytes('throttled rate limit', opts.throttledratelimit)
    opts.min_filesize = validate_bytes('min filesize', opts.min_filesize)
    opts.max_filesize = validate_bytes('max filesize', opts.max_filesize)
    opts.buffersize = validate_bytes('buffer size', opts.buffersize)
    opts.buffersize = validate_bytes('buffer size', opts.buffersize, True)
    opts.http_chunk_size = validate_bytes('http chunk size', opts.http_chunk_size)
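
A hedged sketch of size-suffix parsing in the spirit of `parse_bytes` (simplified; the real helper accepts more forms): `'10M'` becomes `10 * 1024**2`.

```
import re

def parse_size(value: str) -> int | None:
    m = re.fullmatch(r'([\d.]+)\s*([kKmMgGtT]?)i?[bB]?', value.strip())
    if not m:
        return None
    number, unit = float(m.group(1)), m.group(2).lower()
    exponent = 'kmgt'.index(unit) + 1 if unit else 0
    return int(number * 1024 ** exponent)

assert parse_size('10M') == 10 * 1024 ** 2
assert parse_size('1.5K') == 1536
assert parse_size('500') == 500
```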

    # Output templates
@@ -387,16 +413,19 @@ def validate_options(opts):
            f'Supported keyrings are: {", ".join(sorted(SUPPORTED_KEYRINGS))}')
        opts.cookiesfrombrowser = (browser_name, profile, keyring, container)

    if opts.impersonate is not None:
        opts.impersonate = ImpersonateTarget.from_str(opts.impersonate.lower())

    # MetadataParser
    def metadataparser_actions(f):
        if isinstance(f, str):
            cmd = '--parse-metadata %s' % compat_shlex_quote(f)
            cmd = f'--parse-metadata {shell_quote(f)}'
            try:
                actions = [MetadataFromFieldPP.to_action(f)]
            except Exception as err:
                raise ValueError(f'{cmd} is invalid; {err}')
        else:
            cmd = '--replace-in-metadata %s' % ' '.join(map(compat_shlex_quote, f))
            cmd = f'--replace-in-metadata {shell_quote(f)}'
            actions = ((MetadataParserPP.Actions.REPLACE, x, *f[1:]) for x in f[0].split(','))

        for action in actions:
@@ -407,13 +436,17 @@ def validate_options(opts):
            yield action

    if opts.metafromtitle is not None:
        opts.parse_metadata.setdefault('pre_process', []).append('title:%s' % opts.metafromtitle)
        opts.parse_metadata.setdefault('pre_process', []).append(f'title:{opts.metafromtitle}')
    opts.parse_metadata = {
        k: list(itertools.chain(*map(metadataparser_actions, v)))
        for k, v in opts.parse_metadata.items()
    }

    # Other options
    opts.plugin_dirs = opts.plugin_dirs
    if opts.plugin_dirs is None:
        opts.plugin_dirs = ['default']

    if opts.playlist_items is not None:
        try:
            tuple(PlaylistEntries.parse_playlist_items(opts.playlist_items))
@@ -460,7 +493,7 @@ def validate_options(opts):
        default_downloader = ed.get_basename()

    for policy in opts.color.values():
        if policy not in ('always', 'auto', 'no_color', 'never'):
        if policy not in ('always', 'auto', 'auto-tty', 'no_color', 'no_color-tty', 'never'):
            raise ValueError(f'"{policy}" is not a valid color policy')

    warnings, deprecation_warnings = [], []
@@ -472,6 +505,14 @@ def validate_options(opts):
            'To let yt-dlp download and merge the best available formats, simply do not pass any format selection',
            'If you know what you are doing and want only the best pre-merged format, use "-f b" instead to suppress this warning')))

    # Common mistake: -f mp4
    if opts.format == 'mp4':
        warnings.append('.\n '.join((
            '"-f mp4" selects the best pre-merged mp4 format which is often not what\'s intended',
            'Pre-merged mp4 formats are not available from all sites, or may only be available in lower quality',
            'To prioritize the best h264 video and aac audio in an mp4 container, use "-t mp4" instead',
            'If you know what you are doing and want a pre-merged mp4 format, use "-f b[ext=mp4]" instead to suppress this warning')))

    # --(postprocessor/downloader)-args without name
    def report_args_compat(name, value, key1, key2=None, where=None):
        if key1 in value and key2 not in value:
@@ -487,7 +528,6 @@ def validate_options(opts):

    if report_args_compat('post-processor', opts.postprocessor_args, 'default-compat', 'default'):
        opts.postprocessor_args['default'] = opts.postprocessor_args.pop('default-compat')
    opts.postprocessor_args.setdefault('sponskrub', [])

    def report_conflict(arg1, opt1, arg2='--allow-unplayable-formats', opt2='allow_unplayable_formats',
                        val1=NO_DEFAULT, val2=NO_DEFAULT, default=False):
@@ -512,11 +552,6 @@ def validate_options(opts):
                    '"--exec before_dl:"', 'exec_cmd', val2=opts.exec_cmd.get('before_dl'))
    report_conflict('--id', 'useid', '--output', 'outtmpl', val2=opts.outtmpl.get('default'))
    report_conflict('--remux-video', 'remuxvideo', '--recode-video', 'recodevideo')
    report_conflict('--sponskrub', 'sponskrub', '--remove-chapters', 'remove_chapters')
    report_conflict('--sponskrub', 'sponskrub', '--sponsorblock-mark', 'sponsorblock_mark')
    report_conflict('--sponskrub', 'sponskrub', '--sponsorblock-remove', 'sponsorblock_remove')
    report_conflict('--sponskrub-cut', 'sponskrub_cut', '--split-chapter', 'split_chapters',
                    val1=opts.sponskrub and opts.sponskrub_cut)

    # Conflicts with --allow-unplayable-formats
    report_conflict('--embed-metadata', 'addmetadata')
@@ -529,23 +564,15 @@ def validate_options(opts):
    report_conflict('--recode-video', 'recodevideo')
    report_conflict('--remove-chapters', 'remove_chapters', default=[])
    report_conflict('--remux-video', 'remuxvideo')
    report_conflict('--sponskrub', 'sponskrub')
    report_conflict('--sponsorblock-remove', 'sponsorblock_remove', default=set())
    report_conflict('--xattrs', 'xattrs')

    # Fully deprecated options
    def report_deprecation(val, old, new=None):
        if not val:
            return
    if hasattr(opts, '_deprecated_options'):
        deprecation_warnings.append(
            f'{old} is deprecated and may be removed in a future version. Use {new} instead' if new
            else f'{old} is deprecated and may not work as expected')

    report_deprecation(opts.sponskrub, '--sponskrub', '--sponsorblock-mark or --sponsorblock-remove')
    report_deprecation(not opts.prefer_ffmpeg, '--prefer-avconv', 'ffmpeg')
    # report_deprecation(opts.include_ads, '--include-ads')  # We may re-implement this in future
    # report_deprecation(opts.call_home, '--call-home')  # We may re-implement this in future
    # report_deprecation(opts.writeannotations, '--write-annotations')  # It's just that no website has it
            f'The following options have been deprecated: {", ".join(opts._deprecated_options)}\n'
            'Please remove them from your command/configuration to avoid future errors.\n'
            'See https://github.com/yt-dlp/yt-dlp/issues/14198 for more details')
        del opts._deprecated_options

    # Dependent options
    opts.date = DateRange.day(opts.date) if opts.date else DateRange(opts.dateafter, opts.datebefore)
@@ -586,6 +613,13 @@ def validate_options(opts):
    if opts.ap_username is not None and opts.ap_password is None:
        opts.ap_password = getpass.getpass('Type TV provider account password and press [Return]: ')

    # compat option changes global state destructively; only allow from cli
    if 'allow-unsafe-ext' in opts.compat_opts:
        warnings.append(
            'Using allow-unsafe-ext opens you up to potential attacks. '
            'Use with great care!')
        _UnsafeExtensionError.sanitize_extension = lambda x, prepend=False: x

    return warnings, deprecation_warnings


@@ -596,7 +630,7 @@ def get_postprocessors(opts):
            yield {
                'key': 'MetadataParser',
                'actions': actions,
                'when': when
                'when': when,
            }
    sponsorblock_query = opts.sponsorblock_mark | opts.sponsorblock_remove
    if sponsorblock_query:
@@ -604,19 +638,19 @@
            'key': 'SponsorBlock',
            'categories': sponsorblock_query,
            'api': opts.sponsorblock_api,
            'when': 'after_filter'
            'when': 'after_filter',
        }
    if opts.convertsubtitles:
        yield {
            'key': 'FFmpegSubtitlesConvertor',
            'format': opts.convertsubtitles,
            'when': 'before_dl'
            'when': 'before_dl',
        }
    if opts.convertthumbnails:
        yield {
            'key': 'FFmpegThumbnailsConvertor',
            'format': opts.convertthumbnails,
            'when': 'before_dl'
            'when': 'before_dl',
        }
    if opts.extractaudio:
        yield {
@@ -641,7 +675,7 @@ def get_postprocessors(opts):
        yield {
            'key': 'FFmpegEmbedSubtitle',
            # already_have_subtitle = True prevents the file from being deleted after embedding
            'already_have_subtitle': opts.writesubtitles and keep_subs
            'already_have_subtitle': opts.writesubtitles and keep_subs,
        }
        if not opts.writeautomaticsub and keep_subs:
            opts.writesubtitles = True
@@ -654,7 +688,7 @@ def get_postprocessors(opts):
            'remove_sponsor_segments': opts.sponsorblock_remove,
            'remove_ranges': opts.remove_ranges,
            'sponsorblock_chapter_title': opts.sponsorblock_chapter_title,
            'force_keyframes': opts.force_keyframes_at_cuts
            'force_keyframes': opts.force_keyframes_at_cuts,
        }
    # FFmpegMetadataPP should be run after FFmpegVideoConvertorPP and
    # FFmpegExtractAudioPP as containers before conversion may not support
@@ -669,26 +703,11 @@ def get_postprocessors(opts):
            'add_metadata': opts.addmetadata,
            'add_infojson': opts.embed_infojson,
        }
    # Deprecated
    # This should be above EmbedThumbnail since sponskrub removes the thumbnail attachment
    # but must be below EmbedSubtitle and FFmpegMetadata
    # See https://github.com/yt-dlp/yt-dlp/issues/204 , https://github.com/faissaloo/SponSkrub/issues/29
    # If opts.sponskrub is None, sponskrub is used, but it silently fails if the executable can't be found
    if opts.sponskrub is not False:
        yield {
            'key': 'SponSkrub',
            'path': opts.sponskrub_path,
            'args': opts.sponskrub_args,
            'cut': opts.sponskrub_cut,
            'force': opts.sponskrub_force,
            'ignoreerror': opts.sponskrub is None,
            '_from_cli': True,
        }
    if opts.embedthumbnail:
        yield {
            'key': 'EmbedThumbnail',
            # already_have_thumbnail = True prevents the file from being deleted after embedding
            'already_have_thumbnail': opts.writethumbnail
            'already_have_thumbnail': opts.writethumbnail,
        }
        if not opts.writethumbnail:
            opts.writethumbnail = True
@@ -722,7 +741,7 @@ ParsedOptions = collections.namedtuple('ParsedOptions', ('parser', 'options', 'u
def parse_options(argv=None):
    """@returns ParsedOptions(parser, opts, urls, ydl_opts)"""
    parser, opts, urls = parseOpts(argv)
    urls = get_urls(urls, opts.batchfile, opts.verbose)
    urls = get_urls(urls, opts.batchfile, -1 if opts.quiet and not opts.verbose else opts.verbose)

    set_compat_opts(opts)
    try:
@@ -735,7 +754,7 @@ def parse_options(argv=None):
    print_only = bool(opts.forceprint) and all(k not in opts.forceprint for k in POSTPROCESS_WHEN[3:])
    any_getting = any(getattr(opts, k) for k in (
        'dumpjson', 'dump_single_json', 'getdescription', 'getduration', 'getfilename',
        'getformat', 'getid', 'getthumbnail', 'gettitle', 'geturl'
        'getformat', 'getid', 'getthumbnail', 'gettitle', 'geturl',
    ))
    if opts.quiet is None:
        opts.quiet = any_getting or opts.print_json or bool(opts.forceprint)
@@ -761,6 +780,10 @@ def parse_options(argv=None):
        else opts.audioformat if (opts.extractaudio and opts.audioformat in FFmpegExtractAudioPP.SUPPORTED_EXTS)
        else None)

    js_runtimes = {
        runtime.lower(): {'path': path} for runtime, path in (
            [*arg.split(':', 1), None][:2] for arg in opts.js_runtimes)}
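
A worked example of the comprehension above: each runtime argument is either `name` or `name:/path/to/binary`, and the path defaults to `None` when omitted.

```
def parse_js_runtimes(args):
    return {
        runtime.lower(): {'path': path}
        for runtime, path in ([*arg.split(':', 1), None][:2] for arg in args)
    }

assert parse_js_runtimes(['deno', 'Node:/usr/bin/node']) == {
    'deno': {'path': None},
    'node': {'path': '/usr/bin/node'},
}
```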

    return ParsedOptions(parser, opts, urls, {
        'usenetrc': opts.usenetrc,
        'netrc_location': opts.netrc_location,
@@ -830,6 +853,7 @@ def parse_options(argv=None):
        'noprogress': opts.quiet if opts.noprogress is None else opts.noprogress,
        'progress_with_newline': opts.progress_with_newline,
        'progress_template': opts.progress_template,
        'progress_delta': opts.progress_delta,
        'playliststart': opts.playliststart,
        'playlistend': opts.playlistend,
        'playlistreverse': opts.playlist_reverse,
@@ -841,7 +865,6 @@ def parse_options(argv=None):
        'nopart': opts.nopart,
        'updatetime': opts.updatetime,
        'writedescription': opts.writedescription,
        'writeannotations': opts.writeannotations,
        'writeinfojson': opts.writeinfojson,
        'allow_playlist_files': opts.allow_playlist_files,
        'clean_infojson': opts.clean_infojson,
@@ -858,8 +881,8 @@ def parse_options(argv=None):
        'listsubtitles': opts.listsubtitles,
        'subtitlesformat': opts.subtitlesformat,
        'subtitleslangs': opts.subtitleslangs,
        'matchtitle': decodeOption(opts.matchtitle),
        'rejecttitle': decodeOption(opts.rejecttitle),
        'matchtitle': opts.matchtitle,
        'rejecttitle': opts.rejecttitle,
        'max_downloads': opts.max_downloads,
        'prefer_free_formats': opts.prefer_free_formats,
        'trim_file_name': opts.trim_file_name,
@@ -875,7 +898,6 @@ def parse_options(argv=None):
        'max_views': opts.max_views,
        'daterange': opts.date,
        'cachedir': opts.cachedir,
        'youtube_print_sig_code': opts.youtube_print_sig_code,
        'age_limit': opts.age_limit,
        'download_archive': opts.download_archive,
        'break_on_existing': opts.break_on_existing,
@@ -893,13 +915,9 @@ def parse_options(argv=None):
        'socket_timeout': opts.socket_timeout,
        'bidi_workaround': opts.bidi_workaround,
        'debug_printtraffic': opts.debug_printtraffic,
        'prefer_ffmpeg': opts.prefer_ffmpeg,
        'include_ads': opts.include_ads,
        'default_search': opts.default_search,
        'dynamic_mpd': opts.dynamic_mpd,
        'extractor_args': opts.extractor_args,
        'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
        'youtube_include_hls_manifest': opts.youtube_include_hls_manifest,
        'encoding': opts.encoding,
        'extract_flat': opts.extract_flat,
        'live_from_start': opts.live_from_start,
@@ -910,7 +928,7 @@ def parse_options(argv=None):
        'postprocessors': postprocessors,
        'fixup': opts.fixup,
        'source_address': opts.source_address,
        'call_home': opts.call_home,
        'impersonate': opts.impersonate,
        'sleep_interval_requests': opts.sleep_interval_requests,
        'sleep_interval': opts.sleep_interval,
        'max_sleep_interval': opts.max_sleep_interval,
@@ -920,7 +938,6 @@ def parse_options(argv=None):
        'force_keyframes_at_cuts': opts.force_keyframes_at_cuts,
        'list_thumbnails': opts.list_thumbnails,
        'playlist_items': opts.playlist_items,
        'xattr_set_filesize': opts.xattr_set_filesize,
        'match_filter': opts.match_filter,
        'color': opts.color,
        'ffmpeg_location': opts.ffmpeg_location,
@@ -929,11 +946,14 @@ def parse_options(argv=None):
        'hls_split_discontinuity': opts.hls_split_discontinuity,
        'external_downloader_args': opts.external_downloader_args,
        'postprocessor_args': opts.postprocessor_args,
        'cn_verification_proxy': opts.cn_verification_proxy,
        'geo_verification_proxy': opts.geo_verification_proxy,
        'geo_bypass': opts.geo_bypass,
        'geo_bypass_country': opts.geo_bypass_country,
        'geo_bypass_ip_block': opts.geo_bypass_ip_block,
        'useid': opts.useid or None,
        'js_runtimes': js_runtimes,
        'remote_components': opts.remote_components,
        'warn_when_outdated': opts.update_self is None,
        '_warnings': warnings,
        '_deprecation_warnings': deprecation_warnings,
        'compat_opts': opts.compat_opts,
@@ -945,12 +965,6 @@ def _real_main(argv=None):

    parser, opts, all_urls, ydl_opts = parse_options(argv)

    # Dump user agent
    if opts.dump_user_agent:
        ua = traverse_obj(opts.headers, 'User-Agent', casesense=False, default=std_headers['User-Agent'])
        write_string(f'{ua}\n', out=sys.stdout)
        return

    if print_extractor_information(opts, all_urls):
        return

@@ -959,6 +973,11 @@ def _real_main(argv=None):
    if opts.ffmpeg_location:
        FFmpegPostProcessor._ffmpeg_location.set(opts.ffmpeg_location)

    # load all plugins into the global lookup
    plugin_dirs.value = opts.plugin_dirs
    if plugin_dirs.value:
        _load_all_plugins()

    with YoutubeDL(ydl_opts) as ydl:
        pre_process = opts.update_self or opts.rm_cachedir
        actual_use = all_urls or opts.load_info_filename
@@ -968,22 +987,75 @@ def _real_main(argv=None):

        try:
            updater = Updater(ydl, opts.update_self)
            if opts.update_self and updater.update() and actual_use:
                if updater.cmd:
            if opts.update_self and updater.update() and actual_use and updater.cmd:
                    return updater.restart()
                # This code is reachable only for zip variant in py < 3.10
                # It makes sense to exit here, but the old behavior is to continue
                ydl.report_warning('Restart yt-dlp to use the updated version')
                # return 100, 'ERROR: The program must exit for the update to complete'
        except Exception:
            traceback.print_exc()
            ydl._download_retcode = 100

        if opts.list_impersonate_targets:

            known_targets = [
                # List of simplified targets we know are supported,
                # to help users know what dependencies may be required.
                (ImpersonateTarget('chrome'), 'curl_cffi'),
                (ImpersonateTarget('safari'), 'curl_cffi'),
                (ImpersonateTarget('firefox'), 'curl_cffi>=0.10'),
                (ImpersonateTarget('edge'), 'curl_cffi'),
                (ImpersonateTarget('tor'), 'curl_cffi>=0.11'),
            ]

            available_targets = ydl._get_available_impersonate_targets()

            def make_row(target, handler):
                return [
                    join_nonempty(target.client.title(), target.version, delim='-') or '-',
                    join_nonempty((target.os or '').title(), target.os_version, delim='-') or '-',
                    handler,
                ]

            rows = [make_row(target, handler) for target, handler in available_targets]

            for known_target, known_handler in known_targets:
                if not any(
                    known_target in target and known_handler.startswith(handler)
                    for target, handler in available_targets
                ):
                    rows.insert(0, [
                        ydl._format_out(text, ydl.Styles.SUPPRESS)
                        for text in make_row(known_target, f'{known_handler} (unavailable)')
                    ])

            ydl.to_screen('[info] Available impersonate targets')
            ydl.to_stdout(render_table(['Client', 'OS', 'Source'], rows, extra_gap=2, delim='-'))
            return

        if not actual_use:
            if pre_process:
                return ydl._download_retcode

            ydl.warn_if_short_id(sys.argv[1:] if argv is None else argv)
            args = sys.argv[1:] if argv is None else argv
            ydl.warn_if_short_id(args)

            # Show a useful error message and wait for keypress if not launched from shell on Windows
            if not args and os.name == 'nt' and getattr(sys, 'frozen', False):
                import ctypes.wintypes
                import msvcrt

                kernel32 = ctypes.WinDLL('Kernel32')

                buffer = (1 * ctypes.wintypes.DWORD)()
                attached_processes = kernel32.GetConsoleProcessList(buffer, 1)
                # If we only have a single process attached, then the executable was double clicked
                # When using `pyinstaller` with `--onefile`, two processes get attached
                is_onefile = hasattr(sys, '_MEIPASS') and os.path.basename(sys._MEIPASS).startswith('_MEI')
                if attached_processes == 1 or (is_onefile and attached_processes == 2):
                    print(parser._generate_error_message(
                        'Do not double-click the executable, instead call it from a command line.\n'
                        'Please read the README for further information on how to use yt-dlp: '
                        'https://github.com/yt-dlp/yt-dlp#readme'))
                    msvcrt.getch()
                    _exit(2)
            parser.error(
                'You must provide at least one URL.\n'
                'Type yt-dlp --help to see a list of all options.')
@@ -1002,11 +1074,10 @@ def _real_main(argv=None):


def main(argv=None):
    global _IN_CLI
    _IN_CLI = True
    IN_CLI.value = True
    try:
        _exit(*variadic(_real_main(argv)))
    except DownloadError:
    except (CookieLoadError, DownloadError):
        _exit(1)
    except SameFileError as e:
        _exit(f'ERROR: {e}')
@@ -1023,10 +1094,20 @@ def main(argv=None):

from .extractor import gen_extractors, list_extractors

# Register JS runtimes and remote components
from .globals import supported_js_runtimes, supported_remote_components
supported_js_runtimes.value['deno'] = _DenoJsRuntime
supported_js_runtimes.value['node'] = _NodeJsRuntime
supported_js_runtimes.value['bun'] = _BunJsRuntime
supported_js_runtimes.value['quickjs'] = _QuickJsRuntime

supported_remote_components.value.append('ejs:github')
supported_remote_components.value.append('ejs:npm')

__all__ = [
    'main',
    'YoutubeDL',
    'parse_options',
    'gen_extractors',
    'list_extractors',
    'main',
    'parse_options',
]

@@ -1,7 +1,7 @@
#!/usr/bin/env python3

# Execute with
# $ python -m yt_dlp
# $ python3 -m yt_dlp

import sys


@@ -1,6 +1,6 @@
import sys

from PyInstaller.utils.hooks import collect_submodules
from PyInstaller.utils.hooks import collect_submodules, collect_data_files


def pycryptodome_module():
@@ -10,7 +10,7 @@ def pycryptodome_module():
    try:
        import Crypto  # noqa: F401
        print('WARNING: Using Crypto since Cryptodome is not available. '
              'Install with: pip install pycryptodomex', file=sys.stderr)
              'Install with: python3 -m pip install pycryptodomex', file=sys.stderr)
        return 'Crypto'
    except ImportError:
        pass
@@ -21,12 +21,17 @@ def get_hidden_imports():
    yield from ('yt_dlp.compat._legacy', 'yt_dlp.compat._deprecated')
    yield from ('yt_dlp.utils._legacy', 'yt_dlp.utils._deprecated')
    yield pycryptodome_module()
    yield from collect_submodules('websockets')
    # Only `websockets` is required, others are collected just in case
    for module in ('websockets', 'requests', 'urllib3'):
        yield from collect_submodules(module)
    # These are auto-detected, but explicitly add them just in case
    yield from ('mutagen', 'brotli', 'certifi')
    yield from ('mutagen', 'brotli', 'certifi', 'secretstorage', 'curl_cffi')


hiddenimports = list(get_hidden_imports())
print(f'Adding imports: {hiddenimports}')

excludedimports = ['youtube_dl', 'youtube_dlc', 'test', 'ytdlp_plugins', 'devscripts']
excludedimports = ['youtube_dl', 'youtube_dlc', 'test', 'ytdlp_plugins', 'devscripts', 'bundle']

datas = collect_data_files('curl_cffi', includes=['cacert.pem'])
datas += collect_data_files('yt_dlp_ejs', includes=['**/*.js'])

@@ -3,7 +3,6 @@ from math import ceil

from .compat import compat_ord
from .dependencies import Cryptodome
from .utils import bytes_to_intlist, intlist_to_bytes

if Cryptodome.AES:
    def aes_cbc_decrypt_bytes(data, key, iv):
@@ -17,15 +16,15 @@ if Cryptodome.AES:
else:
    def aes_cbc_decrypt_bytes(data, key, iv):
        """ Decrypt bytes with AES-CBC using native implementation since pycryptodome is unavailable """
        return intlist_to_bytes(aes_cbc_decrypt(*map(bytes_to_intlist, (data, key, iv))))
        return bytes(aes_cbc_decrypt(*map(list, (data, key, iv))))

    def aes_gcm_decrypt_and_verify_bytes(data, key, tag, nonce):
        """ Decrypt bytes with AES-GCM using native implementation since pycryptodome is unavailable """
        return intlist_to_bytes(aes_gcm_decrypt_and_verify(*map(bytes_to_intlist, (data, key, tag, nonce))))
        return bytes(aes_gcm_decrypt_and_verify(*map(list, (data, key, tag, nonce))))


def aes_cbc_encrypt_bytes(data, key, iv, **kwargs):
    return intlist_to_bytes(aes_cbc_encrypt(*map(bytes_to_intlist, (data, key, iv)), **kwargs))
    return bytes(aes_cbc_encrypt(*map(list, (data, key, iv)), **kwargs))
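
The refactor above drops the `bytes_to_intlist`/`intlist_to_bytes` helpers in favor of the builtins they wrapped; for 8-bit data the round trip is identical:

```
payload = b'secret message'
as_ints = list(payload)            # what bytes_to_intlist(payload) returned
assert bytes(as_ints) == payload   # what intlist_to_bytes(as_ints) returned
assert all(0 <= b <= 255 for b in as_ints)
```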


BLOCK_SIZE_BYTES = 16
@@ -68,7 +67,7 @@ def pad_block(block, padding_mode):
        raise NotImplementedError(f'Padding mode {padding_mode} is not implemented')

    if padding_mode == 'iso7816' and padding_size:
        block = block + [0x80]  # NB: += mutates list
        block = [*block, 0x80]  # NB: += mutates list
        padding_size -= 1

    return block + [PADDING_BYTE[padding_mode]] * padding_size
@@ -84,7 +83,7 @@ def aes_ecb_encrypt(data, key, iv=None):
    @returns {int[]} encrypted data
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
    block_count = ceil(len(data) / BLOCK_SIZE_BYTES)

    encrypted_data = []
    for i in range(block_count):
@@ -104,15 +103,13 @@ def aes_ecb_decrypt(data, key, iv=None):
    @returns {int[]} decrypted data
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
    block_count = ceil(len(data) / BLOCK_SIZE_BYTES)

    encrypted_data = []
    for i in range(block_count):
        block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
        encrypted_data += aes_decrypt(block, expanded_key)
    encrypted_data = encrypted_data[:len(data)]

    return encrypted_data
    return encrypted_data[:len(data)]


def aes_ctr_decrypt(data, key, iv):
@@ -137,7 +134,7 @@ def aes_ctr_encrypt(data, key, iv):
    @returns {int[]} encrypted data
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
    block_count = ceil(len(data) / BLOCK_SIZE_BYTES)
    counter = iter_vector(iv)

    encrypted_data = []
@@ -148,9 +145,7 @@ def aes_ctr_encrypt(data, key, iv):

        cipher_counter_block = aes_encrypt(counter_block, expanded_key)
        encrypted_data += xor(block, cipher_counter_block)
    encrypted_data = encrypted_data[:len(data)]

    return encrypted_data
    return encrypted_data[:len(data)]


def aes_cbc_decrypt(data, key, iv):
@@ -163,7 +158,7 @@ def aes_cbc_decrypt(data, key, iv):
    @returns {int[]} decrypted data
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
    block_count = ceil(len(data) / BLOCK_SIZE_BYTES)

    decrypted_data = []
    previous_cipher_block = iv
@@ -174,9 +169,7 @@ def aes_cbc_decrypt(data, key, iv):
        decrypted_block = aes_decrypt(block, expanded_key)
        decrypted_data += xor(decrypted_block, previous_cipher_block)
        previous_cipher_block = block
    decrypted_data = decrypted_data[:len(data)]

    return decrypted_data
    return decrypted_data[:len(data)]


def aes_cbc_encrypt(data, key, iv, *, padding_mode='pkcs7'):
@@ -190,7 +183,7 @@ def aes_cbc_encrypt(data, key, iv, *, padding_mode='pkcs7'):
    @returns {int[]} encrypted data
    """
    expanded_key = key_expansion(key)
    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
    block_count = ceil(len(data) / BLOCK_SIZE_BYTES)

    encrypted_data = []
    previous_cipher_block = iv
@@ -224,10 +217,10 @@ def aes_gcm_decrypt_and_verify(data, key, tag, nonce):
    hash_subkey = aes_encrypt([0] * BLOCK_SIZE_BYTES, key_expansion(key))

    if len(nonce) == 12:
        j0 = nonce + [0, 0, 0, 1]
        j0 = [*nonce, 0, 0, 0, 1]
    else:
        fill = (BLOCK_SIZE_BYTES - (len(nonce) % BLOCK_SIZE_BYTES)) % BLOCK_SIZE_BYTES + 8
        ghash_in = nonce + [0] * fill + bytes_to_intlist((8 * len(nonce)).to_bytes(8, 'big'))
        ghash_in = nonce + [0] * fill + list((8 * len(nonce)).to_bytes(8, 'big'))
        j0 = ghash(hash_subkey, ghash_in)

    # TODO: add nonce support to aes_ctr_decrypt
@@ -236,17 +229,17 @@ def aes_gcm_decrypt_and_verify(data, key, tag, nonce):
    iv_ctr = inc(j0)

    decrypted_data = aes_ctr_decrypt(data, key, iv_ctr + [0] * (BLOCK_SIZE_BYTES - len(iv_ctr)))
    pad_len = len(data) // 16 * 16
    pad_len = (BLOCK_SIZE_BYTES - (len(data) % BLOCK_SIZE_BYTES)) % BLOCK_SIZE_BYTES
    s_tag = ghash(
        hash_subkey,
        data
        + [0] * (BLOCK_SIZE_BYTES - len(data) + pad_len)  # pad
        + bytes_to_intlist((0 * 8).to_bytes(8, 'big')  # length of associated data
            + ((len(data) * 8).to_bytes(8, 'big')))  # length of data
        + [0] * pad_len  # pad
        + list((0 * 8).to_bytes(8, 'big')  # length of associated data
            + ((len(data) * 8).to_bytes(8, 'big'))),  # length of data
    )

    if tag != aes_ctr_encrypt(s_tag, key, j0):
        raise ValueError("Mismatching authentication tag")
        raise ValueError('Mismatching authentication tag')

    return decrypted_data
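
The `pad_len` rewrite above fixes GHASH padding: its input must be a whole number of 16-byte blocks, and the old formula padded a full extra block when the data length was already aligned. Worked through:

```
BLOCK = 16

def ghash_pad_len(n: int) -> int:
    return (BLOCK - (n % BLOCK)) % BLOCK

assert ghash_pad_len(20) == 12   # 20 + 12 = 32, two whole blocks
assert ghash_pad_len(16) == 0    # already aligned; the old formula over-padded here
```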

@@ -288,9 +281,7 @@ def aes_decrypt(data, expanded_key):
    data = list(iter_mix_columns(data, MIX_COLUMN_MATRIX_INV))
    data = shift_rows_inv(data)
    data = sub_bytes_inv(data)
    data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])

    return data
    return xor(data, expanded_key[:BLOCK_SIZE_BYTES])


def aes_decrypt_text(data, password, key_size_bytes):
@@ -308,8 +299,8 @@ def aes_decrypt_text(data, password, key_size_bytes):
    """
    NONCE_LENGTH_BYTES = 8

    data = bytes_to_intlist(base64.b64decode(data))
    password = bytes_to_intlist(password.encode())
    data = list(base64.b64decode(data))
    password = list(password.encode())

    key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password))
    key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES)
@@ -318,9 +309,7 @@ def aes_decrypt_text(data, password, key_size_bytes):
    cipher = data[NONCE_LENGTH_BYTES:]

    decrypted_data = aes_ctr_decrypt(cipher, key, nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES))
    plaintext = intlist_to_bytes(decrypted_data)

    return plaintext
    return bytes(decrypted_data)


RCON = (0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36)
@@ -428,9 +417,7 @@ def key_expansion(data):
    for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
        temp = data[-4:]
        data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
    data = data[:expanded_key_size_bytes]

    return data
    return data[:expanded_key_size_bytes]


def iter_vector(iv):
@@ -448,7 +435,7 @@ def sub_bytes_inv(data):


def rotate(data):
    return data[1:] + [data[0]]
    return [*data[1:], data[0]]


def key_schedule_core(data, rcon_iteration):
@@ -460,7 +447,7 @@ def key_schedule_core(data, rcon_iteration):


def xor(data1, data2):
    return [x ^ y for x, y in zip(data1, data2)]
    return [x ^ y for x, y in zip(data1, data2, strict=False)]


def iter_mix_columns(data, matrix):
@@ -511,7 +498,7 @@ def block_product(block_x, block_y):
    # NIST SP 800-38D, Algorithm 1

    if len(block_x) != BLOCK_SIZE_BYTES or len(block_y) != BLOCK_SIZE_BYTES:
        raise ValueError("Length of blocks need to be %d bytes" % BLOCK_SIZE_BYTES)
        raise ValueError(f'Length of blocks need to be {BLOCK_SIZE_BYTES} bytes')

    block_r = [0xE1] + [0] * (BLOCK_SIZE_BYTES - 1)
    block_v = block_y[:]
@@ -534,7 +521,7 @@ def ghash(subkey, data):
    # NIST SP 800-38D, Algorithm 2

    if len(data) % BLOCK_SIZE_BYTES:
        raise ValueError("Length of data should be %d bytes" % BLOCK_SIZE_BYTES)
        raise ValueError(f'Length of data should be {BLOCK_SIZE_BYTES} bytes')

    last_y = [0] * BLOCK_SIZE_BYTES
    for i in range(0, len(data), BLOCK_SIZE_BYTES):
@@ -547,19 +534,17 @@ def ghash(subkey, data):
__all__ = [
    'aes_cbc_decrypt',
    'aes_cbc_decrypt_bytes',
    'aes_ctr_decrypt',
    'aes_decrypt_text',
    'aes_decrypt',
    'aes_ecb_decrypt',
    'aes_gcm_decrypt_and_verify',
    'aes_gcm_decrypt_and_verify_bytes',

    'aes_cbc_encrypt',
    'aes_cbc_encrypt_bytes',
    'aes_ctr_decrypt',
    'aes_ctr_encrypt',
    'aes_decrypt',
    'aes_decrypt_text',
    'aes_ecb_decrypt',
    'aes_ecb_encrypt',
    'aes_encrypt',

    'aes_gcm_decrypt_and_verify',
    'aes_gcm_decrypt_and_verify_bytes',
    'key_expansion',
    'pad_block',
    'pkcs7_padding',

@@ -81,10 +81,10 @@ class Cache:

        cachedir = self._get_root_dir()
        if not any((term in cachedir) for term in ('cache', 'tmp')):
            raise Exception('Not removing directory %s - this does not look like a cache dir' % cachedir)
            raise Exception(f'Not removing directory {cachedir} - this does not look like a cache dir')

        self._ydl.to_screen(
            'Removing cache dir %s .' % cachedir, skip_eol=True)
            f'Removing cache dir {cachedir} .', skip_eol=True)
        if os.path.exists(cachedir):
            self._ydl.to_screen('.', skip_eol=True)
            shutil.rmtree(cachedir)

@@ -1,5 +0,0 @@
import warnings

warnings.warn(DeprecationWarning(f'{__name__} is deprecated'))

casefold = str.casefold
@@ -1,5 +1,5 @@
import datetime as dt
import os
import sys
import xml.etree.ElementTree as etree

from .compat_utils import passthrough_module
@@ -24,36 +24,21 @@ def compat_etree_fromstring(text):
    return etree.XML(text, parser=etree.XMLParser(target=_TreeBuilder()))


compat_os_name = os._name if os.name == 'java' else os.name


if compat_os_name == 'nt':
    def compat_shlex_quote(s):
        import re
        return s if re.match(r'^[-_\w./]+$', s) else '"%s"' % s.replace('"', '\\"')
else:
    from shlex import quote as compat_shlex_quote  # noqa: F401


def compat_ord(c):
    return c if isinstance(c, int) else ord(c)


if compat_os_name == 'nt' and sys.version_info < (3, 8):
    # os.path.realpath on Windows does not follow symbolic links
    # prior to Python 3.8 (see https://bugs.python.org/issue9949)
    def compat_realpath(path):
        while os.path.islink(path):
            path = os.path.abspath(os.readlink(path))
        return os.path.realpath(path)
else:
    compat_realpath = os.path.realpath
def compat_datetime_from_timestamp(timestamp):
    # Calling dt.datetime.fromtimestamp with negative timestamps throws error in Windows
    # Ref: https://github.com/yt-dlp/yt-dlp/issues/5185, https://github.com/python/cpython/issues/81708,
    # https://github.com/yt-dlp/yt-dlp/issues/6706#issuecomment-1496842642
    return (dt.datetime.fromtimestamp(0, dt.timezone.utc) + dt.timedelta(seconds=timestamp))
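
Why the epoch-plus-timedelta form above is used: on Windows, `datetime.fromtimestamp()` raises `OSError` for negative (pre-1970) timestamps, while adding a timedelta to the epoch works everywhere. A quick check:

```
import datetime as dt

def datetime_from_timestamp(timestamp: float) -> dt.datetime:
    return dt.datetime.fromtimestamp(0, dt.timezone.utc) + dt.timedelta(seconds=timestamp)

assert datetime_from_timestamp(-86400).date().isoformat() == '1969-12-31'
```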


# Python 3.8+ does not honor %HOME% on windows, but this breaks compatibility with youtube-dl
# See https://github.com/yt-dlp/yt-dlp/issues/792
# https://docs.python.org/3/library/os.path.html#os.path.expanduser
if compat_os_name in ('nt', 'ce'):
if os.name in ('nt', 'ce'):
    def compat_expanduser(path):
        HOME = os.environ.get('HOME')
        if not HOME:

@@ -8,16 +8,14 @@ passthrough_module(__name__, '.._legacy', callback=lambda attr: warnings.warn(
    DeprecationWarning(f'{__name__}.{attr} is deprecated'), stacklevel=6))
del passthrough_module

import base64
import urllib.error
import urllib.parse
import functools  # noqa: F401
import os

compat_str = str

compat_b64decode = base64.b64decode
compat_os_name = os.name
compat_realpath = os.path.realpath

compat_urlparse = urllib.parse
compat_parse_qs = urllib.parse.parse_qs
compat_urllib_parse_unquote = urllib.parse.unquote
compat_urllib_parse_urlencode = urllib.parse.urlencode
compat_urllib_parse_urlparse = urllib.parse.urlparse

def compat_shlex_quote(s):
    from ..utils import shell_quote
    return shell_quote(s)

@@ -30,13 +30,14 @@ from asyncio import run as compat_asyncio_run  # noqa: F401
from re import Pattern as compat_Pattern  # noqa: F401
from re import match as compat_Match  # noqa: F401

from . import compat_expanduser, compat_HTMLParseError, compat_realpath
from . import compat_expanduser, compat_HTMLParseError
from .compat_utils import passthrough_module
from ..dependencies import brotli as compat_brotli  # noqa: F401
from ..dependencies import websockets as compat_websockets  # noqa: F401
from ..dependencies.Cryptodome import AES as compat_pycrypto_AES  # noqa: F401
from ..networking.exceptions import HTTPError as compat_HTTPError

passthrough_module(__name__, '...utils', ('WINDOWS_VT_MODE', 'windows_enable_vt_mode'))
passthrough_module(__name__, '...utils', ('windows_enable_vt_mode',))


# compat_ctypes_WINFUNCTYPE = ctypes.WINFUNCTYPE
@@ -70,7 +71,6 @@ compat_html_parser_HTMLParseError = compat_HTMLParseError
compat_HTMLParser = compat_html_parser_HTMLParser = html.parser.HTMLParser
compat_http_client = http.client
compat_http_server = http.server
compat_HTTPError = urllib.error.HTTPError
compat_input = input
compat_integer_types = (int, )
compat_itertools_count = itertools.count
@@ -78,7 +78,7 @@ compat_kwargs = lambda kwargs: kwargs
compat_map = map
compat_numeric_types = (int, float, complex)
compat_os_path_expanduser = compat_expanduser
compat_os_path_realpath = compat_realpath
compat_os_path_realpath = os.path.realpath
compat_print = print
compat_shlex_split = shlex.split
compat_socket_create_connection = socket.create_connection
@@ -88,7 +88,7 @@ compat_struct_unpack = struct.unpack
compat_subprocess_get_DEVNULL = lambda: subprocess.DEVNULL
compat_tokenize_tokenize = tokenize.tokenize
compat_urllib_error = urllib.error
compat_urllib_HTTPError = urllib.error.HTTPError
compat_urllib_HTTPError = compat_HTTPError
compat_urllib_parse = urllib.parse
compat_urllib_parse_parse_qs = urllib.parse.parse_qs
compat_urllib_parse_quote = urllib.parse.quote
@@ -104,5 +104,12 @@ compat_xml_parse_error = compat_xml_etree_ElementTree_ParseError = etree.ParseEr
compat_xpath = lambda xpath: xpath
compat_zip = zip
workaround_optparse_bug9161 = lambda: None
compat_str = str
compat_b64decode = base64.b64decode
compat_urlparse = urllib.parse
compat_parse_qs = urllib.parse.parse_qs
compat_urllib_parse_unquote = urllib.parse.unquote
compat_urllib_parse_urlencode = urllib.parse.urlencode
compat_urllib_parse_urlparse = urllib.parse.urlparse

legacy = []

@@ -15,7 +15,7 @@ def get_package_info(module):
    name=getattr(module, '_yt_dlp__identifier', module.__name__),
    version=str(next(filter(None, (
        getattr(module, attr, None)
        for attr in ('__version__', 'version_string', 'version')
        for attr in ('_yt_dlp__version', '__version__', 'version_string', 'version')
    )), None)))


@@ -57,7 +57,7 @@ def passthrough_module(parent, child, allowed_attributes=(..., ), *, callback=la
        callback(attr)
        return ret

    @functools.lru_cache(maxsize=None)
    @functools.cache
    def from_child(attr):
        nonlocal child
        if attr not in allowed_attributes:

@@ -1,26 +0,0 @@
# flake8: noqa: F405
from functools import *  # noqa: F403

from .compat_utils import passthrough_module

passthrough_module(__name__, 'functools')
del passthrough_module

try:
    cache  # >= 3.9
except NameError:
    cache = lru_cache(maxsize=None)

try:
    cached_property  # >= 3.8
except NameError:
    class cached_property:
        def __init__(self, func):
            update_wrapper(self, func)
            self.func = func

        def __get__(self, instance, _):
            if instance is None:
                return self
            setattr(instance, self.func.__name__, self.func(instance))
            return getattr(instance, self.func.__name__)
@@ -1,16 +1,22 @@
tests = {
    'webp': lambda h: h[0:4] == b'RIFF' and h[8:] == b'WEBP',
    'png': lambda h: h[:8] == b'\211PNG\r\n\032\n',
    'jpeg': lambda h: h[6:10] in (b'JFIF', b'Exif'),
    'gif': lambda h: h[:6] in (b'GIF87a', b'GIF89a'),
}


def what(file=None, h=None):
    """Detect format of image (Currently supports jpeg, png, webp, gif only)
    Ref: https://github.com/python/cpython/blob/3.10/Lib/imghdr.py
    Ref: https://github.com/python/cpython/blob/3.11/Lib/imghdr.py
    Ref: https://www.w3.org/Graphics/JPEG/itu-t81.pdf
    """
    if h is None:
        with open(file, 'rb') as f:
            h = f.read(12)
    return next((type_ for type_, test in tests.items() if test(h)), None)

    if h.startswith(b'RIFF') and h.startswith(b'WEBP', 8):
        return 'webp'

    if h.startswith(b'\x89PNG'):
        return 'png'

    if h.startswith(b'\xFF\xD8\xFF'):
        return 'jpeg'

    if h.startswith(b'GIF'):
        return 'gif'

    return None
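
A usage sketch for the rewritten detector above: it only needs the first few header bytes of the image.

```
assert what(h=b'\x89PNG\r\n\x1a\n' + b'\x00' * 4) == 'png'
assert what(h=b'RIFF\x00\x00\x00\x00WEBP') == 'webp'
assert what(h=b'\xff\xd8\xff\xe0' + b'\x00' * 8) == 'jpeg'
assert what(h=b'plain text!!') is None
```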

@@ -1,13 +0,0 @@
# flake8: noqa: F405
from types import *  # noqa: F403

from .compat_utils import passthrough_module

passthrough_module(__name__, 'types')
del passthrough_module

try:
    # NB: pypy has builtin NoneType, so checking NameError won't work
    from types import NoneType  # >= 3.10
except ImportError:
    NoneType = type(None)
@@ -1,7 +1,7 @@
# flake8: noqa: F405
from urllib import *  # noqa: F403

del request
del request  # noqa: F821
from . import request  # noqa: F401

from ..compat_utils import passthrough_module

@@ -7,13 +7,13 @@ passthrough_module(__name__, 'urllib.request')
del passthrough_module


from .. import compat_os_name
import os

if compat_os_name == 'nt':
    # On older python versions, proxies are extracted from Windows registry erroneously. [1]
if os.name == 'nt':
    # On older Python versions, proxies are extracted from Windows registry erroneously. [1]
    # If the https proxy in the registry does not have a scheme, urllib will incorrectly add https:// to it. [2]
    # It is unlikely that the user has actually set it to be https, so we should be fine to safely downgrade
    # it to http on these older python versions to avoid issues
    # it to http on these older Python versions to avoid issues
    # This also applies for ftp proxy type, as ftp:// proxy scheme is not supported.
    # 1: https://github.com/python/cpython/issues/86793
    # 2: https://github.com/python/cpython/blob/51f1ae5ceb0673316c4e4b0175384e892e33cc6e/Lib/urllib/request.py#L2683-L2698
@@ -22,12 +22,8 @@ if compat_os_name == 'nt':

    def getproxies_registry_patched():
        proxies = getproxies_registry()
        if (
            sys.version_info >= (3, 10, 5)  # https://docs.python.org/3.10/whatsnew/changelog.html#python-3-10-5-final
            or (3, 9, 13) <= sys.version_info < (3, 10)  # https://docs.python.org/3.9/whatsnew/changelog.html#python-3-9-13-final
        ):
            return proxies

        if sys.version_info < (3, 10, 5):  # https://docs.python.org/3.10/whatsnew/changelog.html#python-3-10-5-final
            for scheme in ('https', 'ftp'):
                if scheme in proxies and proxies[scheme].startswith(f'{scheme}://'):
                    proxies[scheme] = 'http' + proxies[scheme][len(scheme):]
@@ -37,4 +33,4 @@ if compat_os_name == 'nt':
    def getproxies():
        return getproxies_environment() or getproxies_registry_patched()

del compat_os_name
del os

@@ -1,6 +1,10 @@
import base64
import collections
import contextlib
import datetime as dt
import functools
import glob
import hashlib
import http.cookiejar
import http.cookies
import io
@@ -14,16 +18,13 @@ import sys
import tempfile
import time
import urllib.request
from datetime import datetime, timedelta, timezone
from enum import Enum, auto
from hashlib import pbkdf2_hmac

from .aes import (
    aes_cbc_decrypt_bytes,
    aes_gcm_decrypt_and_verify_bytes,
    unpad_pkcs7,
)
from .compat import functools
from .dependencies import (
    _SECRETSTORAGE_UNAVAILABLE_REASON,
    secretstorage,
@@ -31,6 +32,8 @@
)
from .minicurses import MultilinePrinter, QuietMultilinePrinter
from .utils import (
    DownloadError,
    YoutubeDLError,
    Popen,
    error_to_str,
    expand_path,
@@ -43,7 +46,7 @@ from .utils import (
from .utils._utils import _YDLLogger
from .utils.networking import normalize_url

CHROMIUM_BASED_BROWSERS = {'brave', 'chrome', 'chromium', 'edge', 'opera', 'vivaldi'}
CHROMIUM_BASED_BROWSERS = {'brave', 'chrome', 'chromium', 'edge', 'opera', 'vivaldi', 'whale'}
SUPPORTED_BROWSERS = CHROMIUM_BASED_BROWSERS | {'firefox', 'safari'}


@@ -83,7 +86,12 @@ def _create_progress_bar(logger):
    return printer


class CookieLoadError(YoutubeDLError):
    pass


def load_cookies(cookie_file, browser_specification, ydl):
    try:
        cookie_jars = []
        if browser_specification is not None:
            browser_name, profile, keyring, container = _parse_browser_specification(*browser_specification)
@@ -101,6 +109,8 @@ def load_cookies(cookie_file, browser_specification, ydl):
            cookie_jars.append(jar)

        return _merge_cookie_jars(cookie_jars)
    except Exception:
        raise CookieLoadError('failed to load cookies')
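
A sketch (assuming an existing `YoutubeDL` instance `ydl`) of how callers can now tell cookie failures apart from download failures, mirroring the `except (CookieLoadError, DownloadError)` change in `main()`:

```
def load_cookies_or_explain(ydl):
    try:
        return load_cookies('cookies.txt', None, ydl)  # placeholder file name
    except CookieLoadError as e:
        ydl.report_error(f'cookies could not be loaded: {e}')
        return None
```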


def extract_cookies_from_browser(browser_name, profile=None, logger=YDLLogger(), *, keyring=None, container=None):
@@ -115,20 +125,23 @@ def extract_cookies_from_browser(browser_name, profile=None, logger=YDLLogger(),


def _extract_firefox_cookies(profile, container, logger):
    MAX_SUPPORTED_DB_SCHEMA_VERSION = 17

    logger.info('Extracting cookies from firefox')
    if not sqlite3:
        logger.warning('Cannot extract cookies from firefox without sqlite3 support. '
                       'Please use a python interpreter compiled with sqlite3 support')
                       'Please use a Python interpreter compiled with sqlite3 support')
        return YoutubeDLCookieJar()

    if profile is None:
        search_root = _firefox_browser_dir()
        search_roots = list(_firefox_browser_dirs())
    elif _is_path(profile):
        search_root = profile
        search_roots = [profile]
    else:
        search_root = os.path.join(_firefox_browser_dir(), profile)
        search_roots = [os.path.join(path, profile) for path in _firefox_browser_dirs()]
    search_root = ', '.join(map(repr, search_roots))

    cookie_database_path = _find_most_recently_used_file(search_root, 'cookies.sqlite', logger)
    cookie_database_path = _newest(_firefox_cookie_dbs(search_roots))
    if cookie_database_path is None:
        raise FileNotFoundError(f'could not find firefox cookies database in {search_root}')
    logger.debug(f'Extracting cookies from: "{cookie_database_path}"')
@@ -142,15 +155,19 @@ def _extract_firefox_cookies(profile, container, logger):
        identities = json.load(containers).get('identities', [])
        container_id = next((context.get('userContextId') for context in identities if container in (
            context.get('name'),
            try_call(lambda: re.fullmatch(r'userContext([^\.]+)\.label', context['l10nID']).group())
            try_call(lambda: re.fullmatch(r'userContext([^\.]+)\.label', context['l10nID']).group()),
        )), None)
        if not isinstance(container_id, int):
            raise ValueError(f'could not find firefox container "{container}" in containers.json')

    with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir:
        cursor = None
        try:
            cursor = _open_database_copy(cookie_database_path, tmpdir)
            with contextlib.closing(cursor.connection):
                db_schema_version = cursor.execute('PRAGMA user_version;').fetchone()[0]
                if db_schema_version > MAX_SUPPORTED_DB_SCHEMA_VERSION:
                    logger.warning(f'Possibly unsupported firefox cookies database version: {db_schema_version}')
                else:
                    logger.debug(f'Firefox cookies database version: {db_schema_version}')
            if isinstance(container_id, int):
                logger.debug(
                    f'Only loading cookies from firefox container "{container}", ID {container_id}')
@@ -169,6 +186,10 @@
                total_cookie_count = len(table)
                for i, (host, name, value, path, expiry, is_secure) in enumerate(table):
                    progress_bar.print(f'Loading cookie {i: 6d}/{total_cookie_count: 6d}')
                    # FF142 upgraded cookies DB to schema version 16 and started using milliseconds for cookie expiry
                    # Ref: https://github.com/mozilla-firefox/firefox/commit/5869af852cd20425165837f6c2d9971f3efba83d
                    if db_schema_version >= 16 and expiry is not None:
                        expiry /= 1000
                    cookie = http.cookiejar.Cookie(
                        version=0, name=name, value=value, port=None, port_specified=False,
                        domain=host, domain_specified=bool(host), domain_initial_dot=host.startswith('.'),
@@ -177,17 +198,37 @@
                    jar.set_cookie(cookie)
            logger.info(f'Extracted {len(jar)} cookies from firefox')
            return jar
        finally:
            if cursor is not None:
                cursor.connection.close()


def _firefox_browser_dir():
def _firefox_browser_dirs():
    if sys.platform in ('cygwin', 'win32'):
        return os.path.expandvars(R'%APPDATA%\Mozilla\Firefox\Profiles')
        yield from map(os.path.expandvars, (
            R'%APPDATA%\Mozilla\Firefox\Profiles',
            R'%LOCALAPPDATA%\Packages\Mozilla.Firefox_n80bbvh6b1yt2\LocalCache\Roaming\Mozilla\Firefox\Profiles',
        ))

    elif sys.platform == 'darwin':
        return os.path.expanduser('~/Library/Application Support/Firefox')
        return os.path.expanduser('~/.mozilla/firefox')
        yield os.path.expanduser('~/Library/Application Support/Firefox/Profiles')

    else:
        yield from map(os.path.expanduser, (
|
||||
# New installations of FF147+ respect the XDG base directory specification
|
||||
# Ref: https://bugzilla.mozilla.org/show_bug.cgi?id=259356
|
||||
os.path.join(_config_home(), 'mozilla/firefox'),
|
||||
# Existing FF version<=146 installations
|
||||
'~/.mozilla/firefox',
|
||||
# Flatpak XDG: https://docs.flatpak.org/en/latest/conventions.html#xdg-base-directories
|
||||
'~/.var/app/org.mozilla.firefox/config/mozilla/firefox',
|
||||
'~/.var/app/org.mozilla.firefox/.mozilla/firefox',
|
||||
# Snap installations do not respect the XDG base directory specification
|
||||
'~/snap/firefox/common/.mozilla/firefox',
|
||||
))
|
||||
|
||||
|
||||
def _firefox_cookie_dbs(roots):
|
||||
for root in map(os.path.abspath, roots):
|
||||
for pattern in ('', '*/', 'Profiles/*/'):
|
||||
yield from glob.iglob(os.path.join(root, pattern, 'cookies.sqlite'))
|
||||
|
||||
|
||||
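
The Firefox change above replaces a single hard-coded profile directory with a generator over several candidate roots (XDG, legacy, Flatpak, Snap) and then picks the most recently modified `cookies.sqlite`. A self-contained sketch of that discovery strategy, with an illustrative root list in place of the platform table from the diff:

```
import glob
import os

# Sketch: glob every candidate root for cookies.sqlite and keep the most
# recently modified hit, or None when nothing matches.
def newest_cookie_db(roots):
    candidates = [
        path
        for root in map(os.path.abspath, map(os.path.expanduser, roots))
        for pattern in ('', '*/', 'Profiles/*/')
        for path in glob.iglob(os.path.join(root, pattern, 'cookies.sqlite'))
    ]
    return max(candidates, key=lambda p: os.lstat(p).st_mtime, default=None)

# Example roots; the real list is platform-dependent as in the diff
print(newest_cookie_db(['~/.mozilla/firefox', '~/snap/firefox/common/.mozilla/firefox']))
```
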
def _get_chromium_based_browser_settings(browser_name):
@@ -202,6 +243,7 @@ def _get_chromium_based_browser_settings(browser_name):
            'edge': os.path.join(appdata_local, R'Microsoft\Edge\User Data'),
            'opera': os.path.join(appdata_roaming, R'Opera Software\Opera Stable'),
            'vivaldi': os.path.join(appdata_local, R'Vivaldi\User Data'),
            'whale': os.path.join(appdata_local, R'Naver\Naver Whale\User Data'),
        }[browser_name]

    elif sys.platform == 'darwin':
@@ -213,6 +255,7 @@ def _get_chromium_based_browser_settings(browser_name):
            'edge': os.path.join(appdata, 'Microsoft Edge'),
            'opera': os.path.join(appdata, 'com.operasoftware.Opera'),
            'vivaldi': os.path.join(appdata, 'Vivaldi'),
            'whale': os.path.join(appdata, 'Naver/Whale'),
        }[browser_name]

    else:
@@ -224,6 +267,7 @@ def _get_chromium_based_browser_settings(browser_name):
            'edge': os.path.join(config, 'microsoft-edge'),
            'opera': os.path.join(config, 'opera'),
            'vivaldi': os.path.join(config, 'vivaldi'),
            'whale': os.path.join(config, 'naver-whale'),
        }[browser_name]

    # Linux keyring names can be determined by snooping on dbus while opening the browser in KDE:
@@ -235,6 +279,7 @@ def _get_chromium_based_browser_settings(browser_name):
        'edge': 'Microsoft Edge' if sys.platform == 'darwin' else 'Chromium',
        'opera': 'Opera' if sys.platform == 'darwin' else 'Chromium',
        'vivaldi': 'Vivaldi' if sys.platform == 'darwin' else 'Chrome',
        'whale': 'Whale',
    }[browser_name]

    browsers_without_profiles = {'opera'}
@@ -242,7 +287,7 @@ def _get_chromium_based_browser_settings(browser_name):
    return {
        'browser_dir': browser_dir,
        'keyring_name': keyring_name,
        'supports_profiles': browser_name not in browsers_without_profiles
        'supports_profiles': browser_name not in browsers_without_profiles,
    }


@@ -251,7 +296,7 @@ def _extract_chrome_cookies(browser_name, profile, keyring, logger):

    if not sqlite3:
        logger.warning(f'Cannot extract cookies from {browser_name} without sqlite3 support. '
                       'Please use a python interpreter compiled with sqlite3 support')
                       'Please use a Python interpreter compiled with sqlite3 support')
        return YoutubeDLCookieJar()

    config = _get_chromium_based_browser_settings(browser_name)
@@ -268,17 +313,23 @@ def _extract_chrome_cookies(browser_name, profile, keyring, logger):
            logger.error(f'{browser_name} does not support profiles')
            search_root = config['browser_dir']

    cookie_database_path = _find_most_recently_used_file(search_root, 'Cookies', logger)
    cookie_database_path = _newest(_find_files(search_root, 'Cookies', logger))
    if cookie_database_path is None:
        raise FileNotFoundError(f'could not find {browser_name} cookies database in "{search_root}"')
    logger.debug(f'Extracting cookies from: "{cookie_database_path}"')

    decryptor = get_cookie_decryptor(config['browser_dir'], config['keyring_name'], logger, keyring=keyring)

    with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir:
        cursor = None
        try:
            cursor = _open_database_copy(cookie_database_path, tmpdir)

            # meta_version is necessary to determine if we need to trim the hash prefix from the cookies
            # Ref: https://chromium.googlesource.com/chromium/src/+/b02dcebd7cafab92770734dc2bc317bd07f1d891/net/extras/sqlite/sqlite_persistent_cookie_store.cc#223
            meta_version = int(cursor.execute('SELECT value FROM meta WHERE key = "version"').fetchone()[0])
            decryptor = get_cookie_decryptor(
                config['browser_dir'], config['keyring_name'], logger,
                keyring=keyring, meta_version=meta_version)

            cursor.connection.text_factory = bytes
            column_names = _get_column_names(cursor, 'cookies')
            secure_column = 'is_secure' if 'is_secure' in column_names else 'secure'
@@ -307,6 +358,12 @@ def _extract_chrome_cookies(browser_name, profile, keyring, logger):
            counts['unencrypted'] = unencrypted_cookies
            logger.debug(f'cookie version breakdown: {counts}')
            return jar
        except PermissionError as error:
            if os.name == 'nt' and error.errno == 13:
                message = 'Could not copy Chrome cookie database. See https://github.com/yt-dlp/yt-dlp/issues/7271 for more info'
                logger.error(message)
                raise DownloadError(message)  # force exit
            raise
        finally:
            if cursor is not None:
                cursor.connection.close()
@@ -324,6 +381,11 @@ def _process_chrome_cookie(decryptor, host_key, name, value, encrypted_value, pa
    if value is None:
        return is_encrypted, None

    # In chrome, session cookies have expires_utc set to 0
    # In our cookie-store, cookies that do not expire should have expires set to None
    if not expires_utc:
        expires_utc = None

    return is_encrypted, http.cookiejar.Cookie(
        version=0, name=name, value=value, port=None, port_specified=False,
        domain=host_key, domain_specified=bool(host_key), domain_initial_dot=host_key.startswith('.'),
@@ -365,22 +427,23 @@ class ChromeCookieDecryptor:
        raise NotImplementedError('Must be implemented by sub classes')


def get_cookie_decryptor(browser_root, browser_keyring_name, logger, *, keyring=None):
def get_cookie_decryptor(browser_root, browser_keyring_name, logger, *, keyring=None, meta_version=None):
    if sys.platform == 'darwin':
        return MacChromeCookieDecryptor(browser_keyring_name, logger)
        return MacChromeCookieDecryptor(browser_keyring_name, logger, meta_version=meta_version)
    elif sys.platform in ('win32', 'cygwin'):
        return WindowsChromeCookieDecryptor(browser_root, logger)
    return LinuxChromeCookieDecryptor(browser_keyring_name, logger, keyring=keyring)
        return WindowsChromeCookieDecryptor(browser_root, logger, meta_version=meta_version)
    return LinuxChromeCookieDecryptor(browser_keyring_name, logger, keyring=keyring, meta_version=meta_version)


class LinuxChromeCookieDecryptor(ChromeCookieDecryptor):
    def __init__(self, browser_keyring_name, logger, *, keyring=None):
    def __init__(self, browser_keyring_name, logger, *, keyring=None, meta_version=None):
        self._logger = logger
        self._v10_key = self.derive_key(b'peanuts')
        self._empty_key = self.derive_key(b'')
        self._cookie_counts = {'v10': 0, 'v11': 0, 'other': 0}
        self._browser_keyring_name = browser_keyring_name
        self._keyring = keyring
        self._meta_version = meta_version or 0

    @functools.cached_property
    def _v11_key(self):
@@ -409,14 +472,18 @@ class LinuxChromeCookieDecryptor(ChromeCookieDecryptor):

        if version == b'v10':
            self._cookie_counts['v10'] += 1
            return _decrypt_aes_cbc_multi(ciphertext, (self._v10_key, self._empty_key), self._logger)
            return _decrypt_aes_cbc_multi(
                ciphertext, (self._v10_key, self._empty_key), self._logger,
                hash_prefix=self._meta_version >= 24)

        elif version == b'v11':
            self._cookie_counts['v11'] += 1
            if self._v11_key is None:
                self._logger.warning('cannot decrypt v11 cookies: no key found', only_once=True)
                return None
            return _decrypt_aes_cbc_multi(ciphertext, (self._v11_key, self._empty_key), self._logger)
            return _decrypt_aes_cbc_multi(
                ciphertext, (self._v11_key, self._empty_key), self._logger,
                hash_prefix=self._meta_version >= 24)

        else:
            self._logger.warning(f'unknown cookie version: "{version}"', only_once=True)
@@ -425,11 +492,12 @@ class LinuxChromeCookieDecryptor(ChromeCookieDecryptor):


class MacChromeCookieDecryptor(ChromeCookieDecryptor):
    def __init__(self, browser_keyring_name, logger):
    def __init__(self, browser_keyring_name, logger, meta_version=None):
        self._logger = logger
        password = _get_mac_keyring_password(browser_keyring_name, logger)
        self._v10_key = None if password is None else self.derive_key(password)
        self._cookie_counts = {'v10': 0, 'other': 0}
        self._meta_version = meta_version or 0

    @staticmethod
    def derive_key(password):
@@ -447,7 +515,8 @@ class MacChromeCookieDecryptor(ChromeCookieDecryptor):
                self._logger.warning('cannot decrypt v10 cookies: no key found', only_once=True)
                return None

            return _decrypt_aes_cbc_multi(ciphertext, (self._v10_key,), self._logger)
            return _decrypt_aes_cbc_multi(
                ciphertext, (self._v10_key,), self._logger, hash_prefix=self._meta_version >= 24)

        else:
            self._cookie_counts['other'] += 1
@@ -457,10 +526,11 @@ class MacChromeCookieDecryptor(ChromeCookieDecryptor):


class WindowsChromeCookieDecryptor(ChromeCookieDecryptor):
    def __init__(self, browser_root, logger):
    def __init__(self, browser_root, logger, meta_version=None):
        self._logger = logger
        self._v10_key = _get_windows_v10_key(browser_root, logger)
        self._cookie_counts = {'v10': 0, 'other': 0}
        self._meta_version = meta_version or 0

    def decrypt(self, encrypted_value):
        version = encrypted_value[:3]
@@ -484,7 +554,9 @@ class WindowsChromeCookieDecryptor(ChromeCookieDecryptor):
            ciphertext = raw_ciphertext[nonce_length:-authentication_tag_length]
            authentication_tag = raw_ciphertext[-authentication_tag_length:]

            return _decrypt_aes_gcm(ciphertext, self._v10_key, nonce, authentication_tag, self._logger)
            return _decrypt_aes_gcm(
                ciphertext, self._v10_key, nonce, authentication_tag, self._logger,
                hash_prefix=self._meta_version >= 24)

        else:
            self._cookie_counts['other'] += 1
@@ -494,7 +566,7 @@ class WindowsChromeCookieDecryptor(ChromeCookieDecryptor):


def _extract_safari_cookies(profile, logger):
    if sys.platform != 'darwin':
    if sys.platform not in ('darwin', 'ios'):
        raise ValueError(f'unsupported platform: {sys.platform}')

    if profile:
@@ -575,7 +647,7 @@ class DataParser:


def _mac_absolute_time_to_posix(timestamp):
    return int((datetime(2001, 1, 1, 0, 0, tzinfo=timezone.utc) + timedelta(seconds=timestamp)).timestamp())
    return int((dt.datetime(2001, 1, 1, 0, 0, tzinfo=dt.timezone.utc) + dt.timedelta(seconds=timestamp)).timestamp())

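
Safari stores cookie timestamps as "Mac absolute time": seconds since 2001-01-01 UTC. The hunk above only swaps to the `dt` module alias; the arithmetic is unchanged, and a quick standalone check confirms the 2001 epoch offset:

```
import datetime as dt

def mac_absolute_time_to_posix(timestamp):
    return int((dt.datetime(2001, 1, 1, 0, 0, tzinfo=dt.timezone.utc)
                + dt.timedelta(seconds=timestamp)).timestamp())

# The 2001 epoch sits exactly 978307200 seconds after the POSIX epoch
assert mac_absolute_time_to_posix(0) == 978307200
```
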
def _parse_safari_cookies_header(data, logger):
@@ -706,22 +778,21 @@ def _get_linux_desktop_environment(env, logger):
    GetDesktopEnvironment
    """
    xdg_current_desktop = env.get('XDG_CURRENT_DESKTOP', None)
    desktop_session = env.get('DESKTOP_SESSION', None)
    desktop_session = env.get('DESKTOP_SESSION', '')
    if xdg_current_desktop is not None:
        xdg_current_desktop = xdg_current_desktop.split(':')[0].strip()

        if xdg_current_desktop == 'Unity':
            if desktop_session is not None and 'gnome-fallback' in desktop_session:
        for part in map(str.strip, xdg_current_desktop.split(':')):
            if part == 'Unity':
                if 'gnome-fallback' in desktop_session:
                    return _LinuxDesktopEnvironment.GNOME
                else:
                    return _LinuxDesktopEnvironment.UNITY
            elif xdg_current_desktop == 'Deepin':
            elif part == 'Deepin':
                return _LinuxDesktopEnvironment.DEEPIN
            elif xdg_current_desktop == 'GNOME':
            elif part == 'GNOME':
                return _LinuxDesktopEnvironment.GNOME
            elif xdg_current_desktop == 'X-Cinnamon':
            elif part == 'X-Cinnamon':
                return _LinuxDesktopEnvironment.CINNAMON
            elif xdg_current_desktop == 'KDE':
            elif part == 'KDE':
                kde_version = env.get('KDE_SESSION_VERSION', None)
                if kde_version == '5':
                    return _LinuxDesktopEnvironment.KDE5
@@ -732,18 +803,16 @@ def _get_linux_desktop_environment(env, logger):
                else:
                    logger.info(f'unknown KDE version: "{kde_version}". Assuming KDE4')
                    return _LinuxDesktopEnvironment.KDE4
            elif xdg_current_desktop == 'Pantheon':
            elif part == 'Pantheon':
                return _LinuxDesktopEnvironment.PANTHEON
            elif xdg_current_desktop == 'XFCE':
            elif part == 'XFCE':
                return _LinuxDesktopEnvironment.XFCE
            elif xdg_current_desktop == 'UKUI':
            elif part == 'UKUI':
                return _LinuxDesktopEnvironment.UKUI
            elif xdg_current_desktop == 'LXQt':
            elif part == 'LXQt':
                return _LinuxDesktopEnvironment.LXQT
            else:
                logger.info(f'XDG_CURRENT_DESKTOP is set to an unknown value: "{xdg_current_desktop}"')
                logger.debug(f'XDG_CURRENT_DESKTOP is set to an unknown value: "{xdg_current_desktop}"')

    elif desktop_session is not None:
        if desktop_session == 'deepin':
            return _LinuxDesktopEnvironment.DEEPIN
        elif desktop_session in ('mate', 'gnome'):
@@ -760,9 +829,8 @@ def _get_linux_desktop_environment(env, logger):
        elif desktop_session == 'ukui':
            return _LinuxDesktopEnvironment.UKUI
        else:
            logger.info(f'DESKTOP_SESSION is set to an unknown value: "{desktop_session}"')
            logger.debug(f'DESKTOP_SESSION is set to an unknown value: "{desktop_session}"')

    else:
        if 'GNOME_DESKTOP_SESSION_ID' in env:
            return _LinuxDesktopEnvironment.GNOME
        elif 'KDE_FULL_SESSION' in env:
@@ -770,6 +838,7 @@ def _get_linux_desktop_environment(env, logger):
                return _LinuxDesktopEnvironment.KDE4
            else:
                return _LinuxDesktopEnvironment.KDE3

    return _LinuxDesktopEnvironment.OTHER


@@ -794,7 +863,7 @@ def _choose_linux_keyring(logger):
    elif desktop_environment == _LinuxDesktopEnvironment.KDE6:
        linux_keyring = _LinuxKeyring.KWALLET6
    elif desktop_environment in (
        _LinuxDesktopEnvironment.KDE3, _LinuxDesktopEnvironment.LXQT, _LinuxDesktopEnvironment.OTHER
        _LinuxDesktopEnvironment.KDE3, _LinuxDesktopEnvironment.LXQT, _LinuxDesktopEnvironment.OTHER,
    ):
        linux_keyring = _LinuxKeyring.BASICTEXT
    else:
@@ -829,7 +898,7 @@ def _get_kwallet_network_wallet(keyring, logger):
        'dbus-send', '--session', '--print-reply=literal',
        f'--dest={service_name}',
        wallet_path,
        'org.kde.KWallet.networkWallet'
        'org.kde.KWallet.networkWallet',
    ], text=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)

    if returncode:
@@ -859,7 +928,7 @@ def _get_kwallet_password(browser_keyring_name, keyring, logger):
        'kwallet-query',
        '--read-password', f'{browser_keyring_name} Safe Storage',
        '--folder', f'{browser_keyring_name} Keys',
        network_wallet
        network_wallet,
    ], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)

    if returncode:
@@ -899,7 +968,6 @@ def _get_gnome_keyring_password(browser_keyring_name, logger):
        for item in col.get_all_items():
            if item.get_label() == f'{browser_keyring_name} Safe Storage':
                return item.get_secret()
        else:
            logger.error('failed to read from keyring')
        return b''

@@ -947,7 +1015,7 @@ def _get_windows_v10_key(browser_root, logger):
    References:
        - [1] https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/sync/os_crypt_win.cc
    """
    path = _find_most_recently_used_file(browser_root, 'Local State', logger)
    path = _newest(_find_files(browser_root, 'Local State', logger))
    if path is None:
        logger.error('could not find local state file')
        return None
@@ -970,13 +1038,15 @@ def _get_windows_v10_key(browser_root, logger):


def pbkdf2_sha1(password, salt, iterations, key_length):
    return pbkdf2_hmac('sha1', password, salt, iterations, key_length)
    return hashlib.pbkdf2_hmac('sha1', password, salt, iterations, key_length)


def _decrypt_aes_cbc_multi(ciphertext, keys, logger, initialization_vector=b' ' * 16):
def _decrypt_aes_cbc_multi(ciphertext, keys, logger, initialization_vector=b' ' * 16, hash_prefix=False):
    for key in keys:
        plaintext = unpad_pkcs7(aes_cbc_decrypt_bytes(ciphertext, key, initialization_vector))
        try:
            if hash_prefix:
                return plaintext[32:].decode()
            return plaintext.decode()
        except UnicodeDecodeError:
            pass
@@ -984,7 +1054,7 @@ def _decrypt_aes_cbc_multi(ciphertext, keys, logger, initialization_vector=b' '
    return None


def _decrypt_aes_gcm(ciphertext, key, nonce, authentication_tag, logger):
def _decrypt_aes_gcm(ciphertext, key, nonce, authentication_tag, logger, hash_prefix=False):
    try:
        plaintext = aes_gcm_decrypt_and_verify_bytes(ciphertext, key, authentication_tag, nonce)
    except ValueError:
@@ -992,6 +1062,8 @@ def _decrypt_aes_gcm(ciphertext, key, nonce, authentication_tag, logger):
        return None

    try:
        if hash_prefix:
            return plaintext[32:].decode()
        return plaintext.decode()
    except UnicodeDecodeError:
        logger.warning('failed to decrypt cookie (AES-GCM) because UTF-8 decoding failed. Possibly the key is wrong?', only_once=True)
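
The `hash_prefix` plumbing above reflects a Chromium cookie-store change: from meta version 24 onward, the decrypted plaintext begins with a 32-byte hash of the cookie's host key (per the Chromium source linked in the diff, a SHA-256 digest), and only the remainder is the cookie value. A minimal sketch of just the decode step, with hypothetical inputs and the AES part omitted:

```
import hashlib

# Sketch: what the hash_prefix branches above do once AES decryption has
# already produced `plaintext`. With meta_version >= 24 the first 32 bytes
# are a hash of the host key and only the rest is the cookie value.
def decode_cookie_plaintext(plaintext, meta_version, host_key=b''):
    if meta_version >= 24:
        prefix, plaintext = plaintext[:32], plaintext[32:]
        # yt-dlp simply slices the prefix off; verifying it is optional
        assert prefix == hashlib.sha256(host_key).digest()
    return plaintext.decode()

sample = hashlib.sha256(b'.example.com').digest() + b'session=abc'
print(decode_cookie_plaintext(sample, 24, b'.example.com'))  # session=abc
```
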
@@ -1021,11 +1093,12 @@ def _decrypt_windows_dpapi(ciphertext, logger):
        None,  # pvReserved: must be NULL
        None,  # pPromptStruct: information about prompts to display
        0,  # dwFlags
        ctypes.byref(blob_out)  # pDataOut
        ctypes.byref(blob_out),  # pDataOut
    )
    if not ret:
        logger.warning('failed to decrypt with DPAPI', only_once=True)
        return None
        message = 'Failed to decrypt with DPAPI. See https://github.com/yt-dlp/yt-dlp/issues/10927 for more info'
        logger.error(message)
        raise DownloadError(message)  # force exit

    result = ctypes.string_at(blob_out.pbData, blob_out.cbData)
    ctypes.windll.kernel32.LocalFree(blob_out.pbData)
@@ -1049,17 +1122,20 @@ def _get_column_names(cursor, table_name):
    return [row[1].decode() for row in table_info]


def _find_most_recently_used_file(root, filename, logger):
def _newest(files):
    return max(files, key=lambda path: os.lstat(path).st_mtime, default=None)


def _find_files(root, filename, logger):
    # if there are multiple browser profiles, take the most recently used one
    i, paths = 0, []
    i = 0
    with _create_progress_bar(logger) as progress_bar:
        for curr_root, dirs, files in os.walk(root):
        for curr_root, _, files in os.walk(root):
            for file in files:
                i += 1
                progress_bar.print(f'Searching for "{filename}": {i: 6d} files searched')
                if file == filename:
                    paths.append(os.path.join(curr_root, file))
    return None if not paths else max(paths, key=lambda path: os.lstat(path).st_mtime)
                    yield os.path.join(curr_root, file)


def _merge_cookie_jars(jars):
@@ -1073,7 +1149,7 @@ def _merge_cookie_jars(jars):


def _is_path(value):
    return os.path.sep in value
    return any(sep in value for sep in (os.path.sep, os.path.altsep) if sep)

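
The `_is_path` fix above matters on Windows, where `os.path.sep` is `'\\'` but users routinely write profile paths with forward slashes; checking `os.path.altsep` as well catches those. A quick sketch of the fixed predicate:

```
import os

# Sketch: treat a profile string as a filesystem path if it contains either
# separator. altsep is '/' on Windows and None on POSIX, hence the filter.
def is_path(value):
    return any(sep in value for sep in (os.path.sep, os.path.altsep) if sep)

print(is_path('Profiles/abc.default'))  # True on both Windows and POSIX
print(is_path('abc.default'))           # False -> treated as a profile name
```
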
def _parse_browser_specification(browser_name, profile=None, keyring=None, container=None):
@@ -1094,24 +1170,24 @@ class LenientSimpleCookie(http.cookies.SimpleCookie):
    _LEGAL_VALUE_CHARS = _LEGAL_KEY_CHARS + re.escape('(),/<=>?@[]{}')

    _RESERVED = {
        "expires",
        "path",
        "comment",
        "domain",
        "max-age",
        "secure",
        "httponly",
        "version",
        "samesite",
        'expires',
        'path',
        'comment',
        'domain',
        'max-age',
        'secure',
        'httponly',
        'version',
        'samesite',
    }

    _FLAGS = {"secure", "httponly"}
    _FLAGS = {'secure', 'httponly'}

    # Added 'bad' group to catch the remaining value
    _COOKIE_PATTERN = re.compile(r"""
    _COOKIE_PATTERN = re.compile(r'''
        \s*                            # Optional whitespace at start of cookie
        (?P<key>                       # Start of group 'key'
        [""" + _LEGAL_KEY_CHARS + r"""]+?# Any word of at least one letter
        [''' + _LEGAL_KEY_CHARS + r''']+?# Any word of at least one letter
        )                              # End of group 'key'
        (                              # Optional group: there may not be a value.
        \s*=\s*                        # Equal Sign
@@ -1121,7 +1197,7 @@ class LenientSimpleCookie(http.cookies.SimpleCookie):
        |                              # or
        \w{3},\s[\w\d\s-]{9,11}\s[\d:]{8}\sGMT  # Special case for "expires" attr
        |                              # or
        [""" + _LEGAL_VALUE_CHARS + r"""]*  # Any word or empty string
        [''' + _LEGAL_VALUE_CHARS + r''']*  # Any word or empty string
        )                              # End of group 'val'
        |                              # or
        (?P<bad>(?:\\;|[^;])*?)        # 'bad' group fallback for invalid values
@@ -1129,7 +1205,7 @@ class LenientSimpleCookie(http.cookies.SimpleCookie):
        )?                             # End of optional value group
        \s*                            # Any number of spaces.
        (\s+|;|$)                      # Ending either at space, semicolon, or EOS.
        """, re.ASCII | re.VERBOSE)
        ''', re.ASCII | re.VERBOSE)

    def load(self, data):
        # Workaround for https://github.com/yt-dlp/yt-dlp/issues/4776
@@ -1216,8 +1292,8 @@ class YoutubeDLCookieJar(http.cookiejar.MozillaCookieJar):
    def _really_save(self, f, ignore_discard, ignore_expires):
        now = time.time()
        for cookie in self:
            if (not ignore_discard and cookie.discard
                    or not ignore_expires and cookie.is_expired(now)):
            if ((not ignore_discard and cookie.discard)
                    or (not ignore_expires and cookie.is_expired(now))):
                continue
            name, value = cookie.name, cookie.value
            if value is None:
@@ -1225,14 +1301,14 @@ class YoutubeDLCookieJar(http.cookiejar.MozillaCookieJar):
                # with no name, whereas http.cookiejar regards it as a
                # cookie with no value.
                name, value = '', name
            f.write('%s\n' % '\t'.join((
            f.write('{}\n'.format('\t'.join((
                cookie.domain,
                self._true_or_false(cookie.domain.startswith('.')),
                cookie.path,
                self._true_or_false(cookie.secure),
                str_or_none(cookie.expires, default=''),
                name, value
            )))
                name, value,
            ))))

    def save(self, filename=None, ignore_discard=True, ignore_expires=True):
        """
@@ -1271,10 +1347,10 @@ class YoutubeDLCookieJar(http.cookiejar.MozillaCookieJar):
                return line
            cookie_list = line.split('\t')
            if len(cookie_list) != self._ENTRY_LEN:
                raise http.cookiejar.LoadError('invalid length %d' % len(cookie_list))
                raise http.cookiejar.LoadError(f'invalid length {len(cookie_list)}')
            cookie = self._CookieFileEntry(*cookie_list)
            if cookie.expires_at and not cookie.expires_at.isdigit():
                raise http.cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
            if cookie.expires_at and not re.fullmatch(r'[0-9]+(?:\.[0-9]+)?', cookie.expires_at):
                raise http.cookiejar.LoadError(f'invalid expires at {cookie.expires_at}')
            return line

        cf = io.StringIO()

@@ -24,7 +24,7 @@ try:
    from Crypto.Cipher import AES, PKCS1_OAEP, Blowfish, PKCS1_v1_5  # noqa: F401
    from Crypto.Hash import CMAC, SHA1  # noqa: F401
    from Crypto.PublicKey import RSA  # noqa: F401
except ImportError:
except (ImportError, OSError):
    __version__ = f'broken {__version__}'.strip()


@@ -43,19 +43,28 @@ except Exception as _err:

try:
    import sqlite3
    # We need to get the underlying `sqlite` version, see https://github.com/yt-dlp/yt-dlp/issues/8152
    sqlite3._yt_dlp__version = sqlite3.sqlite_version
except ImportError:
    # although sqlite3 is part of the standard library, it is possible to compile python without
    # although sqlite3 is part of the standard library, it is possible to compile Python without
    # sqlite support. See: https://github.com/yt-dlp/yt-dlp/issues/544
    sqlite3 = None


try:
    import websockets
except (ImportError, SyntaxError):
    # websockets 3.10 on python 3.6 causes SyntaxError
    # See https://github.com/yt-dlp/yt-dlp/issues/2633
except ImportError:
    websockets = None

try:
    import urllib3
except ImportError:
    urllib3 = None

try:
    import requests
except ImportError:
    requests = None

try:
    import xattr  # xattr or pyxattr
@@ -65,9 +74,19 @@ else:
    if hasattr(xattr, 'set'):  # pyxattr
        xattr._yt_dlp__identifier = 'pyxattr'

try:
    import curl_cffi
except ImportError:
    curl_cffi = None

from . import Cryptodome

try:
    import yt_dlp_ejs
except ImportError:
    yt_dlp_ejs = None


all_dependencies = {k: v for k, v in globals().items() if not k.startswith('_')}
available_dependencies = {k: v for k, v in all_dependencies.items() if v}


@@ -30,11 +30,12 @@ from .hls import HlsFD
from .http import HttpFD
from .ism import IsmFD
from .mhtml import MhtmlFD
from .niconico import NiconicoDmcFD, NiconicoLiveFD
from .niconico import NiconicoLiveFD
from .rtmp import RtmpFD
from .rtsp import RtspFD
from .websocket import WebSocketFragmentFD
from .youtube_live_chat import YoutubeLiveChatFD
from .bunnycdn import BunnyCdnFD

PROTOCOL_MAP = {
    'rtmp': RtmpFD,
@@ -49,12 +50,12 @@ PROTOCOL_MAP = {
    'http_dash_segments_generator': DashSegmentsFD,
    'ism': IsmFD,
    'mhtml': MhtmlFD,
    'niconico_dmc': NiconicoDmcFD,
    'niconico_live': NiconicoLiveFD,
    'fc2_live': FC2LiveFD,
    'websocket_frag': WebSocketFragmentFD,
    'youtube_live_chat': YoutubeLiveChatFD,
    'youtube_live_chat_replay': YoutubeLiveChatFD,
    'bunnycdn': BunnyCdnFD,
}


@@ -65,7 +66,6 @@ def shorten_protocol_name(proto, simplify=False):
    'rtmp_ffmpeg': 'rtmpF',
    'http_dash_segments': 'dash',
    'http_dash_segments_generator': 'dashG',
    'niconico_dmc': 'dmc',
    'websocket_frag': 'WSfrag',
}
if simplify:
@@ -99,7 +99,7 @@ def _get_suitable_downloader(info_dict, protocol, params, default):
    if external_downloader is None:
        if info_dict['to_stdout'] and FFmpegFD.can_merge_formats(info_dict, params):
            return FFmpegFD
    elif external_downloader.lower() != 'native':
    elif external_downloader.lower() != 'native' and info_dict.get('impersonate') is None:
        ed = get_external_downloader(external_downloader)
        if ed.can_download(info_dict, external_downloader):
            return ed

50 plugins/youtube_download/yt_dlp/downloader/bunnycdn.py Normal file
@@ -0,0 +1,50 @@
import hashlib
import random
import threading

from .common import FileDownloader
from . import HlsFD
from ..networking import Request
from ..networking.exceptions import network_exceptions


class BunnyCdnFD(FileDownloader):
    """
    Downloads from BunnyCDN with required pings
    Note, this is not a part of public API, and will be removed without notice.
    DO NOT USE
    """

    def real_download(self, filename, info_dict):
        self.to_screen(f'[{self.FD_NAME}] Downloading from BunnyCDN')

        fd = HlsFD(self.ydl, self.params)

        stop_event = threading.Event()
        ping_thread = threading.Thread(target=self.ping_thread, args=(stop_event,), kwargs=info_dict['_bunnycdn_ping_data'])
        ping_thread.start()

        try:
            return fd.real_download(filename, info_dict)
        finally:
            stop_event.set()

    def ping_thread(self, stop_event, url, headers, secret, context_id):
        # Site sends ping every 4 seconds, but this throttles the download. Pinging every 2 seconds seems to work.
        ping_interval = 2
        # Hard coded resolution as it doesn't seem to matter
        res = 1080
        paused = 'false'
        current_time = 0

        while not stop_event.wait(ping_interval):
            current_time += ping_interval

            time = current_time + round(random.random(), 6)
            md5_hash = hashlib.md5(f'{secret}_{context_id}_{time}_{paused}_{res}'.encode()).hexdigest()
            ping_url = f'{url}?hash={md5_hash}&time={time}&paused={paused}&resolution={res}'

            try:
                self.ydl.urlopen(Request(ping_url, headers=headers)).read()
            except network_exceptions as e:
                self.to_screen(f'[{self.FD_NAME}] Ping failed: {e}')
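
The new `BunnyCdnFD` above delegates the actual download to `HlsFD` while a background thread keeps issuing the site's keep-alive pings; each ping URL is authenticated by an MD5 over the secret, context ID, timestamp, pause state, and resolution. A sketch of just the URL construction, with hypothetical values:

```
import hashlib

# Sketch: how ping_thread() above builds each keep-alive URL.
def bunnycdn_ping_url(url, secret, context_id, time, paused='false', res=1080):
    md5_hash = hashlib.md5(f'{secret}_{context_id}_{time}_{paused}_{res}'.encode()).hexdigest()
    return f'{url}?hash={md5_hash}&time={time}&paused={paused}&resolution={res}'

# Hypothetical values, for illustration only
print(bunnycdn_ping_url('https://example.invalid/ping', 's3cret', 'ctx', 2.123456))
```
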
@@ -4,6 +4,7 @@ import functools
import os
import random
import re
import threading
import time

from ..minicurses import (
@@ -19,9 +20,7 @@ from ..utils import (
    Namespace,
    RetryManager,
    classproperty,
    decodeArgument,
    deprecation_warning,
    encodeFilename,
    format_bytes,
    join_nonempty,
    parse_bytes,
@@ -32,6 +31,7 @@ from ..utils import (
    timetuple_from_msec,
    try_call,
)
from ..utils._utils import _ProgressState


class FileDownloader:
@@ -62,7 +62,7 @@ class FileDownloader:
    test:               Download only first bytes to test the downloader.
    min_filesize:       Skip files smaller than this size
    max_filesize:       Skip files larger than this size
    xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
    progress_delta:     The minimum time between progress output, in seconds
    external_downloader_args:  A dictionary of downloader keys (in lower case)
                        and a list of additional command-line arguments for the
                        executable. Use 'default' as the name for arguments to be
@@ -88,6 +88,9 @@ class FileDownloader:
        self.params = params
        self._prepare_multiline_status()
        self.add_progress_hook(self.report_progress)
        if self.params.get('progress_delta'):
            self._progress_delta_lock = threading.Lock()
            self._progress_delta_time = time.monotonic()

    def _set_ydl(self, ydl):
        self.ydl = ydl
@@ -214,7 +217,7 @@ class FileDownloader:
    def temp_name(self, filename):
        """Returns a temporary filename for the given filename."""
        if self.params.get('nopart', False) or filename == '-' or \
                (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
                (os.path.exists(filename) and not os.path.isfile(filename)):
            return filename
        return filename + '.part'

@@ -268,7 +271,7 @@ class FileDownloader:
        """Try to set the last-modified time of the given file."""
        if last_modified_hdr is None:
            return
        if not os.path.isfile(encodeFilename(filename)):
        if not os.path.isfile(filename):
            return
        timestr = last_modified_hdr
        if timestr is None:
@@ -330,7 +333,7 @@ class FileDownloader:
                progress_dict), s.get('progress_idx') or 0)
            self.to_console_title(self.ydl.evaluate_outtmpl(
                progress_template.get('download-title') or 'yt-dlp %(progress._default_template)s',
                progress_dict))
                progress_dict), _ProgressState.from_dict(s), s.get('_percent'))

    def _format_progress(self, *args, **kwargs):
        return self.ydl._format_text(
@@ -354,6 +357,7 @@ class FileDownloader:
                '_speed_str': self.format_speed(speed).strip(),
                '_total_bytes_str': _format_bytes('total_bytes'),
                '_elapsed_str': self.format_seconds(s.get('elapsed')),
                '_percent': 100.0,
                '_percent_str': self.format_percent(100),
            })
            self._report_progress_status(s, join_nonempty(
@@ -366,13 +370,21 @@ class FileDownloader:
        if s['status'] != 'downloading':
            return

        if update_delta := self.params.get('progress_delta'):
            with self._progress_delta_lock:
                if time.monotonic() < self._progress_delta_time:
                    return
                self._progress_delta_time += update_delta

        progress = try_call(
            lambda: 100 * s['downloaded_bytes'] / s['total_bytes'],
            lambda: 100 * s['downloaded_bytes'] / s['total_bytes_estimate'],
            lambda: s['downloaded_bytes'] == 0 and 0)
        s.update({
            '_eta_str': self.format_eta(s.get('eta')).strip(),
            '_speed_str': self.format_speed(s.get('speed')),
            '_percent_str': self.format_percent(try_call(
                lambda: 100 * s['downloaded_bytes'] / s['total_bytes'],
                lambda: 100 * s['downloaded_bytes'] / s['total_bytes_estimate'],
                lambda: s['downloaded_bytes'] == 0 and 0)),
            '_percent': progress,
            '_percent_str': self.format_percent(progress),
            '_total_bytes_str': _format_bytes('total_bytes'),
            '_total_bytes_estimate_str': _format_bytes('total_bytes_estimate'),
            '_downloaded_bytes_str': _format_bytes('downloaded_bytes'),
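
The `progress_delta` logic added above rate-limits progress reports by advancing a monotonic-clock deadline by a fixed delta each time a report is allowed through. A self-contained sketch of the same throttle pattern:

```
import threading
import time

# Sketch of the throttle used by report_progress() above: emit at most one
# update per `delta` seconds, measured on the monotonic clock.
class ProgressThrottle:
    def __init__(self, delta):
        self._delta = delta
        self._lock = threading.Lock()
        self._next = time.monotonic()

    def allow(self):
        with self._lock:
            if time.monotonic() < self._next:
                return False
            self._next += self._delta
            return True

throttle = ProgressThrottle(0.5)
print([throttle.allow() for _ in range(3)])  # [True, False, False]
```
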
@@ -393,7 +405,7 @@ class FileDownloader:

    def report_resuming_byte(self, resume_len):
        """Report attempt to resume at given byte."""
        self.to_screen('[download] Resuming download at byte %s' % resume_len)
        self.to_screen(f'[download] Resuming download at byte {resume_len}')

    def report_retry(self, err, count, retries, frag_index=NO_DEFAULT, fatal=True):
        """Report retry"""
@@ -421,13 +433,13 @@ class FileDownloader:
        """
        nooverwrites_and_exists = (
            not self.params.get('overwrites', True)
            and os.path.exists(encodeFilename(filename))
            and os.path.exists(filename)
        )

        if not hasattr(filename, 'write'):
            continuedl_and_exists = (
                self.params.get('continuedl', True)
                and os.path.isfile(encodeFilename(filename))
                and os.path.isfile(filename)
                and not self.params.get('nopart', False)
            )

@@ -437,19 +449,32 @@ class FileDownloader:
                self._hook_progress({
                    'filename': filename,
                    'status': 'finished',
                    'total_bytes': os.path.getsize(encodeFilename(filename)),
                    'total_bytes': os.path.getsize(filename),
                }, info_dict)
                self._finish_multiline_status()
                return True, False

        sleep_note = ''
        if subtitle:
            sleep_interval = self.params.get('sleep_interval_subtitles') or 0
        else:
            min_sleep_interval = self.params.get('sleep_interval') or 0
            max_sleep_interval = self.params.get('max_sleep_interval') or 0

            requested_formats = info_dict.get('requested_formats') or [info_dict]
            if available_at := max(f.get('available_at') or 0 for f in requested_formats):
                forced_sleep_interval = available_at - int(time.time())
                if forced_sleep_interval > min_sleep_interval:
                    sleep_note = 'as required by the site'
                    min_sleep_interval = forced_sleep_interval
                    if forced_sleep_interval > max_sleep_interval:
                        max_sleep_interval = forced_sleep_interval

            sleep_interval = random.uniform(
                min_sleep_interval, self.params.get('max_sleep_interval') or min_sleep_interval)
                min_sleep_interval, max_sleep_interval or min_sleep_interval)

        if sleep_interval > 0:
            self.to_screen(f'[download] Sleeping {sleep_interval:.2f} seconds ...')
            self.to_screen(f'[download] Sleeping {sleep_interval:.2f} seconds {sleep_note}...')
            time.sleep(sleep_interval)

        ret = self.real_download(filename, info_dict)
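
The reworked sleep logic above lets a format's `available_at` timestamp force a longer pre-download sleep than the user-configured bounds. A sketch of the interval selection in isolation:

```
import random
import time

# Sketch of the interval selection above. `available_at` (epoch seconds) can
# push both bounds up when the site says the format is not ready yet.
def pick_sleep_interval(min_sleep, max_sleep, available_at=0):
    forced = available_at - int(time.time())
    if forced > min_sleep:
        min_sleep = forced
        if forced > max_sleep:
            max_sleep = forced
    return random.uniform(min_sleep, max_sleep or min_sleep)

print(pick_sleep_interval(1, 5))                         # somewhere in 1..5s
print(pick_sleep_interval(1, 5, int(time.time()) + 30))  # ~30s, site-required
```
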
@@ -478,9 +503,18 @@ class FileDownloader:
        if not self.params.get('verbose', False):
            return

        str_args = [decodeArgument(a) for a in args]

        if exe is None:
            exe = os.path.basename(str_args[0])
            exe = os.path.basename(args[0])

        self.write_debug(f'{exe} command line: {shell_quote(str_args)}')
        self.write_debug(f'{exe} command line: {shell_quote(args)}')

    def _get_impersonate_target(self, info_dict):
        impersonate = info_dict.get('impersonate')
        if impersonate is None:
            return None
        available_target, requested_targets = self.ydl._parse_impersonate_targets(impersonate)
        if available_target:
            return available_target
        elif requested_targets:
            self.report_warning(self.ydl._unavailable_targets_message(requested_targets))
        return None

@@ -3,7 +3,7 @@ import urllib.parse

from . import get_suitable_downloader
from .fragment import FragmentFD
from ..utils import update_url_query, urljoin
from ..utils import ReExtractInfo, update_url_query, urljoin


class DashSegmentsFD(FragmentFD):
@@ -15,16 +15,24 @@ class DashSegmentsFD(FragmentFD):
    FD_NAME = 'dashsegments'

    def real_download(self, filename, info_dict):
        if info_dict.get('is_live') and set(info_dict['protocol'].split('+')) != {'http_dash_segments_generator'}:
        if 'http_dash_segments_generator' in info_dict['protocol'].split('+'):
            real_downloader = None  # No external FD can support --live-from-start
        else:
            if info_dict.get('is_live'):
                self.report_error('Live DASH videos are not supported')

        real_start = time.time()
        real_downloader = get_suitable_downloader(
            info_dict, self.params, None, protocol='dash_frag_urls', to_stdout=(filename == '-'))

        real_start = time.time()

        requested_formats = [{**info_dict, **fmt} for fmt in info_dict.get('requested_formats', [])]
        args = []
        for fmt in requested_formats or [info_dict]:
            # Re-extract if --load-info-json is used and 'fragments' was originally a generator
            # See https://github.com/yt-dlp/yt-dlp/issues/13906
            if isinstance(fmt['fragments'], str):
                raise ReExtractInfo('the stream needs to be re-extracted', expected=True)

            try:
                fragment_count = 1 if self.params.get('test') else len(fmt['fragments'])
            except TypeError:

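
The `ReExtractInfo` guard above covers a `--load-info-json` round trip: when `fragments` was originally a generator, JSON serialization degrades it to a string, so the downloader asks for re-extraction instead of crashing later. A sketch of the shape check, with a plain exception standing in for yt-dlp's `ReExtractInfo`:

```
# Sketch: the shape check added above. ReExtractInfo is yt-dlp's signal to
# redo extraction; a plain Exception stands in for it here.
class ReExtractInfo(Exception):
    pass

def check_fragments(fmt):
    # A generator serialized through --load-info-json degrades to a string
    if isinstance(fmt.get('fragments'), str):
        raise ReExtractInfo('the stream needs to be re-extracted')

check_fragments({'fragments': [{'url': 'https://example.invalid/seg0.m4s'}]})  # ok
```
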
@@ -1,4 +1,5 @@
import enum
import functools
import json
import os
import re
@@ -9,7 +10,6 @@ import time
import uuid

from .fragment import FragmentFD
from ..compat import functools
from ..networking import Request
from ..postprocessor.ffmpeg import EXT_TO_OUT_FORMATS, FFmpegPostProcessor
from ..utils import (
@@ -23,7 +23,6 @@ from ..utils import (
    cli_valueless_option,
    determine_ext,
    encodeArgument,
    encodeFilename,
    find_available_port,
    remove_end,
    traverse_obj,
@@ -55,7 +54,7 @@ class ExternalFD(FragmentFD):
            # correct and expected termination thus all postprocessing
            # should take place
            retval = 0
            self.to_screen('[%s] Interrupted by user' % self.get_basename())
            self.to_screen(f'[{self.get_basename()}] Interrupted by user')
        finally:
            if self._cookies_tempfile:
                self.try_remove(self._cookies_tempfile)
@@ -67,7 +66,7 @@ class ExternalFD(FragmentFD):
                'elapsed': time.time() - started,
            }
            if filename != '-':
                fsize = os.path.getsize(encodeFilename(tmpfilename))
                fsize = os.path.getsize(tmpfilename)
                self.try_rename(tmpfilename, filename)
                status.update({
                    'downloaded_bytes': fsize,
@@ -108,7 +107,7 @@ class ExternalFD(FragmentFD):
        return all((
            not info_dict.get('to_stdout') or Features.TO_STDOUT in cls.SUPPORTED_FEATURES,
            '+' not in info_dict['protocol'] or Features.MULTIPLE_FORMATS in cls.SUPPORTED_FEATURES,
            not traverse_obj(info_dict, ('hls_aes', ...), 'extra_param_to_segment_url'),
            not traverse_obj(info_dict, ('hls_aes', ...), 'extra_param_to_segment_url', 'extra_param_to_key_url'),
            all(proto in cls.SUPPORTED_PROTOCOLS for proto in info_dict['protocol'].split('+')),
        ))

@@ -172,7 +171,7 @@ class ExternalFD(FragmentFD):
        decrypt_fragment = self.decrypter(info_dict)
        dest, _ = self.sanitize_open(tmpfilename, 'wb')
        for frag_index, fragment in enumerate(info_dict['fragments']):
            fragment_filename = '%s-Frag%d' % (tmpfilename, frag_index)
            fragment_filename = f'{tmpfilename}-Frag{frag_index}'
            try:
                src, _ = self.sanitize_open(fragment_filename, 'rb')
            except OSError as err:
@@ -184,9 +183,9 @@ class ExternalFD(FragmentFD):
            dest.write(decrypt_fragment(fragment, src.read()))
            src.close()
            if not self.params.get('keep_fragments', False):
                self.try_remove(encodeFilename(fragment_filename))
                self.try_remove(fragment_filename)
        dest.close()
        self.try_remove(encodeFilename('%s.frag.urls' % tmpfilename))
        self.try_remove(f'{tmpfilename}.frag.urls')
        return 0

    def _call_process(self, cmd, info_dict):
@@ -335,12 +334,12 @@ class Aria2cFD(ExternalFD):
        cmd += ['--auto-file-renaming=false']

        if 'fragments' in info_dict:
            cmd += ['--file-allocation=none', '--uri-selector=inorder']
            url_list_file = '%s.frag.urls' % tmpfilename
            cmd += ['--uri-selector=inorder']
            url_list_file = f'{tmpfilename}.frag.urls'
            url_list = []
            for frag_index, fragment in enumerate(info_dict['fragments']):
                fragment_filename = '%s-Frag%d' % (os.path.basename(tmpfilename), frag_index)
                url_list.append('%s\n\tout=%s' % (fragment['url'], self._aria2c_filename(fragment_filename)))
                fragment_filename = f'{os.path.basename(tmpfilename)}-Frag{frag_index}'
                url_list.append('{}\n\tout={}'.format(fragment['url'], self._aria2c_filename(fragment_filename)))
            stream, _ = self.sanitize_open(url_list_file, 'wb')
            stream.write('\n'.join(url_list).encode())
            stream.close()
@@ -357,7 +356,7 @@ class Aria2cFD(ExternalFD):
            'id': sanitycheck,
            'method': method,
            'params': [f'token:{rpc_secret}', *params],
        }).encode('utf-8')
        }).encode()
        request = Request(
            f'http://localhost:{rpc_port}/jsonrpc',
            data=d, headers={
@@ -416,7 +415,7 @@ class Aria2cFD(ExternalFD):
            'total_bytes_estimate': total,
            'eta': (total - downloaded) / (speed or 1),
            'fragment_index': min(frag_count, len(completed) + 1) if fragmented else None,
            'elapsed': time.time() - started
            'elapsed': time.time() - started,
        })
        self._hook_progress(status, info_dict)

@@ -491,30 +490,16 @@ class FFmpegFD(ExternalFD):
        if not self.params.get('verbose'):
            args += ['-hide_banner']

        args += traverse_obj(info_dict, ('downloader_options', 'ffmpeg_args'), default=[])

        # These exists only for compatibility. Extractors should use
        # info_dict['downloader_options']['ffmpeg_args'] instead
        args += info_dict.get('_ffmpeg_args') or []
        seekable = info_dict.get('_seekable')
        if seekable is not None:
            # setting -seekable prevents ffmpeg from guessing if the server
            # supports seeking(by adding the header `Range: bytes=0-`), which
            # can cause problems in some cases
            # https://github.com/ytdl-org/youtube-dl/issues/11800#issuecomment-275037127
            # http://trac.ffmpeg.org/ticket/6125#comment:10
            args += ['-seekable', '1' if seekable else '0']

        env = None
        proxy = self.params.get('proxy')
        if proxy:
            if not re.match(r'^[\da-zA-Z]+://', proxy):
                proxy = 'http://%s' % proxy
            if not re.match(r'[\da-zA-Z]+://', proxy):
                proxy = f'http://{proxy}'

            if proxy.startswith('socks'):
                self.report_warning(
                    '%s does not support SOCKS proxies. Downloading is likely to fail. '
                    'Consider adding --hls-prefer-native to your command.' % self.get_basename())
                    f'{self.get_basename()} does not support SOCKS proxies. Downloading is likely to fail. '
                    'Consider adding --hls-prefer-native to your command.')

            # Since December 2015 ffmpeg supports -http_proxy option (see
            # http://git.videolan.org/?p=ffmpeg.git;a=commit;h=b4eb1f29ebddd60c41a2eb39f5af701e38e0d3fd)
@@ -524,17 +509,39 @@ class FFmpegFD(ExternalFD):
            env['HTTP_PROXY'] = proxy
            env['http_proxy'] = proxy

        protocol = info_dict.get('protocol')
        start_time, end_time = info_dict.get('section_start') or 0, info_dict.get('section_end')

        fallback_input_args = traverse_obj(info_dict, ('downloader_options', 'ffmpeg_args', ...))

        selected_formats = info_dict.get('requested_formats') or [info_dict]
        for i, fmt in enumerate(selected_formats):
            is_http = re.match(r'https?://', fmt['url'])
            cookies = self.ydl.cookiejar.get_cookies_for_url(fmt['url']) if is_http else []
            if cookies:
                args.extend(['-cookies', ''.join(
                    f'{cookie.name}={cookie.value}; path={cookie.path}; domain={cookie.domain};\r\n'
                    for cookie in cookies)])
            if fmt.get('http_headers') and is_http:
                # Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg:
                # [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
                args.extend(['-headers', ''.join(f'{key}: {val}\r\n' for key, val in fmt['http_headers'].items())])

        if start_time:
            args += ['-ss', str(start_time)]
        if end_time:
            args += ['-t', str(end_time - start_time)]

            protocol = fmt.get('protocol')

        if protocol == 'rtmp':
            player_url = info_dict.get('player_url')
            page_url = info_dict.get('page_url')
            app = info_dict.get('app')
            play_path = info_dict.get('play_path')
            tc_url = info_dict.get('tc_url')
            flash_version = info_dict.get('flash_version')
            live = info_dict.get('rtmp_live', False)
            conn = info_dict.get('rtmp_conn')
            player_url = fmt.get('player_url')
            page_url = fmt.get('page_url')
            app = fmt.get('app')
            play_path = fmt.get('play_path')
            tc_url = fmt.get('tc_url')
            flash_version = fmt.get('flash_version')
            live = fmt.get('rtmp_live', False)
            conn = fmt.get('rtmp_conn')
            if player_url is not None:
                args += ['-rtmp_swfverify', player_url]
            if page_url is not None:
@@ -555,27 +562,29 @@ class FFmpegFD(ExternalFD):
            elif isinstance(conn, str):
                args += ['-rtmp_conn', conn]

        start_time, end_time = info_dict.get('section_start') or 0, info_dict.get('section_end')
            elif protocol == 'http_dash_segments' and info_dict.get('is_live'):
                # ffmpeg may try to read past the latest available segments for
                # live DASH streams unless we pass `-re`. In modern ffmpeg, this
                # is an alias of `-readrate 1`, but `-readrate` was not added
                # until ffmpeg 5.0, so we must stick to using `-re`
                args += ['-re']

        selected_formats = info_dict.get('requested_formats') or [info_dict]
        for i, fmt in enumerate(selected_formats):
            is_http = re.match(r'^https?://', fmt['url'])
            cookies = self.ydl.cookiejar.get_cookies_for_url(fmt['url']) if is_http else []
            if cookies:
                args.extend(['-cookies', ''.join(
                    f'{cookie.name}={cookie.value}; path={cookie.path}; domain={cookie.domain};\r\n'
                    for cookie in cookies)])
            if fmt.get('http_headers') and is_http:
                # Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv:
                # [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
                args.extend(['-headers', ''.join(f'{key}: {val}\r\n' for key, val in fmt['http_headers'].items())])
            url = fmt['url']
            if self.params.get('enable_file_urls') and url.startswith('file:'):
                # The default protocol_whitelist is 'file,crypto,data' when reading local m3u8 URLs,
                # so only local segments can be read unless we also include 'http,https,tcp,tls'
                args += ['-protocol_whitelist', 'file,crypto,data,http,https,tcp,tls']
                # ffmpeg incorrectly handles 'file:' URLs by only removing the
                # 'file:' prefix and treating the rest as if it's a normal filepath.
                # FFmpegPostProcessor also depends on this behavior, so we need to fixup the URLs:
                # - On Windows/Cygwin, replace 'file:///' and 'file://localhost/' with 'file:'
                # - On *nix, replace 'file://localhost/' with 'file:/'
                # Ref: https://github.com/yt-dlp/yt-dlp/issues/13781
                #      https://trac.ffmpeg.org/ticket/2702
                url = re.sub(r'^file://(?:localhost)?/', 'file:' if os.name == 'nt' else 'file:/', url)

            if start_time:
                args += ['-ss', str(start_time)]
            if end_time:
                args += ['-t', str(end_time - start_time)]

            args += self._configuration_args((f'_i{i + 1}', '_i')) + ['-i', fmt['url']]
            args += traverse_obj(fmt, ('downloader_options', 'ffmpeg_args', ...)) or fallback_input_args
            args += [*self._configuration_args((f'_i{i + 1}', '_i')), '-i', url]

            if not (start_time or end_time) or not self.params.get('force_keyframes_at_cuts'):
                args += ['-c', 'copy']
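
The `file:` URL fixup above works around ffmpeg stripping only the literal `file:` prefix, so an RFC-style `file:///x` would otherwise be read as the path `///x`. A quick demo of the same regex rewrite, with the platform switch exposed as a parameter for illustration:

```
import os
import re

# Sketch: the rewrite applied above before handing URLs to ffmpeg.
def fixup_file_url(url, windows=(os.name == 'nt')):
    return re.sub(r'^file://(?:localhost)?/', 'file:' if windows else 'file:/', url)

print(fixup_file_url('file:///tmp/a.m3u8', windows=False))  # file:/tmp/a.m3u8
print(fixup_file_url('file:///C:/a.m3u8', windows=True))    # file:C:/a.m3u8
```
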
@@ -615,10 +624,12 @@ class FFmpegFD(ExternalFD):
|
||||
else:
|
||||
args += ['-f', EXT_TO_OUT_FORMATS.get(ext, ext)]
|
||||
|
||||
args += traverse_obj(info_dict, ('downloader_options', 'ffmpeg_args_out', ...))
|
||||
|
||||
args += self._configuration_args(('_o1', '_o', ''))
|
||||
|
||||
args = [encodeArgument(opt) for opt in args]
|
||||
args.append(encodeFilename(ffpp._ffmpeg_filename_argument(tmpfilename), True))
|
||||
args.append(ffpp._ffmpeg_filename_argument(tmpfilename))
|
||||
self._debug_cmd(args)
|
||||
|
||||
piped = any(fmt['url'] in ('-', 'pipe:') for fmt in selected_formats)
|
||||
@@ -641,10 +652,6 @@ class FFmpegFD(ExternalFD):
|
||||
return retval
|
||||
|
||||
|
||||
class AVconvFD(FFmpegFD):
|
||||
pass
|
||||
|
||||
|
||||
_BY_NAME = {
|
||||
klass.get_basename(): klass
|
||||
for name, klass in globals().items()
|
||||
|
||||
@@ -67,12 +67,12 @@ class FlvReader(io.BytesIO):
self.read_bytes(3)
quality_entry_count = self.read_unsigned_char()
# QualityEntryCount
for i in range(quality_entry_count):
for _ in range(quality_entry_count):
self.read_string()

segment_run_count = self.read_unsigned_int()
segments = []
for i in range(segment_run_count):
for _ in range(segment_run_count):
first_segment = self.read_unsigned_int()
fragments_per_segment = self.read_unsigned_int()
segments.append((first_segment, fragments_per_segment))
@@ -91,12 +91,12 @@ class FlvReader(io.BytesIO):

quality_entry_count = self.read_unsigned_char()
# QualitySegmentUrlModifiers
for i in range(quality_entry_count):
for _ in range(quality_entry_count):
self.read_string()

fragments_count = self.read_unsigned_int()
fragments = []
for i in range(fragments_count):
for _ in range(fragments_count):
first = self.read_unsigned_int()
first_ts = self.read_unsigned_long_long()
duration = self.read_unsigned_int()
@@ -135,11 +135,11 @@ class FlvReader(io.BytesIO):
self.read_string() # MovieIdentifier
server_count = self.read_unsigned_char()
# ServerEntryTable
for i in range(server_count):
for _ in range(server_count):
self.read_string()
quality_count = self.read_unsigned_char()
# QualityEntryTable
for i in range(quality_count):
for _ in range(quality_count):
self.read_string()
# DrmData
self.read_string()
@@ -148,15 +148,15 @@ class FlvReader(io.BytesIO):

segments_count = self.read_unsigned_char()
segments = []
for i in range(segments_count):
box_size, box_type, box_data = self.read_box_info()
for _ in range(segments_count):
_box_size, box_type, box_data = self.read_box_info()
assert box_type == b'asrt'
segment = FlvReader(box_data).read_asrt()
segments.append(segment)
fragments_run_count = self.read_unsigned_char()
fragments = []
for i in range(fragments_run_count):
box_size, box_type, box_data = self.read_box_info()
for _ in range(fragments_run_count):
_box_size, box_type, box_data = self.read_box_info()
assert box_type == b'afrt'
fragments.append(FlvReader(box_data).read_afrt())

@@ -167,7 +167,7 @@ class FlvReader(io.BytesIO):
}

def read_bootstrap_info(self):
total_size, box_type, box_data = self.read_box_info()
_, box_type, box_data = self.read_box_info()
assert box_type == b'abst'
return FlvReader(box_data).read_abst()

@@ -309,7 +309,7 @@ class F4mFD(FragmentFD):
def real_download(self, filename, info_dict):
man_url = info_dict['url']
requested_bitrate = info_dict.get('tbr')
self.to_screen('[%s] Downloading f4m manifest' % self.FD_NAME)
self.to_screen(f'[{self.FD_NAME}] Downloading f4m manifest')

urlh = self.ydl.urlopen(self._prepare_url(info_dict, man_url))
man_url = urlh.url
@@ -324,10 +324,10 @@ class F4mFD(FragmentFD):
if requested_bitrate is None or len(formats) == 1:
# get the best format
formats = sorted(formats, key=lambda f: f[0])
rate, media = formats[-1]
_, media = formats[-1]
else:
rate, media = list(filter(
lambda f: int(f[0]) == requested_bitrate, formats))[0]
_, media = next(filter(
lambda f: int(f[0]) == requested_bitrate, formats))

# Prefer baseURL for relative URLs as per 11.2 of F4M 3.0 spec.
man_base_url = get_base_url(doc) or man_url

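As a side note on the format-selection change above: `next(filter(...))` stops at the first match instead of building a full list. A tiny sketch with made-up (bitrate, media) tuples:

```python
# Hypothetical (bitrate, media) pairs as parsed from an f4m manifest
formats = [(400, 'media-low'), (1500, 'media-mid'), (3000, 'media-high')]
requested_bitrate = 1500

# Old form: list(filter(...))[0] materializes every match (IndexError if none)
# New form: next(filter(...)) is lazy (StopIteration if none)
_, media = next(filter(lambda f: int(f[0]) == requested_bitrate, formats))
assert media == 'media-mid'
```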
@@ -9,11 +9,11 @@ import time
from .common import FileDownloader
from .http import HttpFD
from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
from ..compat import compat_os_name
from ..networking import Request
from ..networking.exceptions import HTTPError, IncompleteRead
from ..utils import DownloadError, RetryManager, encodeFilename, traverse_obj
from ..utils import DownloadError, RetryManager, traverse_obj
from ..utils.networking import HTTPHeaderDict
from ..utils.progress import ProgressCalculator


class HttpQuietDownloader(HttpFD):
@@ -151,7 +151,7 @@ class FragmentFD(FileDownloader):
if self.__do_ytdl_file(ctx):
self._write_ytdl_file(ctx)
if not self.params.get('keep_fragments', False):
self.try_remove(encodeFilename(ctx['fragment_filename_sanitized']))
self.try_remove(ctx['fragment_filename_sanitized'])
del ctx['fragment_filename_sanitized']

def _prepare_frag_download(self, ctx):
@@ -187,7 +187,7 @@ class FragmentFD(FileDownloader):
})

if self.__do_ytdl_file(ctx):
ytdl_file_exists = os.path.isfile(encodeFilename(self.ytdl_filename(ctx['filename'])))
ytdl_file_exists = os.path.isfile(self.ytdl_filename(ctx['filename']))
continuedl = self.params.get('continuedl', True)
if continuedl and ytdl_file_exists:
self._read_ytdl_file(ctx)
@@ -198,7 +198,7 @@ class FragmentFD(FileDownloader):
'.ytdl file is corrupt' if is_corrupt else
'Inconsistent state of incomplete fragment download')
self.report_warning(
'%s. Restarting from the beginning ...' % message)
f'{message}. Restarting from the beginning ...')
ctx['fragment_index'] = resume_len = 0
if 'ytdl_corrupt' in ctx:
del ctx['ytdl_corrupt']
@@ -226,8 +226,7 @@ class FragmentFD(FileDownloader):
resume_len = ctx['complete_frags_downloaded_bytes']
total_frags = ctx['total_frags']
ctx_id = ctx.get('ctx_id')
# This dict stores the download progress, it's updated by the progress
# hook
# Stores the download progress, updated by the progress hook
state = {
'status': 'downloading',
'downloaded_bytes': resume_len,
@@ -237,14 +236,8 @@ class FragmentFD(FileDownloader):
'tmpfilename': ctx['tmpfilename'],
}

start = time.time()
ctx.update({
'started': start,
'fragment_started': start,
# Amount of fragment's bytes downloaded by the time of the previous
# frag progress hook invocation
'prev_frag_downloaded_bytes': 0,
})
ctx['started'] = time.time()
progress = ProgressCalculator(resume_len)

def frag_progress_hook(s):
if s['status'] not in ('downloading', 'finished'):
@@ -259,38 +252,35 @@ class FragmentFD(FileDownloader):
state['max_progress'] = ctx.get('max_progress')
state['progress_idx'] = ctx.get('progress_idx')

time_now = time.time()
state['elapsed'] = time_now - start
state['elapsed'] = progress.elapsed
frag_total_bytes = s.get('total_bytes') or 0
s['fragment_info_dict'] = s.pop('info_dict', {})

# XXX: Fragment resume is not accounted for here
if not ctx['live']:
estimated_size = (
(ctx['complete_frags_downloaded_bytes'] + frag_total_bytes)
/ (state['fragment_index'] + 1) * total_frags)
state['total_bytes_estimate'] = estimated_size
progress.total = estimated_size
progress.update(s.get('downloaded_bytes'))
state['total_bytes_estimate'] = progress.total
else:
progress.update(s.get('downloaded_bytes'))

if s['status'] == 'finished':
state['fragment_index'] += 1
ctx['fragment_index'] = state['fragment_index']
state['downloaded_bytes'] += frag_total_bytes - ctx['prev_frag_downloaded_bytes']
ctx['complete_frags_downloaded_bytes'] = state['downloaded_bytes']
ctx['speed'] = state['speed'] = self.calc_speed(
ctx['fragment_started'], time_now, frag_total_bytes)
ctx['fragment_started'] = time.time()
ctx['prev_frag_downloaded_bytes'] = 0
else:
frag_downloaded_bytes = s['downloaded_bytes']
state['downloaded_bytes'] += frag_downloaded_bytes - ctx['prev_frag_downloaded_bytes']
ctx['speed'] = state['speed'] = self.calc_speed(
ctx['fragment_started'], time_now, frag_downloaded_bytes - ctx.get('frag_resume_len', 0))
if not ctx['live']:
state['eta'] = self.calc_eta(state['speed'], estimated_size - state['downloaded_bytes'])
ctx['prev_frag_downloaded_bytes'] = frag_downloaded_bytes
progress.thread_reset()

state['downloaded_bytes'] = ctx['complete_frags_downloaded_bytes'] = progress.downloaded
state['speed'] = ctx['speed'] = progress.speed.smooth
state['eta'] = progress.eta.smooth

self._hook_progress(state, info_dict)

ctx['dl'].add_progress_hook(frag_progress_hook)

return start
return ctx['started']
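For reference, the total-size estimate kept by the hook above is a simple per-fragment average scaled up to the full fragment count. A worked example with hypothetical numbers:

```python
complete_frags_downloaded_bytes = 3_000_000  # bytes from three finished fragments
frag_total_bytes = 1_000_000                 # size of the fragment in flight
fragment_index = 3                           # zero-based index of that fragment
total_frags = 100

estimated_size = (
    (complete_frags_downloaded_bytes + frag_total_bytes)
    / (fragment_index + 1) * total_frags)
assert estimated_size == 100_000_000         # ~1 MB/fragment over 100 fragments
```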

def _finish_frag_download(self, ctx, info_dict):
ctx['dest_stream'].close()
@@ -312,7 +302,7 @@ class FragmentFD(FileDownloader):
elif to_file:
self.try_rename(ctx['tmpfilename'], ctx['filename'])
filetime = ctx.get('fragment_filetime')
if self.params.get('updatetime', True) and filetime:
if self.params.get('updatetime') and filetime:
with contextlib.suppress(Exception):
os.utime(ctx['filename'], (time.time(), filetime))

@@ -375,10 +365,10 @@ class FragmentFD(FileDownloader):
return decrypt_fragment

def download_and_append_fragments_multiple(self, *args, **kwargs):
'''
"""
@params (ctx1, fragments1, info_dict1), (ctx2, fragments2, info_dict2), ...
all args must be either tuple or list
'''
"""
interrupt_trigger = [True]
max_progress = len(args)
if max_progress == 1:
@@ -399,7 +389,7 @@ class FragmentFD(FileDownloader):
def __exit__(self, exc_type, exc_val, exc_tb):
pass

if compat_os_name == 'nt':
if os.name == 'nt':
def future_result(future):
while True:
try:
@@ -433,7 +423,7 @@ class FragmentFD(FileDownloader):
finally:
tpe.shutdown(wait=True)
if not interrupt_trigger[0] and not is_live:
raise KeyboardInterrupt()
raise KeyboardInterrupt
# we expect the user wants to stop and DO WANT the preceding postprocessors to run;
# so returning an intermediate result here instead of KeyboardInterrupt on live
return result
@@ -500,7 +490,6 @@ class FragmentFD(FileDownloader):
download_fragment(fragment, ctx_copy)
return fragment, fragment['frag_index'], ctx_copy.get('fragment_filename_sanitized')

self.report_warning('The download speed shown is only of one thread. This is a known issue')
with tpe or concurrent.futures.ThreadPoolExecutor(max_workers) as pool:
try:
for fragment, frag_index, frag_filename in pool.map(_download_fragment, fragments):

@@ -16,6 +16,7 @@ from ..utils import (
update_url_query,
urljoin,
)
from ..utils._utils import _request_dump_filename


class HlsFD(FragmentFD):
@@ -72,21 +73,40 @@ class HlsFD(FragmentFD):

def real_download(self, filename, info_dict):
man_url = info_dict['url']
self.to_screen('[%s] Downloading m3u8 manifest' % self.FD_NAME)

s = info_dict.get('hls_media_playlist_data')
if s:
self.to_screen(f'[{self.FD_NAME}] Using m3u8 manifest from extracted info')
else:
self.to_screen(f'[{self.FD_NAME}] Downloading m3u8 manifest')
urlh = self.ydl.urlopen(self._prepare_url(info_dict, man_url))
man_url = urlh.url
s = urlh.read().decode('utf-8', 'ignore')
s_bytes = urlh.read()
if self.params.get('write_pages'):
dump_filename = _request_dump_filename(
man_url, info_dict['id'], None,
trim_length=self.params.get('trim_file_name'))
self.to_screen(f'[{self.FD_NAME}] Saving request to {dump_filename}')
with open(dump_filename, 'wb') as outf:
outf.write(s_bytes)
s = s_bytes.decode('utf-8', 'ignore')

can_download, message = self.can_download(s, info_dict, self.params.get('allow_unplayable_formats')), None
if can_download:
has_ffmpeg = FFmpegFD.available()
no_crypto = not Cryptodome.AES and '#EXT-X-KEY:METHOD=AES-128' in s
if no_crypto and has_ffmpeg:
can_download, message = False, 'The stream has AES-128 encryption and pycryptodomex is not available'
elif no_crypto:
message = ('The stream has AES-128 encryption and neither ffmpeg nor pycryptodomex are available; '
'Decryption will be performed natively, but will be extremely slow')
if not Cryptodome.AES and '#EXT-X-KEY:METHOD=AES-128' in s:
# Even if pycryptodomex isn't available, force HlsFD for m3u8s that won't work with ffmpeg
ffmpeg_can_dl = not traverse_obj(info_dict, ((
'extra_param_to_segment_url', 'extra_param_to_key_url',
'hls_media_playlist_data', ('hls_aes', ('uri', 'key', 'iv')),
), any))
message = 'The stream has AES-128 encryption and {} available'.format(
'neither ffmpeg nor pycryptodomex are' if ffmpeg_can_dl and not has_ffmpeg else
'pycryptodomex is not')
if has_ffmpeg and ffmpeg_can_dl:
can_download = False
else:
message += '; decryption will be performed natively, but will be extremely slow'
elif info_dict.get('extractor_key') == 'Generic' and re.search(r'(?m)#EXT-X-MEDIA-SEQUENCE:(?!0$)', s):
install_ffmpeg = '' if has_ffmpeg else 'install ffmpeg and '
message = ('Live HLS streams are not supported by the native downloader. If this is a livestream, '
@@ -119,12 +139,12 @@ class HlsFD(FragmentFD):
self.to_screen(f'[{self.FD_NAME}] Fragment downloads will be delegated to {real_downloader.get_basename()}')

def is_ad_fragment_start(s):
return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s
or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',ad'))
return ((s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s)
or (s.startswith('#UPLYNK-SEGMENT') and s.endswith(',ad')))

def is_ad_fragment_end(s):
return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=master' in s
or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',segment'))
return ((s.startswith('#ANVATO-SEGMENT-INFO') and 'type=master' in s)
or (s.startswith('#UPLYNK-SEGMENT') and s.endswith(',segment')))

fragments = []

@@ -160,10 +180,12 @@ class HlsFD(FragmentFD):
extra_state = ctx.setdefault('extra_state', {})

format_index = info_dict.get('format_index')
extra_query = None
extra_param_to_segment_url = info_dict.get('extra_param_to_segment_url')
if extra_param_to_segment_url:
extra_query = urllib.parse.parse_qs(extra_param_to_segment_url)
extra_segment_query = None
if extra_param_to_segment_url := info_dict.get('extra_param_to_segment_url'):
extra_segment_query = urllib.parse.parse_qs(extra_param_to_segment_url)
extra_key_query = None
if extra_param_to_key_url := info_dict.get('extra_param_to_key_url'):
extra_key_query = urllib.parse.parse_qs(extra_param_to_key_url)
i = 0
media_sequence = 0
decrypt_info = {'METHOD': 'NONE'}
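Illustrating the two query dicts parsed above with stdlib calls only (the parameter values are hypothetical, and `append_query` is a simplified stand-in for yt-dlp's `update_url_query` helper):

```python
import urllib.parse

extra_segment_query = urllib.parse.parse_qs('token=abc&expiry=1700000000')
# -> {'token': ['abc'], 'expiry': ['1700000000']}

def append_query(url, query):
    # Simplified stand-in for update_url_query(): merge params into the URL
    parts = urllib.parse.urlparse(url)
    params = urllib.parse.parse_qs(parts.query)
    params.update(query)
    return parts._replace(query=urllib.parse.urlencode(params, doseq=True)).geturl()

print(append_query('https://cdn.example.com/seg-001.ts', extra_segment_query))
# https://cdn.example.com/seg-001.ts?token=abc&expiry=1700000000
```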
@@ -175,6 +197,7 @@ class HlsFD(FragmentFD):
if external_aes_iv:
external_aes_iv = binascii.unhexlify(remove_start(external_aes_iv, '0x').zfill(32))
byte_range = {}
byte_range_offset = 0
discontinuity_count = 0
frag_index = 0
ad_frag_next = False
@@ -182,7 +205,7 @@ class HlsFD(FragmentFD):
line = line.strip()
if line:
if not line.startswith('#'):
if format_index and discontinuity_count != format_index:
if format_index is not None and discontinuity_count != format_index:
continue
if ad_frag_next:
continue
@@ -190,8 +213,8 @@ class HlsFD(FragmentFD):
if frag_index <= ctx['fragment_index']:
continue
frag_url = urljoin(man_url, line)
if extra_query:
frag_url = update_url_query(frag_url, extra_query)
if extra_segment_query:
frag_url = update_url_query(frag_url, extra_segment_query)

fragments.append({
'frag_index': frag_index,
@@ -202,8 +225,13 @@ class HlsFD(FragmentFD):
})
media_sequence += 1

# If the byte_range is truthy, reset it after appending a fragment that uses it
if byte_range:
byte_range_offset = byte_range['end']
byte_range = {}

elif line.startswith('#EXT-X-MAP'):
if format_index and discontinuity_count != format_index:
if format_index is not None and discontinuity_count != format_index:
continue
if frag_index > 0:
self.report_error(
@@ -212,13 +240,15 @@ class HlsFD(FragmentFD):
frag_index += 1
map_info = parse_m3u8_attributes(line[11:])
frag_url = urljoin(man_url, map_info.get('URI'))
if extra_query:
frag_url = update_url_query(frag_url, extra_query)
if extra_segment_query:
frag_url = update_url_query(frag_url, extra_segment_query)

map_byte_range = {}

if map_info.get('BYTERANGE'):
splitted_byte_range = map_info.get('BYTERANGE').split('@')
sub_range_start = int(splitted_byte_range[1]) if len(splitted_byte_range) == 2 else byte_range['end']
byte_range = {
sub_range_start = int(splitted_byte_range[1]) if len(splitted_byte_range) == 2 else 0
map_byte_range = {
'start': sub_range_start,
'end': sub_range_start + int(splitted_byte_range[0]),
}
@@ -227,8 +257,8 @@ class HlsFD(FragmentFD):
'frag_index': frag_index,
'url': frag_url,
'decrypt_info': decrypt_info,
'byte_range': byte_range,
'media_sequence': media_sequence
'byte_range': map_byte_range,
'media_sequence': media_sequence,
})
media_sequence += 1

@@ -244,8 +274,10 @@ class HlsFD(FragmentFD):
decrypt_info['KEY'] = external_aes_key
else:
decrypt_info['URI'] = urljoin(man_url, decrypt_info['URI'])
if extra_query:
decrypt_info['URI'] = update_url_query(decrypt_info['URI'], extra_query)
if extra_key_query or extra_segment_query:
# Fall back to extra_segment_query for the key URL, for backwards compat
decrypt_info['URI'] = update_url_query(
decrypt_info['URI'], extra_key_query or extra_segment_query)
if decrypt_url != decrypt_info['URI']:
decrypt_info['KEY'] = None

@@ -253,7 +285,7 @@ class HlsFD(FragmentFD):
media_sequence = int(line[22:])
elif line.startswith('#EXT-X-BYTERANGE'):
splitted_byte_range = line[17:].split('@')
sub_range_start = int(splitted_byte_range[1]) if len(splitted_byte_range) == 2 else byte_range['end']
sub_range_start = int(splitted_byte_range[1]) if len(splitted_byte_range) == 2 else byte_range_offset
byte_range = {
'start': sub_range_start,
'end': sub_range_start + int(splitted_byte_range[0]),
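The `byte_range_offset` bookkeeping above follows the HLS rule that an `#EXT-X-BYTERANGE:<n>[@<o>]` tag without an explicit `@<o>` offset continues from the end of the previous sub-range. A worked example with hypothetical tag values:

```python
byte_range, byte_range_offset = {}, 0

for tag_value in ('1000@0', '1000', '500'):  # hypothetical playlist values
    splitted_byte_range = tag_value.split('@')
    sub_range_start = (int(splitted_byte_range[1])
                       if len(splitted_byte_range) == 2 else byte_range_offset)
    byte_range = {'start': sub_range_start,
                  'end': sub_range_start + int(splitted_byte_range[0])}
    byte_range_offset = byte_range['end']  # what the fragment-append path does
    print(byte_range)
# {'start': 0, 'end': 1000}
# {'start': 1000, 'end': 2000}
# {'start': 2000, 'end': 2500}
```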
@@ -350,9 +382,8 @@ class HlsFD(FragmentFD):
# XXX: this should probably be silent as well
# or verify that all segments contain the same data
self.report_warning(bug_reports_message(
'Discarding a %s block found in the middle of the stream; '
'if the subtitles display incorrectly,'
% (type(block).__name__)))
f'Discarding a {type(block).__name__} block found in the middle of the stream; '
'if the subtitles display incorrectly,'))
continue
block.write_into(output)

@@ -369,6 +400,9 @@ class HlsFD(FragmentFD):

return output.getvalue().encode()

if len(fragments) == 1:
self.download_and_append_fragments(ctx, fragments, info_dict)
else:
self.download_and_append_fragments(
ctx, fragments, info_dict, pack_func=pack_fragment, finish_func=fin_fragments)
else:

@@ -13,13 +13,9 @@ from ..utils import (
ContentTooShortError,
RetryManager,
ThrottledDownload,
XAttrMetadataError,
XAttrUnavailableError,
encodeFilename,
int_or_none,
parse_http_range,
try_call,
write_xattr,
)
from ..utils.networking import HTTPHeaderDict

@@ -28,6 +24,10 @@ class HttpFD(FileDownloader):
def real_download(self, filename, info_dict):
url = info_dict['url']
request_data = info_dict.get('request_data', None)
request_extensions = {}
impersonate_target = self._get_impersonate_target(info_dict)
if impersonate_target is not None:
request_extensions['impersonate'] = impersonate_target

class DownloadContext(dict):
__getattr__ = dict.get
@@ -58,9 +58,8 @@ class HttpFD(FileDownloader):

if self.params.get('continuedl', True):
# Establish possible resume length
if os.path.isfile(encodeFilename(ctx.tmpfilename)):
ctx.resume_len = os.path.getsize(
encodeFilename(ctx.tmpfilename))
if os.path.isfile(ctx.tmpfilename):
ctx.resume_len = os.path.getsize(ctx.tmpfilename)

ctx.is_resume = ctx.resume_len > 0

@@ -111,7 +110,7 @@ class HttpFD(FileDownloader):
if try_call(lambda: range_end >= ctx.content_len):
range_end = ctx.content_len - 1

request = Request(url, request_data, headers)
request = Request(url, request_data, headers, extensions=request_extensions)
has_range = range_start is not None
if has_range:
request.headers['Range'] = f'bytes={int(range_start)}-{int_or_none(range_end) or ""}'
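The f-string above produces standard RFC 7233 byte ranges, open-ended when the end is unknown. A quick sketch with hypothetical values (`int_or_none` here is a simplified stand-in for the yt-dlp helper of the same name):

```python
def int_or_none(v):
    # Simplified stand-in: coerce to int, or None on failure
    try:
        return int(v)
    except (TypeError, ValueError):
        return None

for range_start, range_end in ((0, 499), (500, None)):
    print(f'bytes={int(range_start)}-{int_or_none(range_end) or ""}')
# bytes=0-499
# bytes=500-
```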
@@ -176,7 +175,7 @@ class HttpFD(FileDownloader):
'downloaded_bytes': ctx.resume_len,
'total_bytes': ctx.resume_len,
}, info_dict)
raise SucceedDownload()
raise SucceedDownload
else:
# The length does not match, we start the download over
self.report_unable_to_resume()
@@ -194,7 +193,7 @@ class HttpFD(FileDownloader):

def close_stream():
if ctx.stream is not None:
if not ctx.tmpfilename == '-':
if ctx.tmpfilename != '-':
ctx.stream.close()
ctx.stream = None

@@ -237,8 +236,13 @@ class HttpFD(FileDownloader):

def retry(e):
close_stream()
ctx.resume_len = (byte_counter if ctx.tmpfilename == '-'
else os.path.getsize(encodeFilename(ctx.tmpfilename)))
if ctx.tmpfilename == '-':
ctx.resume_len = byte_counter
else:
try:
ctx.resume_len = os.path.getsize(ctx.tmpfilename)
except FileNotFoundError:
ctx.resume_len = 0
raise RetryDownload(e)

while True:
@@ -263,20 +267,14 @@ class HttpFD(FileDownloader):
ctx.filename = self.undo_temp_name(ctx.tmpfilename)
self.report_destination(ctx.filename)
except OSError as err:
self.report_error('unable to open for writing: %s' % str(err))
self.report_error(f'unable to open for writing: {err}')
return False

if self.params.get('xattr_set_filesize', False) and data_len is not None:
try:
write_xattr(ctx.tmpfilename, 'user.ytdl.filesize', str(data_len).encode())
except (XAttrUnavailableError, XAttrMetadataError) as err:
self.report_error('unable to set filesize xattr: %s' % str(err))

try:
ctx.stream.write(data_block)
except OSError as err:
self.to_stderr('\n')
self.report_error('unable to write data: %s' % str(err))
self.report_error(f'unable to write data: {err}')
return False

# Apply rate limit
@@ -322,7 +320,7 @@ class HttpFD(FileDownloader):
elif now - ctx.throttle_start > 3:
if ctx.stream is not None and ctx.tmpfilename != '-':
ctx.stream.close()
raise ThrottledDownload()
raise ThrottledDownload
elif speed:
ctx.throttle_start = None

@@ -333,7 +331,7 @@ class HttpFD(FileDownloader):

if not is_test and ctx.chunk_size and ctx.content_len is not None and byte_counter < ctx.content_len:
ctx.resume_len = byte_counter
raise NextFragment()
raise NextFragment

if ctx.tmpfilename != '-':
ctx.stream.close()
@@ -345,7 +343,7 @@ class HttpFD(FileDownloader):
self.try_rename(ctx.tmpfilename, ctx.filename)

# Update file modification time
if self.params.get('updatetime', True):
if self.params.get('updatetime'):
info_dict['filetime'] = self.try_utime(ctx.filename, ctx.data.headers.get('last-modified', None))

self._hook_progress({

@@ -251,7 +251,7 @@ class IsmFD(FragmentFD):
skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)

frag_index = 0
for i, segment in enumerate(segments):
for segment in segments:
frag_index += 1
if frag_index <= ctx['fragment_index']:
continue

@@ -10,7 +10,7 @@ from ..version import __version__ as YT_DLP_VERSION


class MhtmlFD(FragmentFD):
_STYLESHEET = """\
_STYLESHEET = '''\
html, body {
margin: 0;
padding: 0;
@@ -45,7 +45,7 @@ body > figure > img {
max-width: 100%;
max-height: calc(100vh - 5em);
}
"""
'''
_STYLESHEET = re.sub(r'\s+', ' ', _STYLESHEET)
_STYLESHEET = re.sub(r'\B \B|(?<=[\w\-]) (?=[^\w\-])|(?<=[^\w\-]) (?=[\w\-])', '', _STYLESHEET)

@@ -57,24 +57,19 @@ body > figure > img {
)).decode('us-ascii') + '?='

def _gen_cid(self, i, fragment, frag_boundary):
return '%u.%s@yt-dlp.github.io.invalid' % (i, frag_boundary)
return f'{i}.{frag_boundary}@yt-dlp.github.io.invalid'

def _gen_stub(self, *, fragments, frag_boundary, title):
output = io.StringIO()

output.write((
output.write(
'<!DOCTYPE html>'
'<html>'
'<head>'
'' '<meta name="generator" content="yt-dlp {version}">'
'' '<title>{title}</title>'
'' '<style>{styles}</style>'
'<body>'
).format(
version=escapeHTML(YT_DLP_VERSION),
styles=self._STYLESHEET,
title=escapeHTML(title)
))
f'<meta name="generator" content="yt-dlp {escapeHTML(YT_DLP_VERSION)}">'
f'<title>{escapeHTML(title)}</title>'
f'<style>{self._STYLESHEET}</style>'
'<body>')

t0 = 0
for i, frag in enumerate(fragments):
@@ -87,15 +82,12 @@ body > figure > img {
num=i + 1,
t0=srt_subtitles_timecode(t0),
t1=srt_subtitles_timecode(t1),
duration=formatSeconds(frag['duration'], msec=True)
duration=formatSeconds(frag['duration'], msec=True),
))
except (KeyError, ValueError, TypeError):
t1 = None
output.write((
'<figcaption>Slide #{num}</figcaption>'
).format(num=i + 1))
output.write('<img src="cid:{cid}">'.format(
cid=self._gen_cid(i, frag, frag_boundary)))
output.write(f'<figcaption>Slide #{i + 1}</figcaption>')
output.write(f'<img src="cid:{self._gen_cid(i, frag, frag_boundary)}">')
output.write('</figure>')
t0 = t1

@@ -126,31 +118,24 @@ body > figure > img {
stub = self._gen_stub(
fragments=fragments,
frag_boundary=frag_boundary,
title=title
title=title,
)

ctx['dest_stream'].write((
'MIME-Version: 1.0\r\n'
'From: <nowhere@yt-dlp.github.io.invalid>\r\n'
'To: <nowhere@yt-dlp.github.io.invalid>\r\n'
'Subject: {title}\r\n'
f'Subject: {self._escape_mime(title)}\r\n'
'Content-type: multipart/related; '
'' 'boundary="{boundary}"; '
'' 'type="text/html"\r\n'
'X.yt-dlp.Origin: {origin}\r\n'
f'boundary="{frag_boundary}"; '
'type="text/html"\r\n'
f'X.yt-dlp.Origin: {origin}\r\n'
'\r\n'
'--{boundary}\r\n'
f'--{frag_boundary}\r\n'
'Content-Type: text/html; charset=utf-8\r\n'
'Content-Length: {length}\r\n'
f'Content-Length: {len(stub)}\r\n'
'\r\n'
'{stub}\r\n'
).format(
origin=origin,
boundary=frag_boundary,
length=len(stub),
title=self._escape_mime(title),
stub=stub
).encode())
f'{stub}\r\n').encode())
extra_state['header_written'] = True

for i, fragment in enumerate(fragments):

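For orientation, this is roughly what the MHTML preamble written above looks like on the wire, using hypothetical values for the title, boundary, origin, and stub (the real code additionally MIME-encodes the Subject):

```python
title, frag_boundary, origin = 'Example slides', 'deadbeefdeadbeef', 'https://example.com/talk'
stub = '<!DOCTYPE html><html>...</html>'

preamble = (
    'MIME-Version: 1.0\r\n'
    'From: <nowhere@yt-dlp.github.io.invalid>\r\n'
    'To: <nowhere@yt-dlp.github.io.invalid>\r\n'
    f'Subject: {title}\r\n'
    'Content-type: multipart/related; '
    f'boundary="{frag_boundary}"; '
    'type="text/html"\r\n'
    f'X.yt-dlp.Origin: {origin}\r\n'
    '\r\n'
    f'--{frag_boundary}\r\n'
    'Content-Type: text/html; charset=utf-8\r\n'
    f'Content-Length: {len(stub)}\r\n'
    '\r\n'
    f'{stub}\r\n')
print(preamble)
```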
@@ -2,103 +2,49 @@ import json
import threading
import time

from . import get_suitable_downloader
from .common import FileDownloader
from .external import FFmpegFD
from ..networking import Request
from ..utils import DownloadError, WebSocketsWrapper, str_or_none, try_get


class NiconicoDmcFD(FileDownloader):
""" Downloading niconico douga from DMC with heartbeat """

def real_download(self, filename, info_dict):
from ..extractor.niconico import NiconicoIE

self.to_screen('[%s] Downloading from DMC' % self.FD_NAME)
ie = NiconicoIE(self.ydl)
info_dict, heartbeat_info_dict = ie._get_heartbeat_info(info_dict)

fd = get_suitable_downloader(info_dict, params=self.params)(self.ydl, self.params)

success = download_complete = False
timer = [None]
heartbeat_lock = threading.Lock()
heartbeat_url = heartbeat_info_dict['url']
heartbeat_data = heartbeat_info_dict['data'].encode()
heartbeat_interval = heartbeat_info_dict.get('interval', 30)

request = Request(heartbeat_url, heartbeat_data)

def heartbeat():
try:
self.ydl.urlopen(request).read()
except Exception:
self.to_screen('[%s] Heartbeat failed' % self.FD_NAME)

with heartbeat_lock:
if not download_complete:
timer[0] = threading.Timer(heartbeat_interval, heartbeat)
timer[0].start()

heartbeat_info_dict['ping']()
self.to_screen('[%s] Heartbeat with %d second interval ...' % (self.FD_NAME, heartbeat_interval))
try:
heartbeat()
if type(fd).__name__ == 'HlsFD':
info_dict.update(ie._extract_m3u8_formats(info_dict['url'], info_dict['id'])[0])
success = fd.real_download(filename, info_dict)
finally:
if heartbeat_lock:
with heartbeat_lock:
timer[0].cancel()
download_complete = True
return success
from ..networking.websocket import WebSocketResponse
from ..utils import DownloadError, str_or_none, truncate_string
from ..utils.traversal import traverse_obj


class NiconicoLiveFD(FileDownloader):
""" Downloads niconico live without being stopped """

def real_download(self, filename, info_dict):
video_id = info_dict['video_id']
ws_url = info_dict['url']
ws_extractor = info_dict['ws']
ws_origin_host = info_dict['origin']
cookies = info_dict.get('cookies')
live_quality = info_dict.get('live_quality', 'high')
live_latency = info_dict.get('live_latency', 'high')
video_id = info_dict['id']
opts = info_dict['downloader_options']
quality, ws_extractor, ws_url = opts['max_quality'], opts['ws'], opts['ws_url']
dl = FFmpegFD(self.ydl, self.params or {})

new_info_dict = info_dict.copy()
new_info_dict.update({
'protocol': 'm3u8',
})
new_info_dict['protocol'] = 'm3u8'

def communicate_ws(reconnect):
if reconnect:
ws = WebSocketsWrapper(ws_url, {
'Cookies': str_or_none(cookies) or '',
'Origin': f'https://{ws_origin_host}',
'Accept': '*/*',
'User-Agent': self.params['http_headers']['User-Agent'],
})
# Support --load-info-json as if it is a reconnect attempt
if reconnect or not isinstance(ws_extractor, WebSocketResponse):
ws = self.ydl.urlopen(Request(
ws_url, headers={'Origin': 'https://live.nicovideo.jp'}))
if self.ydl.params.get('verbose', False):
self.to_screen('[debug] Sending startWatching request')
self.write_debug('Sending startWatching request')
ws.send(json.dumps({
'type': 'startWatching',
'data': {
'stream': {
'quality': live_quality,
'protocol': 'hls+fmp4',
'latency': live_latency,
'chasePlay': False
},
'room': {
'protocol': 'webSocket',
'commentable': True
},
'reconnect': True,
}
'room': {
'commentable': True,
'protocol': 'webSocket',
},
'stream': {
'accessRightMethod': 'single_cookie',
'chasePlay': False,
'latency': 'high',
'protocol': 'hls',
'quality': quality,
},
},
'type': 'startWatching',
}))
else:
ws = ws_extractor
@@ -111,7 +57,6 @@ class NiconicoLiveFD(FileDownloader):
if not data or not isinstance(data, dict):
continue
if data.get('type') == 'ping':
# pong back
ws.send(r'{"type":"pong"}')
ws.send(r'{"type":"keepSeat"}')
elif data.get('type') == 'disconnect':
@@ -119,12 +64,10 @@ class NiconicoLiveFD(FileDownloader):
return True
elif data.get('type') == 'error':
self.write_debug(data)
message = try_get(data, lambda x: x['body']['code'], str) or recv
message = traverse_obj(data, ('body', 'code', {str_or_none}), default=recv)
return DownloadError(message)
elif self.ydl.params.get('verbose', False):
if len(recv) > 100:
recv = recv[:100] + '...'
self.to_screen('[debug] Server said: %s' % recv)
self.write_debug(f'Server response: {truncate_string(recv, 100)}')

def ws_main():
reconnect = False
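A hedged usage sketch of the `traverse_obj` call above (requires yt-dlp installed; the error payload is hypothetical): walk `data['body']['code']`, coerce the value through `str_or_none`, and fall back to the raw message:

```python
from yt_dlp.utils import str_or_none
from yt_dlp.utils.traversal import traverse_obj

recv = '{"type":"error","body":{"code":"CONTENT_NOT_READY"}}'  # raw frame text
data = {'type': 'error', 'body': {'code': 'CONTENT_NOT_READY'}}

# The {str_or_none} path element applies the function to the traversed value
message = traverse_obj(data, ('body', 'code', {str_or_none}), default=recv)
assert message == 'CONTENT_NOT_READY'
```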
@@ -134,7 +77,8 @@ class NiconicoLiveFD(FileDownloader):
if ret is True:
return
except BaseException as e:
self.to_screen('[%s] %s: Connection error occurred, reconnecting after 10 seconds: %s' % ('niconico:live', video_id, str_or_none(e)))
self.to_screen(
f'[niconico:live] {video_id}: Connection error occurred, reconnecting after 10 seconds: {e}')
time.sleep(10)
continue
finally:

@@ -8,7 +8,6 @@ from ..utils import (
Popen,
check_executable,
encodeArgument,
encodeFilename,
get_exe_version,
)

@@ -179,15 +178,15 @@ class RtmpFD(FileDownloader):
return False

while retval in (RD_INCOMPLETE, RD_FAILED) and not test and not live:
prevsize = os.path.getsize(encodeFilename(tmpfilename))
self.to_screen('[rtmpdump] Downloaded %s bytes' % prevsize)
prevsize = os.path.getsize(tmpfilename)
self.to_screen(f'[rtmpdump] Downloaded {prevsize} bytes')
time.sleep(5.0) # This seems to be needed
args = basic_args + ['--resume']
args = [*basic_args, '--resume']
if retval == RD_FAILED:
args += ['--skip', '1']
args = [encodeArgument(a) for a in args]
retval = run_rtmpdump(args)
cursize = os.path.getsize(encodeFilename(tmpfilename))
cursize = os.path.getsize(tmpfilename)
if prevsize == cursize and retval == RD_FAILED:
break
# Some rtmp streams seem to abort after ~ 99.8%. Don't complain for those
@@ -196,8 +195,8 @@ class RtmpFD(FileDownloader):
retval = RD_SUCCESS
break
if retval == RD_SUCCESS or (test and retval == RD_INCOMPLETE):
fsize = os.path.getsize(encodeFilename(tmpfilename))
self.to_screen('[rtmpdump] Downloaded %s bytes' % fsize)
fsize = os.path.getsize(tmpfilename)
self.to_screen(f'[rtmpdump] Downloaded {fsize} bytes')
self.try_rename(tmpfilename, filename)
self._hook_progress({
'downloaded_bytes': fsize,

@@ -2,7 +2,7 @@ import os
import subprocess

from .common import FileDownloader
from ..utils import check_executable, encodeFilename
from ..utils import check_executable


class RtspFD(FileDownloader):
@@ -26,7 +26,7 @@ class RtspFD(FileDownloader):

retval = subprocess.call(args)
if retval == 0:
fsize = os.path.getsize(encodeFilename(tmpfilename))
fsize = os.path.getsize(tmpfilename)
self.to_screen(f'\r[{args[0]}] {fsize} bytes')
self.try_rename(tmpfilename, filename)
self._hook_progress({

@@ -18,7 +18,7 @@ class YoutubeLiveChatFD(FragmentFD):

def real_download(self, filename, info_dict):
video_id = info_dict['video_id']
self.to_screen('[%s] Downloading live chat' % self.FD_NAME)
self.to_screen(f'[{self.FD_NAME}] Downloading live chat')
if not self.params.get('skip_download') and info_dict['protocol'] == 'youtube_live_chat':
self.report_warning('Live chat download runs until the livestream ends. '
'If you wish to download the video simultaneously, run a separate yt-dlp instance')
@@ -123,8 +123,8 @@ class YoutubeLiveChatFD(FragmentFD):
data,
lambda x: x['continuationContents']['liveChatContinuation'], dict) or {}

func = (info_dict['protocol'] == 'youtube_live_chat' and parse_actions_live
or frag_index == 1 and try_refresh_replay_beginning
func = ((info_dict['protocol'] == 'youtube_live_chat' and parse_actions_live)
or (frag_index == 1 and try_refresh_replay_beginning)
or parse_actions_replay)
return (True, *func(live_chat_continuation))
except HTTPError as err:

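The parenthesized rewrite above makes the classic `cond and f or g` selection chain explicit; a minimal standalone sketch of the same pattern (the handler names are made up):

```python
def parse_actions_live():
    return 'live'

def parse_actions_replay():
    return 'replay'

protocol, frag_index = 'youtube_live_chat_replay', 2

# The first truthy branch wins; falsy conditions fall through to the default
func = ((protocol == 'youtube_live_chat' and parse_actions_live)
        or (frag_index == 1 and parse_actions_live)  # stand-in for the refresh branch
        or parse_actions_replay)
assert func() == 'replay'
```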
@@ -1,16 +1,25 @@
from ..compat.compat_utils import passthrough_module
from ..globals import extractors as _extractors_context
from ..globals import plugin_ies as _plugin_ies_context
from ..plugins import PluginSpec, register_plugin_spec

passthrough_module(__name__, '.extractors')
del passthrough_module

register_plugin_spec(PluginSpec(
module_name='extractor',
suffix='IE',
destination=_extractors_context,
plugin_destination=_plugin_ies_context,
))


def gen_extractor_classes():
""" Return a list of supported extractors.
The order does matter; the first extractor matched is the one handling the URL.
"""
from .extractors import _ALL_CLASSES

return _ALL_CLASSES
import_extractors()
return list(_extractors_context.value.values())


def gen_extractors():
@@ -37,6 +46,9 @@ def list_extractors(age_limit=None):

def get_info_extractor(ie_name):
"""Returns the info extractor class with the given ie_name"""
from . import extractors
import_extractors()
return _extractors_context.value[f'{ie_name}IE']

return getattr(extractors, f'{ie_name}IE')

def import_extractors():
from . import extractors  # noqa: F401
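A hedged usage sketch of the lazy registry above (requires yt-dlp installed): `get_info_extractor('Youtube')` triggers `import_extractors()` and then resolves `'YoutubeIE'` from the populated context:

```python
from yt_dlp.extractor import gen_extractor_classes, get_info_extractor

YoutubeIE = get_info_extractor('Youtube')   # looks up 'YoutubeIE' in the registry
print(YoutubeIE.IE_NAME)                    # -> 'youtube'

# The full, ordered extractor list is materialized the same lazy way
print(len(gen_extractor_classes()))
```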