Compare commits: a5f864b802...develop (38 commits)

| SHA1 |
|---|
| d55bc3ae97 |
| 5c808c579a |
| f58bc53c24 |
| 21120cd61e |
| c74f97aca7 |
| 5e9354ba58 |
| 6a3bfbeb13 |
| ad06ea50ad |
| 9932f34782 |
| 76a4b2e0e2 |
| 0942af249b |
| 28fefafa6f |
| d96ace1504 |
| e0723e7b9e |
| d68d9ce4f9 |
| 3a2e8eeb08 |
| 35456f2bca |
| 9d3a5b9f3b |
| ce00970171 |
| 2f954f4c79 |
| a362039e73 |
| 02c31719d1 |
| d65ea8dec8 |
| fec0d26ab7 |
| a47bd23e78 |
| 44ef6ea2bb |
| be7be00f78 |
| 8e5ae4824c |
| 37e3265be5 |
| 4cafb7ff9f |
| 9336df2afa |
| d936b17429 |
| e6739c3087 |
| 2de4de6b22 |
| 39959dc947 |
| ce0f45e168 |
| 8862a80eea |
| 6fe4db7c63 |

.gitignore (vendored, 2 additions)

@@ -1,3 +1,5 @@
+cookies.txt
+
 docs/
 .idea/
 *.zip

@@ -2,13 +2,11 @@
 SolarFM is a Gtk+ Python file manager.
 
 # Notes
-<b>Still Work in progress! Use at own risk!</b>
-
-Additionally, if not building a .deb then just move the contents of user_config to their respective folders.
+If not building a .deb then just move the contents of user_config to their respective folders.
 Copy the share/solarfm folder to your user .config/ directory too.
 
 `pyrightconfig.json`
-<p>The pyrightconfig file needs to stay on same level as the .git folders in order to have settings detected when using pyright with lsp functionality.</p>
+<p>The pyrightconfig file needs to stay on same level as the .git folders in order to have settings detected when using pyright with lsp functionality. "pyrightconfig.json" can prompt IDEs such as Zed on settings to use and where imports are located- look at venvPath and venv. "venvPath" is parent path of "venv" where "venv" is just the name of the folder under the parent path that is the python created venv.
 
 <h6>Install Setup</h6>
 ```

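For reference, a minimal `pyrightconfig.json` in the spirit of the note above could look like the sketch below; the folder name "venv" and its location are assumptions for illustration, not values taken from this repository:

```json
{
    "venvPath": ".",
    "venv": "venv"
}
```

Here `venvPath` is the parent directory and `venv` names the virtual-environment folder inside it, matching how the README describes the two keys.
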
@@ -17,7 +15,7 @@ sudo apt-get install xclip python3.8 python3-setproctitle python3-gi wget ffmpeg
 
 # Known Issues
 <ul>
-<li>There's a memory leak. Still analyzing where exactly.</li>
+<li>There is a memory leak that has been slowed down but can get to 2GB over a long enough time period OR active accessing image based dirs.</li>
 <li>Doing Ctrl+D when in Terminator (maybe other terminals too) somehow propagates the signal to SolarFM too.
 A selected file in the active quad-pane will move to trash since it is the default key-binding for that action.</li>
 </ul>

@@ -25,7 +23,6 @@ A selected file in the active quad-pane will move to trash since it is the defau
 # TODO
 <ul>
 <li>Add simpleish preview plugin for various file types.</li>
-<li>Add simpleish bulk-renamer.</li>
 </ul>
 
 # Images

BIN images/pic1.png (Size: 504 KiB → 1.0 MiB)
BIN images/pic2.png (Size: 316 KiB → 1.1 MiB)
BIN images/pic3.png (Size: 307 KiB → 1.2 MiB)
BIN images/pic4.png (Size: 464 KiB → 1.3 MiB)

@@ -14,6 +14,7 @@ class Manifest:
     'ui_target': "plugin_control_list",
     'pass_fm_events': "true"
 }
+pre_launch: bool = False
 ```
 
 

@@ -1,12 +1,10 @@
 {
-    "manifest": {
     "name": "Archiver",
     "author": "ITDominator",
     "version": "0.0.1",
     "support": "",
     "requests": {
         "ui_target": "context_menu_plugins",
-        "pass_fm_events": "true"
+        "pass_fm_events": true
     }
-    }
 }

@@ -1,12 +1,10 @@
 {
-    "manifest": {
     "name": "Disk Usage",
     "author": "ITDominator",
     "version": "0.0.1",
     "support": "",
     "requests": {
         "ui_target": "context_menu_plugins",
-        "pass_fm_events": "true"
+        "pass_fm_events": true
     }
-    }
 }

@@ -1,14 +1,12 @@
 {
-    "manifest": {
     "name": "Favorites",
     "author": "ITDominator",
     "version": "0.0.1",
     "support": "",
     "requests": {
         "ui_target": "main_menu_bttn_box_bar",
-        "pass_fm_events": "true",
+        "pass_fm_events": true,
         "pass_ui_objects": ["path_entry"],
         "bind_keys": ["Favorites||show_favorites_menu:<Control>f"]
     }
-    }
 }

@@ -1,12 +1,10 @@
 {
-    "manifest": {
     "name": "Properties",
     "author": "ITDominator",
     "version": "0.0.1",
     "support": "",
     "requests": {
         "ui_target": "context_menu",
-        "pass_fm_events": "true"
+        "pass_fm_events": true
     }
-    }
 }

@@ -122,7 +122,6 @@ class Plugin(PluginBase):
         uri = state.uris[0]
         path = state.tab.get_current_directory()
 
-
         properties = self._set_ui_data(uri, path)
         response = self._properties_dialog.run()
         if response in [Gtk.ResponseType.CANCEL, Gtk.ResponseType.DELETE_EVENT]:

@@ -168,13 +167,13 @@ class Plugin(PluginBase):
 
     def _set_ui_data(self, uri, path):
         properties = Properties()
-        file_info = Gio.File.new_for_path(uri).query_info(attributes="standard::*,owner::*,time::access,time::changed",
-                                                           flags=Gio.FileQueryInfoFlags.NONE,
-                                                           cancellable=None)
+        file_info = Gio.File.new_for_path(uri).query_info(attributes = "standard::*,owner::*,time::access,time::changed",
+                                                           flags = Gio.FileQueryInfoFlags.NONE,
+                                                           cancellable = None)
 
         is_symlink = file_info.get_attribute_as_string("standard::is-symlink")
         properties.file_uri = uri
-        properties.file_target = file_info.get_attribute_as_string("standard::symlink-target") if is_symlink else ""
+        properties.file_target = file_info.get_attribute_as_string("standard::symlink-target") if is_symlink in [True, "TRUE"] else ""
         properties.file_name = file_info.get_display_name()
         properties.file_location = path
         properties.mime_type = file_info.get_content_type()

@@ -186,7 +185,7 @@ class Plugin(PluginBase):
 
         # NOTE: Read = 4, Write = 2, Exec = 1
         command = ["stat", "-c", "%a", uri]
-        with subprocess.Popen(command, stdout=subprocess.PIPE) as proc:
+        with subprocess.Popen(command, stdout = subprocess.PIPE) as proc:
             properties.chmod_stat = list(proc.stdout.read().decode("UTF-8").strip())
             owner = self._chmod_map[f"{properties.chmod_stat[0]}"]
             group = self._chmod_map[f"{properties.chmod_stat[1]}"]

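As a side note, the mapping that the `# NOTE: Read = 4, Write = 2, Exec = 1` comment refers to can be sketched as below; this is an illustrative stand-in, not the plugin's actual `_chmod_map` definition:

```python
import subprocess

# Each octal digit from `stat -c %a` is the sum of Read = 4, Write = 2, Exec = 1.
CHMOD_MAP = {
    "0": "---", "1": "--x", "2": "-w-", "3": "-wx",
    "4": "r--", "5": "r-x", "6": "rw-", "7": "rwx",
}

def rwx_triplets(path: str) -> list:
    octal = subprocess.run(["stat", "-c", "%a", path],
                           capture_output=True, text=True, check=True).stdout.strip()
    # Use the last three digits so a setuid/setgid/sticky prefix (e.g. "4755") is ignored.
    return [CHMOD_MAP[digit] for digit in octal[-3:]]

# rwx_triplets("/usr/bin/python3") -> ['rwx', 'r-x', 'r-x'] for owner, group, other
```
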
@@ -1,12 +1,10 @@
 {
-    "manifest": {
     "name": "Git Clone",
     "author": "ITDominator",
     "version": "0.0.1",
     "support": "",
     "requests": {
         "ui_target": "plugin_control_list",
-        "pass_fm_events": "true"
+        "pass_fm_events": true
     }
-    }
 }

@@ -1,12 +1,10 @@
 {
-    "manifest": {
     "name": "Movie/TV Info",
     "author": "ITDominator",
     "version": "0.0.1",
     "support": "",
     "requests": {
         "ui_target": "context_menu_plugins",
-        "pass_fm_events": "true"
+        "pass_fm_events": true
     }
-    }
 }

@@ -1,13 +1,11 @@
 {
-    "manifest": {
     "name": "PyRun",
     "author": "ITDominator",
     "version": "0.0.1",
     "support": "",
     "requests": {
         "ui_target": "plugin_control_list",
-        "pass_fm_events": "true",
+        "pass_fm_events": true,
         "bind_keys": ["PyRun||send_message:<Shift><Control>r"]
     }
-    }
 }

@@ -1,13 +1,11 @@
 {
-    "manifest": {
     "name": "Search",
     "author": "ITDominator",
     "version": "0.0.1",
     "support": "",
     "requests": {
         "ui_target": "context_menu",
-        "pass_fm_events": "true",
+        "pass_fm_events": true,
         "bind_keys": ["Search||show_search_page:<Control>s"]
     }
-    }
 }

@@ -18,22 +18,22 @@ from ..widgets.file_preview_widget import FilePreviewWidget
 
 
 # NOTE: Threads WILL NOT die with parent's destruction.
-def threaded(fn):
-    def wrapper(*args, **kwargs):
-        threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=False).start()
-
-    return wrapper
-
+# def threaded(fn):
+#     def wrapper(*args, **kwargs):
+#         threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=False).start()
+#
+#     return wrapper
+#
 # NOTE: Threads WILL die with parent's destruction.
-def daemon_threaded(fn):
-    def wrapper(*args, **kwargs):
-        threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=True).start()
-
-    return wrapper
+# def daemon_threaded(fn):
+#     def wrapper(*args, **kwargs):
+#         threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=True).start()
+#
+#     return wrapper
 
 
 class FileSearchMixin:
-    def _run_find_file_query(self, widget=None, eve=None):
+    def _run_find_file_query(self, widget = None, eve = None):
         self._queue_search = True
 
         if not self._search_watcher_running:

@@ -43,6 +43,50 @@ class FileSearchMixin:
             self.reset_file_list_box()
             self.run_fsearch_watcher(query=widget)
+
+
+
+
+
+
+
+
+    # Need to implement this over the threaded stuffs....
+
+    #
+    # def cancel_timer(self):
+    #     if self.timer:
+    #         self.timer.cancel()
+    #         GLib.idle_remove_by_data(None)
+    #
+    # def delay_search_glib(self, query):
+    #     GLib.idle_add(self._exec_find_file_query, *(query,))
+    #
+    # def delay_search(self):
+    #     wait_time = self.search_time / len(self.search_text)
+    #     wait_time = max(wait_time, 0.05)
+    #
+    #     self.timer = threading.Timer(wait_time, self.delay_search_glib, *(query,))
+    #     self.timer.daemon = True
+    #     self.timer.start()
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
 
     @daemon_threaded
     def run_fsearch_watcher(self, query):
         while True:

@@ -48,7 +48,7 @@ class GrepPreviewWidget(Gtk.Box):
         return bytes(f"\n<span foreground='{color}'>{target}</span>", "utf-8").decode("utf-8")
 
     def make_utf8_line_highlight(self, buffer, itr, i, color, target, query):
-        parts = re.split(r"(" + query + ")(?i)", target.replace("\n", ""))
+        parts = re.split(r"(?i)(" + query + ")", target.replace("\n", ""))
         for part in parts:
             itr = buffer.get_end_iter()
 

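The regex change above moves the case-insensitive flag to the front of the pattern: Python 3.11 turned a misplaced global inline flag such as a trailing `(?i)` into an error, so the new form is the portable one. A small standalone illustration (the sample strings are made up, and `re.escape` is an extra precaution not present in the original call):

```python
import re

query = "needle"
line = "A Needle in a haystack, another NEEDLE here"

# Global inline flags must lead the pattern on Python 3.11+.
parts = re.split(r"(?i)(" + re.escape(query) + ")", line)
print(parts)  # ['A ', 'Needle', ' in a haystack, another ', 'NEEDLE', ' here']
```
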
@@ -1,13 +1,12 @@
 {
-    "manifest": {
     "name": "Example Plugin",
     "author": "John Doe",
     "version": "0.0.1",
     "support": "",
     "requests": {
         "ui_target": "plugin_control_list",
-        "pass_fm_events": "true",
+        "pass_fm_events": true,
         "bind_keys": ["Example Plugin||send_message:<Control>f"]
-    }
-    }
+    },
+    "pre_launch": false
 }

plugins/thumbnailer/__init__.py (new file, 3 lines)

@@ -0,0 +1,3 @@
+"""
+    Pligin Module
+"""

plugins/thumbnailer/__main__.py (new file, 3 lines)

@@ -0,0 +1,3 @@
+"""
+    Pligin Package
+"""

plugins/thumbnailer/icons/controller.py (new file, 75 lines)

@@ -0,0 +1,75 @@
+# Python imports
+import json
+import os
+from os import path
+
+# Lib imports
+import gi
+gi.require_version('Gtk', '3.0')
+from gi.repository import Gtk
+
+# Application imports
+from .icon import Icon
+
+
+
+class IconController(Icon):
+    def __init__(self):
+        CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
+
+        # NOTE: app_name should be defined using python 'builtins' and so too must be logger used in the various classes
+        app_name_exists = False
+        try:
+            app_name
+            app_name_exists = True
+        except Exception as e:
+            ...
+
+        APP_CONTEXT = f"{app_name.lower()}" if app_name_exists else "shellfm"
+        USR_APP_CONTEXT = f"/usr/share/{APP_CONTEXT}"
+        USER_HOME = path.expanduser('~')
+        CONFIG_PATH = f"{USER_HOME}/.config/{APP_CONTEXT}"
+        self.DEFAULT_ICONS = f"{CONFIG_PATH}/icons"
+        self.DEFAULT_ICON = f"{self.DEFAULT_ICONS}/text.png"
+        self.FFMPG_THUMBNLR = f"{CONFIG_PATH}/ffmpegthumbnailer"  # Thumbnail generator binary
+        self.BLENDER_THUMBNLR = f"{CONFIG_PATH}/blender-thumbnailer"  # Blender thumbnail generator binary
+
+        self.ICON_DIRS = ["/usr/share/icons", f"{USER_HOME}/.icons" "/usr/share/pixmaps"]
+        self.BASE_THUMBS_PTH = f"{USER_HOME}/.thumbnails"
+        self.ABS_THUMBS_PTH = f"{self.BASE_THUMBS_PTH}/normal"
+        self.STEAM_ICONS_PTH = f"{self.BASE_THUMBS_PTH}/steam_icons"
+        self.STEAM_CDN_URL = ""
+
+        if not path.isdir(self.BASE_THUMBS_PTH):
+            os.mkdir(self.BASE_THUMBS_PTH)
+
+        if not path.isdir(self.ABS_THUMBS_PTH):
+            os.mkdir(self.ABS_THUMBS_PTH)
+
+        if not path.isdir(self.STEAM_ICONS_PTH):
+            os.mkdir(self.STEAM_ICONS_PTH)
+
+        if not os.path.exists(self.DEFAULT_ICONS):
+            self.DEFAULT_ICONS = f"{USR_APP_CONTEXT}/icons"
+            self.DEFAULT_ICON = f"{self.DEFAULT_ICONS}/text.png"
+
+        CONFIG_FILE = f"{CURRENT_PATH}/../settings.json"
+        with open(CONFIG_FILE) as f:
+            settings = json.load(f)
+            config = settings["config"]
+
+        self.container_icon_wh = config["container_icon_wh"]
+        self.video_icon_wh = config["video_icon_wh"]
+        self.sys_icon_wh = config["sys_icon_wh"]
+        self.STEAM_CDN_URL = config["steam_cdn_url"]
+
+        # Filters
+        filters = settings["filters"]
+        self.fmeshs = tuple(filters["meshs"])
+        self.fcode = tuple(filters["code"])
+        self.fvideos = tuple(filters["videos"])
+        self.foffice = tuple(filters["office"])
+        self.fimages = tuple(filters["images"])
+        self.ftext = tuple(filters["text"])
+        self.fmusic = tuple(filters["music"])
+        self.fpdf = tuple(filters["pdf"])

@@ -29,13 +29,17 @@ class IconException(Exception):
 
 
 class Icon(DesktopIconMixin, VideoIconMixin, MeshsIconMixin):
+    cache = {}
 
     def create_icon(self, dir, file):
         full_path = f"{dir}/{file}"
         return self.get_icon_image(dir, file, full_path)
 
     def get_icon_image(self, dir, file, full_path):
         try:
-            thumbnl = None
+            thumbnl = self.cache.get(full_path)
+            if thumbnl:
+                return thumbnl
+
             if file.lower().endswith(self.fmeshs):  # 3D Mesh icon
                 ...

@@ -50,12 +54,12 @@ class Icon(DesktopIconMixin, VideoIconMixin, MeshsIconMixin):
 
             if not thumbnl:
                 # TODO: Detect if not in a thread and use directly for speed get_system_thumbnail
-                thumbnl = self.get_system_thumbnail(full_path, self.sys_icon_wh[0])
-                # thumbnl = self._get_system_thumbnail_gtk_thread(full_path, self.sys_icon_wh[0])
+                # thumbnl = self.get_system_thumbnail(full_path, self.sys_icon_wh[0])
+                thumbnl = self._get_system_thumbnail_gtk_thread(full_path, self.sys_icon_wh[0])
             if not thumbnl:
                 raise IconException("No known icons found.")
 
+            self.cache[full_path] = thumbnl
             return thumbnl
         except IconException:
             ...

@@ -138,11 +142,14 @@ class Icon(DesktopIconMixin, VideoIconMixin, MeshsIconMixin):
         def _call_gtk_thread(event, result):
             result.append( self.get_system_thumbnail(full_path, size) )
             event.set()
+            return False
+
         result = []
         event = threading.Event()
         GLib.idle_add(_call_gtk_thread, event, result)
         event.wait()
 
+        event = None
         return result[0]
 
 

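The added `return False` makes the idle callback one-shot, and clearing `event` drops the reference once the wait is over. The general shape of this "call into the GTK main loop from a worker thread and wait for the answer" pattern, written as a generic sketch rather than SolarFM's exact helper, is:

```python
import threading
from gi.repository import GLib

def call_on_main_thread(func, *args):
    """Run func(*args) on the GTK main loop and block the calling worker thread."""
    done = threading.Event()
    result = []

    def _idle_cb():
        result.append(func(*args))
        done.set()
        return False  # one-shot: remove this idle source after it runs

    GLib.idle_add(_idle_cb)
    done.wait()
    return result[0]
```
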
@@ -151,11 +158,12 @@ class Icon(DesktopIconMixin, VideoIconMixin, MeshsIconMixin):
             gio_file = Gio.File.new_for_path(full_path)
             info = gio_file.query_info('standard::icon' , 0, None)
             icon = info.get_icon().get_names()[0]
-            data = settings_manager.get_icon_theme().lookup_icon(icon , size , 0)
+            data = settings_manager.get_icon_theme().lookup_icon(icon , size, 0)
 
             if data:
                 icon_path = data.get_filename()
-                return GdkPixbuf.Pixbuf.new_from_file(icon_path)
+                return GdkPixbuf.Pixbuf.new_from_file_at_size(icon_path, width = size, height = size)
 
             raise IconException("No system icon found...")
         except IconException:

@@ -14,4 +14,4 @@ class MeshsIconMixin:
             proc = subprocess.Popen([self.BLENDER_THUMBNLR, full_path, hash_img_path])
             proc.wait()
         except Exception as e:
-            self.logger.debug(repr(e))
+            logger.debug(repr(e))

@@ -14,7 +14,7 @@ class VideoIconMixin:
             proc = subprocess.Popen([self.FFMPG_THUMBNLR, "-t", scrub_percent, "-s", "300", "-c", "jpg", "-i", full_path, "-o", hash_img_path])
             proc.wait()
         except Exception as e:
-            self.logger.debug(repr(e))
+            logger.info(e)
             self.ffprobe_generate_video_thumbnail(full_path, hash_img_path)
 
 

@@ -51,5 +51,4 @@ class VideoIconMixin:
                 proc.wait()
         except Exception as e:
             print("Video thumbnail generation issue in thread:")
-            print( repr(e) )
-            self.logger.debug(repr(e))
+            logger.info(repr(e))

plugins/thumbnailer/manifest.json (new file, 10 lines)

@@ -0,0 +1,10 @@
+{
+    "name": "Thumbnailer",
+    "author": "ITDominator",
+    "version": "0.0.1",
+    "support": "",
+    "pre_launch": true,
+    "requests": {
+        "pass_fm_events": true
+    }
+}

plugins/thumbnailer/plugin.py (new file, 75 lines)

@@ -0,0 +1,75 @@
+# Python imports
+import os
+
+# Lib imports
+
+# Application imports
+from plugins.plugin_base import PluginBase
+from .icons.controller import IconController
+
+
+
+class Plugin(PluginBase):
+    def __init__(self):
+        super().__init__()
+
+        self.name = "Thumbnailer"  # NOTE: Need to remove after establishing private bidirectional 1-1 message bus
+        # where self.name should not be needed for message comms
+        # self.path = os.path.dirname(os.path.realpath(__file__))
+
+
+    def run(self):
+        self.icon_controller = IconController()
+        self._event_system.subscribe("create-thumbnail", self.create_thumbnail)
+        self._event_system.subscribe("create-video-thumbnail", self.create_video_thumbnail)
+        self._event_system.subscribe("create-scaled-image", self.create_scaled_image)
+        self._event_system.subscribe("get-thumbnail-hash", self.get_thumbnail_hash)
+        self._event_system.subscribe("get-thumbnails-path", self.get_thumbnails_path)
+
+    def generate_reference_ui_element(self):
+        ...
+
+    def create_thumbnail(self, dir, file) -> str:
+        return self.icon_controller.create_icon(dir, file)
+
+    def create_video_thumbnail(self, file, scrub_percent, replace):
+        return self.icon_controller.create_video_thumbnail(file, scrub_percent, replace)
+
+    def create_scaled_image(self, hash_img_pth):
+        return self.icon_controller.create_scaled_image(hash_img_pth)
+
+    def get_thumbnail_hash(self, file):
+        return self.icon_controller.generate_hash_and_path(file)
+
+    def get_thumbnails_path(self) -> str:
+        return self.icon_controller.ABS_THUMBS_PTH
+
+    def get_video_icons(self, dir) -> list:
+        data = []
+
+    def get_video_icons(self) -> list:
+        data = []
+        fvideos = self.icon_controller.fvideos
+        vids = [ file for file in os.path.list_dir(dir) if file.lower().endswith(fvideos) ]
+
+        for file in vids:
+            img_hash, hash_img_path = self.create_video_thumbnail(full_path = f"{dir}/{file}", returnHashInstead = True)
+            data.append([img_hash, hash_img_path])
+
+        return data
+
+    def get_pixbuf_icon_str_combo(self, dir) -> list:
+        data = []
+        for file in os.path.list_dir(dir):
+            icon = self.icon_controller.create_icon(dir, file).get_pixbuf()
+            data.append([icon, file])
+
+        return data
+
+    def get_gtk_icon_str_combo(self, dir) -> list:
+        data = []
+        for file in os.path.list_dir(dir):
+            icon = self.icon_controller.create_icon(dir, file)
+            data.append([icon, file[0]])
+
+        return data

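These `subscribe()` calls pair with the `emit_and_await()` calls that the VOD Thumbnailer hunks further down switch to. The EventSystem itself is not part of this diff; the stand-in below only illustrates the round trip implied by those two method names and is an assumption, not the project's implementation:

```python
class EventSystem:
    """Toy pub/sub bus: one synchronous subscriber answers each event type."""
    def __init__(self):
        self._subscribers = {}

    def subscribe(self, event_type, callback):
        self._subscribers.setdefault(event_type, []).append(callback)

    def emit_and_await(self, event_type, payload=()):
        # Call the first subscriber and hand its return value back to the emitter.
        if not isinstance(payload, tuple):
            payload = (payload,)
        return self._subscribers[event_type][0](*payload)

bus = EventSystem()
bus.subscribe("get-thumbnails-path", lambda: "/tmp/.thumbnails/normal")
print(bus.emit_and_await("get-thumbnails-path"))  # /tmp/.thumbnails/normal
```
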
plugins/thumbnailer/settings.json (new file, 101 lines)

@@ -0,0 +1,101 @@
+{
+    "config":{
+        "thumbnailer_path":"ffmpegthumbnailer",
+        "blender_thumbnailer_path":"",
+        "container_icon_wh":[
+            128,
+            128
+        ],
+        "video_icon_wh":[
+            128,
+            64
+        ],
+        "sys_icon_wh":[
+            56,
+            56
+        ],
+        "steam_cdn_url":"https://steamcdn-a.akamaihd.net/steam/apps/",
+        "remux_folder_max_disk_usage":"8589934592"
+    },
+    "filters":{
+        "meshs":[
+            ".dae",
+            ".fbx",
+            ".gltf",
+            ".obj",
+            ".stl"
+        ],
+        "code":[
+            ".cpp",
+            ".css",
+            ".c",
+            ".go",
+            ".html",
+            ".htm",
+            ".java",
+            ".js",
+            ".json",
+            ".lua",
+            ".md",
+            ".py",
+            ".rs",
+            ".toml",
+            ".xml",
+            ".pom"
+        ],
+        "videos":[
+            ".mkv",
+            ".mp4",
+            ".webm",
+            ".avi",
+            ".mov",
+            ".m4v",
+            ".mpg",
+            ".mpeg",
+            ".wmv",
+            ".flv"
+        ],
+        "office":[
+            ".doc",
+            ".docx",
+            ".xls",
+            ".xlsx",
+            ".xlt",
+            ".xltx",
+            ".xlm",
+            ".ppt",
+            ".pptx",
+            ".pps",
+            ".ppsx",
+            ".odt",
+            ".rtf"
+        ],
+        "images":[
+            ".png",
+            ".jpg",
+            ".jpeg",
+            ".gif",
+            ".ico",
+            ".tga",
+            ".webp"
+        ],
+        "text":[
+            ".txt",
+            ".text",
+            ".sh",
+            ".cfg",
+            ".conf",
+            ".log"
+        ],
+        "music":[
+            ".psf",
+            ".mp3",
+            ".ogg",
+            ".flac",
+            ".m4a"
+        ],
+        "pdf":[
+            ".pdf"
+        ]
+    }
+}

@@ -1,12 +1,10 @@
 {
-    "manifest": {
     "name": "Translate",
     "author": "ITDominator",
     "version": "0.0.1",
     "support": "",
     "requests": {
         "ui_target": "plugin_control_list",
-        "pass_fm_events": "true"
+        "pass_fm_events": true
     }
-    }
 }

@@ -184,8 +184,8 @@ class Plugin(PluginBase):
         response = requests.post(self.vqd_link, headers=self.vqd_headers, data=self.vqd_data, timeout=2)
         if response.status_code == 200:
             data = response.content
-            vqd_start_index = data.index(b"vqd='") + 5
-            vqd_end_index = data.index(b"'", vqd_start_index)
+            vqd_start_index = data.index(b"vqd=\"") + 5
+            vqd_end_index = data.index(b"\"", vqd_start_index)
             self._vqd_attrib = data[vqd_start_index:vqd_end_index].decode("utf-8")
 
             print(f"Translation VQD: {self._vqd_attrib}")

@@ -1,16 +1,14 @@
 {
-    "manifest": {
     "name": "Trasher",
     "author": "ITDominator",
     "version": "0.0.1",
     "support": "",
     "requests": {
         "ui_target": "context_menu",
-        "pass_fm_events": "true",
+        "pass_fm_events": true,
         "bind_keys": [
             "Trasher||delete_files:Delete",
             "Trasher||trash_files:<Control>d"
         ]
     }
-    }
 }

@@ -111,6 +111,8 @@ class Plugin(PluginBase):
         for uri in state.uris:
             self.trashman.trash(uri, verbocity)
 
+        self.trashman.regenerate()
+
     def restore_trash_files(self, widget = None, eve = None, verbocity = False):
         self._event_system.emit("get_current_state")
         state = self._fm_state

@@ -43,4 +43,4 @@ class Trash(object):
 
     def restore(self, filename, verbose):
         """Restore a file from trash."""
-        raise NotImplementedError(_('Backend didn’t \ implement this functionality'))
+        raise NotImplementedError(_('Backend didn’t implement this functionality'))

@@ -127,7 +127,7 @@ DeletionDate={}
         f.write(infofile)
         f.close()
 
-        self.regenerate()
+        # self.regenerate()
 
         if verbose:
             sys.stderr.write(_('trashed \'{}\'\n').format(filename))

@@ -1,12 +1,10 @@
 {
-    "manifest": {
     "name": "VOD Thumbnailer",
     "author": "ITDominator",
     "version": "0.0.1",
     "support": "",
     "requests": {
         "ui_target": "context_menu_plugins",
-        "pass_fm_events": "true"
+        "pass_fm_events": true
    }
-    }
 }

@@ -26,6 +26,8 @@ def threaded(fn):
     return wrapper
 
 
+class VODThumbnailerException(Exception):
+    ...
 
 
 class Plugin(PluginBase):

@@ -94,32 +96,39 @@ class Plugin(PluginBase):
         file = self._file_name.get_text()
         dir = self._file_location.get_text()
         file_hash = self._file_hash.get_text()
-        hash_img_pth = f"{self._fm_state.tab.ABS_THUMBS_PTH}/{file_hash}.jpg"
+        hash_img_pth = f"{self.ABS_THUMBS_PTH}/{file_hash}.jpg"
 
         try:
-            self._fm_state.tab.create_video_thumbnail(f"{dir}/{file}", f"{scrub_percent}%", True)
+            self._event_system.emit_and_await("create-video-thumbnail", (f"{dir}/{file}", f"{scrub_percent}%", True,))
             preview_pixbuf = GdkPixbuf.Pixbuf.new_from_file(hash_img_pth)
             self._thumbnail_preview_img.set_from_pixbuf(preview_pixbuf)
 
-            img_pixbuf = self._fm_state.tab.create_scaled_image(hash_img_pth)
+            img_pixbuf = self._event_system.emit_and_await("create-scaled-image", (hash_img_pth,))
             tree_pth = self._fm_state.icon_grid.get_selected_items()[0]
             itr = self._fm_state.store.get_iter(tree_pth)
             pixbuff = self._fm_state.store.get(itr, 0)[0]
             self._fm_state.store.set(itr, 0, img_pixbuf)
         except Exception as e:
-            print(repr(e))
             print("Couldn't regenerate thumbnail!")
+            print(repr(e))
 
 
     def _set_ui_data(self):
         uri = self._fm_state.uris[0]
         path = self._fm_state.tab.get_current_directory()
         parts = uri.split("/")
-        file_hash = self._fm_state.tab.fast_hash(uri)
-        hash_img_pth = f"{self._fm_state.tab.ABS_THUMBS_PTH}/{file_hash}.jpg"
+        path_exists, \
+        img_hash, \
+        hash_img_pth = self._event_system.emit_and_await("get-thumbnail-hash", (uri,))
+
+        if not path_exists:
+            raise VODThumbnailerException(f"Could not generate file_hash from: {uri}")
+
+
+        self.ABS_THUMBS_PTH = self._event_system.emit_and_await("get-thumbnails-path")
         preview_pixbuf = GdkPixbuf.Pixbuf.new_from_file(hash_img_pth)
 
         self._thumbnail_preview_img.set_from_pixbuf(preview_pixbuf)
         self._file_name.set_text(parts[ len(parts) - 1 ])
         self._file_location.set_text(path)
-        self._file_hash.set_text(file_hash)
+        self._file_hash.set_text(img_hash)

@@ -8,12 +8,29 @@
 
 
 function main() {
-    cd "$(dirname "")"
-    echo "Working Dir: " $(pwd)
+    _STARGET="${1}"
+    _SPATH="${HOME}/.config/solarfm/plugins/youtube_download"
     LINK=`xclip -selection clipboard -o`
 
-    python "${HOME}/.config/solarfm/plugins/youtube_download/yt_dlp/__main__.py" \
-    --write-sub --embed-sub --sub-langs en \
-    -o "${1}/%(title)s.%(ext)s" "${LINK}"
+    cd "${_SPATH}"
+    echo "Working Dir: " $(pwd)
+
+    rm "${_SPATH}/../../cookies.txt"
+
+    # Note: Export cookies to file
+    python "${_SPATH}/yt_dlp/__main__.py" \
+    --cookies-from-browser firefox --cookies "${_SPATH}/../../cookies.txt"
+
+    # Note: Use cookies from browser directly
+    # python "${_SPATH}/yt_dlp/__main__.py" \
+    # --cookies-from-browser firefox --write-sub --embed-sub --sub-langs en \
+    # -o "${_STARGET}/%(title)s.%(ext)s" "${LINK}"
+
+    # Note: Download video
+    python "${_SPATH}/yt_dlp/__main__.py" \
+    -f "bestvideo[height<=1080][ext=mp4][vcodec^=av]+bestaudio[ext=m4a]/best[ext=mp4]/best" \
+    --cookies "${_SPATH}/../../cookies.txt" --write-sub --embed-sub --sub-langs en \
+    -o "${_STARGET}/%(title)s.%(ext)s" "${LINK}"
 
 }
 main "$@";

@@ -1,12 +1,10 @@
 {
-    "manifest": {
     "name": "Youtube Download",
     "author": "ITDominator",
     "version": "0.0.1",
     "support": "",
     "requests": {
         "ui_target": "plugin_control_list",
-        "pass_fm_events": "true"
+        "pass_fm_events": true
     }
-    }
 }

@@ -47,4 +47,4 @@ class Plugin(PluginBase):
 
     @threaded
    def _download(self, dir):
-        subprocess.Popen([f'{self.path}/download.sh', dir])
+        subprocess.Popen([f'{self.path}/download.sh', dir], start_new_session=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, close_fds=True)

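The new `Popen` keywords detach the helper script from the file manager. A standalone sketch of the same idea, with placeholder paths rather than the plugin's real ones:

```python
import subprocess

subprocess.Popen(
    ["/path/to/download.sh", "/path/to/target_dir"],  # placeholder paths
    start_new_session=True,       # run in its own session, so it is not tied to the parent
    stdout=subprocess.DEVNULL,    # discard output instead of blocking on an unread pipe
    stderr=subprocess.DEVNULL,
    close_fds=True,               # don't inherit the parent's file descriptors
)
```
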
@@ -1,10 +1,10 @@
-try:
-    import contextvars  # noqa: F401
-except Exception:
-    raise Exception(
-        f'You are using an unsupported version of Python. Only Python versions 3.7 and above are supported by yt-dlp')  # noqa: F541
+import sys
 
-__license__ = 'Public Domain'
+if sys.version_info < (3, 10):
+    raise ImportError(
+        f'You are using an unsupported version of Python. Only Python versions 3.10 and above are supported by yt-dlp')  # noqa: F541
+
+__license__ = 'The Unlicense'
 
 import collections
 import getpass

@@ -12,15 +12,16 @@ import itertools
 import optparse
 import os
 import re
-import sys
 import traceback
 
-from .compat import compat_shlex_quote
-from .cookies import SUPPORTED_BROWSERS, SUPPORTED_KEYRINGS
+from .cookies import SUPPORTED_BROWSERS, SUPPORTED_KEYRINGS, CookieLoadError
 from .downloader.external import get_external_downloader
 from .extractor import list_extractor_classes
 from .extractor.adobepass import MSO_INFO
+from .networking.impersonate import ImpersonateTarget
+from .globals import IN_CLI, plugin_dirs
 from .options import parseOpts
+from .plugins import load_all_plugins as _load_all_plugins
 from .postprocessor import (
     FFmpegExtractAudioPP,
     FFmpegMergerPP,

@@ -43,12 +44,12 @@ from .utils import (
     GeoUtils,
     PlaylistEntries,
     SameFileError,
-    decodeOption,
     download_range_func,
     expand_path,
     float_or_none,
     format_field,
     int_or_none,
+    join_nonempty,
     match_filter_func,
     parse_bytes,
     parse_duration,

@@ -57,14 +58,19 @@ from .utils import (
     read_stdin,
     render_table,
     setproctitle,
-    traverse_obj,
+    shell_quote,
     variadic,
     write_string,
 )
-from .utils.networking import std_headers
-from .YoutubeDL import YoutubeDL
-
-_IN_CLI = False
+from .utils._utils import _UnsafeExtensionError
+from .utils._jsruntime import (
+    BunJsRuntime as _BunJsRuntime,
+    DenoJsRuntime as _DenoJsRuntime,
+    NodeJsRuntime as _NodeJsRuntime,
+    QuickJsRuntime as _QuickJsRuntime,
+)
+from .YoutubeDL import YoutubeDL
 
 
 def _exit(status=0, *args):

@@ -74,14 +80,16 @@ def _exit(status=0, *args):
 
 
 def get_urls(urls, batchfile, verbose):
-    # Batch file verification
+    """
+    @param verbose  -1: quiet, 0: normal, 1: verbose
+    """
     batch_urls = []
     if batchfile is not None:
         try:
             batch_urls = read_batch_urls(
-                read_stdin('URLs') if batchfile == '-'
+                read_stdin(None if verbose == -1 else 'URLs') if batchfile == '-'
                 else open(expand_path(batchfile), encoding='utf-8', errors='ignore'))
-            if verbose:
+            if verbose == 1:
                 write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n')
         except OSError:
             _exit(f'ERROR: batch file {batchfile} could not be read')

@@ -112,9 +120,9 @@ def print_extractor_information(opts, urls):
             ie.description(markdown=False, search_examples=_SEARCHES)
             for ie in list_extractor_classes(opts.age_limit) if ie.working() and ie.IE_DESC is not False)
     elif opts.ap_list_mso:
-        out = 'Supported TV Providers:\n%s\n' % render_table(
+        out = 'Supported TV Providers:\n{}\n'.format(render_table(
             ['mso', 'mso name'],
-            [[mso_id, mso_info['name']] for mso_id, mso_info in MSO_INFO.items()])
+            [[mso_id, mso_info['name']] for mso_id, mso_info in MSO_INFO.items()]))
     else:
         return False
     write_string(out, out=sys.stdout)

@@ -126,7 +134,7 @@ def set_compat_opts(opts):
         if name not in opts.compat_opts:
             return False
         opts.compat_opts.discard(name)
-        opts.compat_opts.update(['*%s' % name])
+        opts.compat_opts.update([f'*{name}'])
         return True
 
     def set_default_compat(compat_name, opt_name, default=True, remove_compat=True):

@@ -153,6 +161,15 @@ def set_compat_opts(opts):
         opts.embed_infojson = False
     if 'format-sort' in opts.compat_opts:
         opts.format_sort.extend(FormatSorter.ytdl_default)
+    elif 'prefer-vp9-sort' in opts.compat_opts:
+        FormatSorter.default = FormatSorter._prefer_vp9_sort
+
+    if 'mtime-by-default' in opts.compat_opts:
+        if opts.updatetime is None:
+            opts.updatetime = True
+        else:
+            _unused_compat_opt('mtime-by-default')
+
     _video_multistreams_set = set_default_compat('multistreams', 'allow_multiple_video_streams', False, remove_compat=False)
     _audio_multistreams_set = set_default_compat('multistreams', 'allow_multiple_audio_streams', False, remove_compat=False)
     if _video_multistreams_set is False and _audio_multistreams_set is False:

@@ -219,7 +236,7 @@ def validate_options(opts):
     validate_minmax(opts.sleep_interval, opts.max_sleep_interval, 'sleep interval')
 
     if opts.wait_for_video is not None:
-        min_wait, max_wait, *_ = map(parse_duration, opts.wait_for_video.split('-', 1) + [None])
+        min_wait, max_wait, *_ = map(parse_duration, [*opts.wait_for_video.split('-', 1), None])
         validate(min_wait is not None and not (max_wait is None and '-' in opts.wait_for_video),
                  'time range to wait for video', opts.wait_for_video)
         validate_minmax(min_wait, max_wait, 'time range to wait for video')

@@ -230,6 +247,11 @@ def validate_options(opts):
         validate_regex('format sorting', f, FormatSorter.regex)
 
     # Postprocessor formats
+    if opts.convertsubtitles == 'none':
+        opts.convertsubtitles = None
+    if opts.convertthumbnails == 'none':
+        opts.convertthumbnails = None
+
     validate_regex('merge output format', opts.merge_output_format,
                    r'({0})(/({0}))*'.format('|'.join(map(re.escape, FFmpegMergerPP.SUPPORTED_EXTS))))
     validate_regex('audio format', opts.audioformat, FFmpegExtractAudioPP.FORMAT_RE)

@@ -249,9 +271,11 @@ def validate_options(opts):
         elif value in ('inf', 'infinite'):
             return float('inf')
         try:
-            return int(value)
+            int_value = int(value)
         except (TypeError, ValueError):
             validate(False, f'{name} retry count', value)
+        validate_positive(f'{name} retry count', int_value)
+        return int_value
 
     opts.retries = parse_retries('download', opts.retries)
     opts.fragment_retries = parse_retries('fragment', opts.fragment_retries)

@@ -261,9 +285,9 @@ def validate_options(opts):
     # Retry sleep function
     def parse_sleep_func(expr):
         NUMBER_RE = r'\d+(?:\.\d+)?'
-        op, start, limit, step, *_ = tuple(re.fullmatch(
+        op, start, limit, step, *_ = (*tuple(re.fullmatch(
             rf'(?:(linear|exp)=)?({NUMBER_RE})(?::({NUMBER_RE})?)?(?::({NUMBER_RE}))?',
-            expr.strip()).groups()) + (None, None)
+            expr.strip()).groups()), None, None)
 
         if op == 'exp':
             return lambda n: min(float(start) * (float(step or 2) ** n), float(limit or 'inf'))

@@ -281,18 +305,20 @@ def validate_options(opts):
             raise ValueError(f'invalid {key} retry sleep expression {expr!r}')
 
     # Bytes
-    def validate_bytes(name, value):
+    def validate_bytes(name, value, strict_positive=False):
         if value is None:
             return None
         numeric_limit = parse_bytes(value)
-        validate(numeric_limit is not None, 'rate limit', value)
+        validate(numeric_limit is not None, name, value)
+        if strict_positive:
+            validate_positive(name, numeric_limit, True)
         return numeric_limit
 
-    opts.ratelimit = validate_bytes('rate limit', opts.ratelimit)
+    opts.ratelimit = validate_bytes('rate limit', opts.ratelimit, True)
     opts.throttledratelimit = validate_bytes('throttled rate limit', opts.throttledratelimit)
     opts.min_filesize = validate_bytes('min filesize', opts.min_filesize)
     opts.max_filesize = validate_bytes('max filesize', opts.max_filesize)
-    opts.buffersize = validate_bytes('buffer size', opts.buffersize)
+    opts.buffersize = validate_bytes('buffer size', opts.buffersize, True)
     opts.http_chunk_size = validate_bytes('http chunk size', opts.http_chunk_size)
 
     # Output templates

@@ -387,16 +413,19 @@ def validate_options(opts):
                 f'Supported keyrings are: {", ".join(sorted(SUPPORTED_KEYRINGS))}')
         opts.cookiesfrombrowser = (browser_name, profile, keyring, container)
 
+    if opts.impersonate is not None:
+        opts.impersonate = ImpersonateTarget.from_str(opts.impersonate.lower())
+
     # MetadataParser
     def metadataparser_actions(f):
         if isinstance(f, str):
-            cmd = '--parse-metadata %s' % compat_shlex_quote(f)
+            cmd = f'--parse-metadata {shell_quote(f)}'
             try:
                 actions = [MetadataFromFieldPP.to_action(f)]
             except Exception as err:
                 raise ValueError(f'{cmd} is invalid; {err}')
         else:
-            cmd = '--replace-in-metadata %s' % ' '.join(map(compat_shlex_quote, f))
+            cmd = f'--replace-in-metadata {shell_quote(f)}'
             actions = ((MetadataParserPP.Actions.REPLACE, x, *f[1:]) for x in f[0].split(','))
 
         for action in actions:

@@ -407,13 +436,17 @@ def validate_options(opts):
             yield action
 
     if opts.metafromtitle is not None:
-        opts.parse_metadata.setdefault('pre_process', []).append('title:%s' % opts.metafromtitle)
+        opts.parse_metadata.setdefault('pre_process', []).append(f'title:{opts.metafromtitle}')
     opts.parse_metadata = {
         k: list(itertools.chain(*map(metadataparser_actions, v)))
         for k, v in opts.parse_metadata.items()
     }
 
     # Other options
+    opts.plugin_dirs = opts.plugin_dirs
+    if opts.plugin_dirs is None:
+        opts.plugin_dirs = ['default']
+
     if opts.playlist_items is not None:
         try:
             tuple(PlaylistEntries.parse_playlist_items(opts.playlist_items))

@@ -460,7 +493,7 @@ def validate_options(opts):
         default_downloader = ed.get_basename()
 
     for policy in opts.color.values():
-        if policy not in ('always', 'auto', 'no_color', 'never'):
+        if policy not in ('always', 'auto', 'auto-tty', 'no_color', 'no_color-tty', 'never'):
             raise ValueError(f'"{policy}" is not a valid color policy')
 
     warnings, deprecation_warnings = [], []

@@ -472,6 +505,14 @@ def validate_options(opts):
             'To let yt-dlp download and merge the best available formats, simply do not pass any format selection',
             'If you know what you are doing and want only the best pre-merged format, use "-f b" instead to suppress this warning')))
 
+    # Common mistake: -f mp4
+    if opts.format == 'mp4':
+        warnings.append('.\n         '.join((
+            '"-f mp4" selects the best pre-merged mp4 format which is often not what\'s intended',
+            'Pre-merged mp4 formats are not available from all sites, or may only be available in lower quality',
+            'To prioritize the best h264 video and aac audio in an mp4 container, use "-t mp4" instead',
+            'If you know what you are doing and want a pre-merged mp4 format, use "-f b[ext=mp4]" instead to suppress this warning')))
+
     # --(postprocessor/downloader)-args without name
     def report_args_compat(name, value, key1, key2=None, where=None):
         if key1 in value and key2 not in value:

@@ -487,7 +528,6 @@ def validate_options(opts):

     if report_args_compat('post-processor', opts.postprocessor_args, 'default-compat', 'default'):
         opts.postprocessor_args['default'] = opts.postprocessor_args.pop('default-compat')
-    opts.postprocessor_args.setdefault('sponskrub', [])

     def report_conflict(arg1, opt1, arg2='--allow-unplayable-formats', opt2='allow_unplayable_formats',
                         val1=NO_DEFAULT, val2=NO_DEFAULT, default=False):
@@ -512,11 +552,6 @@ def validate_options(opts):
                     '"--exec before_dl:"', 'exec_cmd', val2=opts.exec_cmd.get('before_dl'))
     report_conflict('--id', 'useid', '--output', 'outtmpl', val2=opts.outtmpl.get('default'))
     report_conflict('--remux-video', 'remuxvideo', '--recode-video', 'recodevideo')
-    report_conflict('--sponskrub', 'sponskrub', '--remove-chapters', 'remove_chapters')
-    report_conflict('--sponskrub', 'sponskrub', '--sponsorblock-mark', 'sponsorblock_mark')
-    report_conflict('--sponskrub', 'sponskrub', '--sponsorblock-remove', 'sponsorblock_remove')
-    report_conflict('--sponskrub-cut', 'sponskrub_cut', '--split-chapter', 'split_chapters',
-                    val1=opts.sponskrub and opts.sponskrub_cut)

     # Conflicts with --allow-unplayable-formats
     report_conflict('--embed-metadata', 'addmetadata')
@@ -529,23 +564,15 @@ def validate_options(opts):
     report_conflict('--recode-video', 'recodevideo')
     report_conflict('--remove-chapters', 'remove_chapters', default=[])
     report_conflict('--remux-video', 'remuxvideo')
-    report_conflict('--sponskrub', 'sponskrub')
     report_conflict('--sponsorblock-remove', 'sponsorblock_remove', default=set())
     report_conflict('--xattrs', 'xattrs')

-    # Fully deprecated options
-    def report_deprecation(val, old, new=None):
-        if not val:
-            return
+    if hasattr(opts, '_deprecated_options'):
         deprecation_warnings.append(
-            f'{old} is deprecated and may be removed in a future version. Use {new} instead' if new
-            else f'{old} is deprecated and may not work as expected')
-
-    report_deprecation(opts.sponskrub, '--sponskrub', '--sponsorblock-mark or --sponsorblock-remove')
-    report_deprecation(not opts.prefer_ffmpeg, '--prefer-avconv', 'ffmpeg')
-    # report_deprecation(opts.include_ads, '--include-ads')  # We may re-implement this in future
-    # report_deprecation(opts.call_home, '--call-home')  # We may re-implement this in future
-    # report_deprecation(opts.writeannotations, '--write-annotations')  # It's just that no website has it
+            f'The following options have been deprecated: {", ".join(opts._deprecated_options)}\n'
+            'Please remove them from your command/configuration to avoid future errors.\n'
+            'See https://github.com/yt-dlp/yt-dlp/issues/14198 for more details')
+        del opts._deprecated_options

     # Dependent options
     opts.date = DateRange.day(opts.date) if opts.date else DateRange(opts.dateafter, opts.datebefore)
@@ -586,6 +613,13 @@ def validate_options(opts):
     if opts.ap_username is not None and opts.ap_password is None:
         opts.ap_password = getpass.getpass('Type TV provider account password and press [Return]: ')

+    # compat option changes global state destructively; only allow from cli
+    if 'allow-unsafe-ext' in opts.compat_opts:
+        warnings.append(
+            'Using allow-unsafe-ext opens you up to potential attacks. '
+            'Use with great care!')
+        _UnsafeExtensionError.sanitize_extension = lambda x, prepend=False: x
+
     return warnings, deprecation_warnings

@@ -596,7 +630,7 @@ def get_postprocessors(opts):
         yield {
             'key': 'MetadataParser',
             'actions': actions,
-            'when': when
+            'when': when,
         }
     sponsorblock_query = opts.sponsorblock_mark | opts.sponsorblock_remove
     if sponsorblock_query:
@@ -604,19 +638,19 @@ def get_postprocessors(opts):
             'key': 'SponsorBlock',
             'categories': sponsorblock_query,
             'api': opts.sponsorblock_api,
-            'when': 'after_filter'
+            'when': 'after_filter',
         }
     if opts.convertsubtitles:
         yield {
             'key': 'FFmpegSubtitlesConvertor',
             'format': opts.convertsubtitles,
-            'when': 'before_dl'
+            'when': 'before_dl',
         }
     if opts.convertthumbnails:
         yield {
             'key': 'FFmpegThumbnailsConvertor',
             'format': opts.convertthumbnails,
-            'when': 'before_dl'
+            'when': 'before_dl',
         }
     if opts.extractaudio:
         yield {
@@ -641,7 +675,7 @@ def get_postprocessors(opts):
         yield {
             'key': 'FFmpegEmbedSubtitle',
             # already_have_subtitle = True prevents the file from being deleted after embedding
-            'already_have_subtitle': opts.writesubtitles and keep_subs
+            'already_have_subtitle': opts.writesubtitles and keep_subs,
         }
         if not opts.writeautomaticsub and keep_subs:
             opts.writesubtitles = True
@@ -654,7 +688,7 @@ def get_postprocessors(opts):
             'remove_sponsor_segments': opts.sponsorblock_remove,
             'remove_ranges': opts.remove_ranges,
             'sponsorblock_chapter_title': opts.sponsorblock_chapter_title,
-            'force_keyframes': opts.force_keyframes_at_cuts
+            'force_keyframes': opts.force_keyframes_at_cuts,
         }
     # FFmpegMetadataPP should be run after FFmpegVideoConvertorPP and
     # FFmpegExtractAudioPP as containers before conversion may not support
@@ -669,26 +703,11 @@ def get_postprocessors(opts):
             'add_metadata': opts.addmetadata,
             'add_infojson': opts.embed_infojson,
         }
-    # Deprecated
-    # This should be above EmbedThumbnail since sponskrub removes the thumbnail attachment
-    # but must be below EmbedSubtitle and FFmpegMetadata
-    # See https://github.com/yt-dlp/yt-dlp/issues/204 , https://github.com/faissaloo/SponSkrub/issues/29
-    # If opts.sponskrub is None, sponskrub is used, but it silently fails if the executable can't be found
-    if opts.sponskrub is not False:
-        yield {
-            'key': 'SponSkrub',
-            'path': opts.sponskrub_path,
-            'args': opts.sponskrub_args,
-            'cut': opts.sponskrub_cut,
-            'force': opts.sponskrub_force,
-            'ignoreerror': opts.sponskrub is None,
-            '_from_cli': True,
-        }
     if opts.embedthumbnail:
         yield {
             'key': 'EmbedThumbnail',
             # already_have_thumbnail = True prevents the file from being deleted after embedding
-            'already_have_thumbnail': opts.writethumbnail
+            'already_have_thumbnail': opts.writethumbnail,
         }
         if not opts.writethumbnail:
             opts.writethumbnail = True
@@ -722,7 +741,7 @@ ParsedOptions = collections.namedtuple('ParsedOptions', ('parser', 'options', 'u
 def parse_options(argv=None):
     """@returns ParsedOptions(parser, opts, urls, ydl_opts)"""
     parser, opts, urls = parseOpts(argv)
-    urls = get_urls(urls, opts.batchfile, opts.verbose)
+    urls = get_urls(urls, opts.batchfile, -1 if opts.quiet and not opts.verbose else opts.verbose)

     set_compat_opts(opts)
     try:
@@ -735,7 +754,7 @@ def parse_options(argv=None):
     print_only = bool(opts.forceprint) and all(k not in opts.forceprint for k in POSTPROCESS_WHEN[3:])
     any_getting = any(getattr(opts, k) for k in (
         'dumpjson', 'dump_single_json', 'getdescription', 'getduration', 'getfilename',
-        'getformat', 'getid', 'getthumbnail', 'gettitle', 'geturl'
+        'getformat', 'getid', 'getthumbnail', 'gettitle', 'geturl',
     ))
     if opts.quiet is None:
         opts.quiet = any_getting or opts.print_json or bool(opts.forceprint)
@@ -761,6 +780,10 @@ def parse_options(argv=None):
         else opts.audioformat if (opts.extractaudio and opts.audioformat in FFmpegExtractAudioPP.SUPPORTED_EXTS)
         else None)

+    js_runtimes = {
+        runtime.lower(): {'path': path} for runtime, path in (
+            [*arg.split(':', 1), None][:2] for arg in opts.js_runtimes)}
+
     return ParsedOptions(parser, opts, urls, {
         'usenetrc': opts.usenetrc,
         'netrc_location': opts.netrc_location,
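The `js_runtimes` comprehension added above splits each runtime argument into a name and an optional path. A minimal standalone sketch of that parsing idiom; the sample arguments are illustrative only:

```python
def parse_runtime_arg(arg):
    # 'deno' -> ('deno', None); 'Node:/usr/local/bin/node' -> ('node', '/usr/local/bin/node')
    runtime, path = [*arg.split(':', 1), None][:2]
    return runtime.lower(), path

assert parse_runtime_arg('deno') == ('deno', None)
assert parse_runtime_arg('Node:/usr/local/bin/node') == ('node', '/usr/local/bin/node')
```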
@@ -830,6 +853,7 @@ def parse_options(argv=None):
         'noprogress': opts.quiet if opts.noprogress is None else opts.noprogress,
         'progress_with_newline': opts.progress_with_newline,
         'progress_template': opts.progress_template,
+        'progress_delta': opts.progress_delta,
         'playliststart': opts.playliststart,
         'playlistend': opts.playlistend,
         'playlistreverse': opts.playlist_reverse,
@@ -841,7 +865,6 @@ def parse_options(argv=None):
         'nopart': opts.nopart,
         'updatetime': opts.updatetime,
         'writedescription': opts.writedescription,
-        'writeannotations': opts.writeannotations,
         'writeinfojson': opts.writeinfojson,
         'allow_playlist_files': opts.allow_playlist_files,
         'clean_infojson': opts.clean_infojson,
@@ -858,8 +881,8 @@ def parse_options(argv=None):
         'listsubtitles': opts.listsubtitles,
         'subtitlesformat': opts.subtitlesformat,
         'subtitleslangs': opts.subtitleslangs,
-        'matchtitle': decodeOption(opts.matchtitle),
-        'rejecttitle': decodeOption(opts.rejecttitle),
+        'matchtitle': opts.matchtitle,
+        'rejecttitle': opts.rejecttitle,
         'max_downloads': opts.max_downloads,
         'prefer_free_formats': opts.prefer_free_formats,
         'trim_file_name': opts.trim_file_name,
@@ -875,7 +898,6 @@ def parse_options(argv=None):
         'max_views': opts.max_views,
         'daterange': opts.date,
         'cachedir': opts.cachedir,
-        'youtube_print_sig_code': opts.youtube_print_sig_code,
         'age_limit': opts.age_limit,
         'download_archive': opts.download_archive,
         'break_on_existing': opts.break_on_existing,
@@ -893,13 +915,9 @@ def parse_options(argv=None):
         'socket_timeout': opts.socket_timeout,
         'bidi_workaround': opts.bidi_workaround,
         'debug_printtraffic': opts.debug_printtraffic,
-        'prefer_ffmpeg': opts.prefer_ffmpeg,
-        'include_ads': opts.include_ads,
         'default_search': opts.default_search,
         'dynamic_mpd': opts.dynamic_mpd,
         'extractor_args': opts.extractor_args,
-        'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
-        'youtube_include_hls_manifest': opts.youtube_include_hls_manifest,
         'encoding': opts.encoding,
         'extract_flat': opts.extract_flat,
         'live_from_start': opts.live_from_start,
@@ -910,7 +928,7 @@ def parse_options(argv=None):
         'postprocessors': postprocessors,
         'fixup': opts.fixup,
         'source_address': opts.source_address,
-        'call_home': opts.call_home,
+        'impersonate': opts.impersonate,
         'sleep_interval_requests': opts.sleep_interval_requests,
         'sleep_interval': opts.sleep_interval,
         'max_sleep_interval': opts.max_sleep_interval,
@@ -920,7 +938,6 @@ def parse_options(argv=None):
         'force_keyframes_at_cuts': opts.force_keyframes_at_cuts,
         'list_thumbnails': opts.list_thumbnails,
         'playlist_items': opts.playlist_items,
-        'xattr_set_filesize': opts.xattr_set_filesize,
         'match_filter': opts.match_filter,
         'color': opts.color,
         'ffmpeg_location': opts.ffmpeg_location,
@@ -929,11 +946,14 @@ def parse_options(argv=None):
         'hls_split_discontinuity': opts.hls_split_discontinuity,
         'external_downloader_args': opts.external_downloader_args,
         'postprocessor_args': opts.postprocessor_args,
-        'cn_verification_proxy': opts.cn_verification_proxy,
         'geo_verification_proxy': opts.geo_verification_proxy,
         'geo_bypass': opts.geo_bypass,
         'geo_bypass_country': opts.geo_bypass_country,
         'geo_bypass_ip_block': opts.geo_bypass_ip_block,
+        'useid': opts.useid or None,
+        'js_runtimes': js_runtimes,
+        'remote_components': opts.remote_components,
+        'warn_when_outdated': opts.update_self is None,
         '_warnings': warnings,
         '_deprecation_warnings': deprecation_warnings,
         'compat_opts': opts.compat_opts,
@@ -945,12 +965,6 @@ def _real_main(argv=None):

     parser, opts, all_urls, ydl_opts = parse_options(argv)

-    # Dump user agent
-    if opts.dump_user_agent:
-        ua = traverse_obj(opts.headers, 'User-Agent', casesense=False, default=std_headers['User-Agent'])
-        write_string(f'{ua}\n', out=sys.stdout)
-        return
-
     if print_extractor_information(opts, all_urls):
         return

@@ -959,6 +973,11 @@ def _real_main(argv=None):
     if opts.ffmpeg_location:
         FFmpegPostProcessor._ffmpeg_location.set(opts.ffmpeg_location)

+    # load all plugins into the global lookup
+    plugin_dirs.value = opts.plugin_dirs
+    if plugin_dirs.value:
+        _load_all_plugins()
+
     with YoutubeDL(ydl_opts) as ydl:
         pre_process = opts.update_self or opts.rm_cachedir
         actual_use = all_urls or opts.load_info_filename
@@ -968,22 +987,75 @@ def _real_main(argv=None):

         try:
             updater = Updater(ydl, opts.update_self)
-            if opts.update_self and updater.update() and actual_use:
-                if updater.cmd:
-                    return updater.restart()
-                # This code is reachable only for zip variant in py < 3.10
-                # It makes sense to exit here, but the old behavior is to continue
-                ydl.report_warning('Restart yt-dlp to use the updated version')
-                # return 100, 'ERROR: The program must exit for the update to complete'
+            if opts.update_self and updater.update() and actual_use and updater.cmd:
+                return updater.restart()
         except Exception:
             traceback.print_exc()
             ydl._download_retcode = 100

+        if opts.list_impersonate_targets:
+
+            known_targets = [
+                # List of simplified targets we know are supported,
+                # to help users know what dependencies may be required.
+                (ImpersonateTarget('chrome'), 'curl_cffi'),
+                (ImpersonateTarget('safari'), 'curl_cffi'),
+                (ImpersonateTarget('firefox'), 'curl_cffi>=0.10'),
+                (ImpersonateTarget('edge'), 'curl_cffi'),
+                (ImpersonateTarget('tor'), 'curl_cffi>=0.11'),
+            ]
+
+            available_targets = ydl._get_available_impersonate_targets()
+
+            def make_row(target, handler):
+                return [
+                    join_nonempty(target.client.title(), target.version, delim='-') or '-',
+                    join_nonempty((target.os or '').title(), target.os_version, delim='-') or '-',
+                    handler,
+                ]
+
+            rows = [make_row(target, handler) for target, handler in available_targets]
+
+            for known_target, known_handler in known_targets:
+                if not any(
+                    known_target in target and known_handler.startswith(handler)
+                    for target, handler in available_targets
+                ):
+                    rows.insert(0, [
+                        ydl._format_out(text, ydl.Styles.SUPPRESS)
+                        for text in make_row(known_target, f'{known_handler} (unavailable)')
+                    ])
+
+            ydl.to_screen('[info] Available impersonate targets')
+            ydl.to_stdout(render_table(['Client', 'OS', 'Source'], rows, extra_gap=2, delim='-'))
+            return
+
         if not actual_use:
             if pre_process:
                 return ydl._download_retcode

-            ydl.warn_if_short_id(sys.argv[1:] if argv is None else argv)
+            args = sys.argv[1:] if argv is None else argv
+            ydl.warn_if_short_id(args)
+
+            # Show a useful error message and wait for keypress if not launched from shell on Windows
+            if not args and os.name == 'nt' and getattr(sys, 'frozen', False):
+                import ctypes.wintypes
+                import msvcrt
+
+                kernel32 = ctypes.WinDLL('Kernel32')
+
+                buffer = (1 * ctypes.wintypes.DWORD)()
+                attached_processes = kernel32.GetConsoleProcessList(buffer, 1)
+                # If we only have a single process attached, then the executable was double clicked
+                # When using `pyinstaller` with `--onefile`, two processes get attached
+                is_onefile = hasattr(sys, '_MEIPASS') and os.path.basename(sys._MEIPASS).startswith('_MEI')
+                if attached_processes == 1 or (is_onefile and attached_processes == 2):
+                    print(parser._generate_error_message(
+                        'Do not double-click the executable, instead call it from a command line.\n'
+                        'Please read the README for further information on how to use yt-dlp: '
+                        'https://github.com/yt-dlp/yt-dlp#readme'))
+                    msvcrt.getch()
+                    _exit(2)
             parser.error(
                 'You must provide at least one URL.\n'
                 'Type yt-dlp --help to see a list of all options.')
@@ -1002,11 +1074,10 @@ def _real_main(argv=None):


 def main(argv=None):
-    global _IN_CLI
-    _IN_CLI = True
+    IN_CLI.value = True
     try:
         _exit(*variadic(_real_main(argv)))
-    except DownloadError:
+    except (CookieLoadError, DownloadError):
         _exit(1)
     except SameFileError as e:
         _exit(f'ERROR: {e}')
@@ -1023,10 +1094,20 @@ def main(argv=None):

 from .extractor import gen_extractors, list_extractors

+# Register JS runtimes and remote components
+from .globals import supported_js_runtimes, supported_remote_components
+supported_js_runtimes.value['deno'] = _DenoJsRuntime
+supported_js_runtimes.value['node'] = _NodeJsRuntime
+supported_js_runtimes.value['bun'] = _BunJsRuntime
+supported_js_runtimes.value['quickjs'] = _QuickJsRuntime
+
+supported_remote_components.value.append('ejs:github')
+supported_remote_components.value.append('ejs:npm')
+
 __all__ = [
-    'main',
     'YoutubeDL',
-    'parse_options',
     'gen_extractors',
     'list_extractors',
+    'main',
+    'parse_options',
 ]
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3

 # Execute with
-# $ python -m yt_dlp
+# $ python3 -m yt_dlp

 import sys
@@ -1,6 +1,6 @@
 import sys

-from PyInstaller.utils.hooks import collect_submodules
+from PyInstaller.utils.hooks import collect_submodules, collect_data_files


 def pycryptodome_module():
@@ -10,7 +10,7 @@ def pycryptodome_module():
         try:
             import Crypto  # noqa: F401
             print('WARNING: Using Crypto since Cryptodome is not available. '
-                  'Install with: pip install pycryptodomex', file=sys.stderr)
+                  'Install with: python3 -m pip install pycryptodomex', file=sys.stderr)
             return 'Crypto'
         except ImportError:
             pass
@@ -21,12 +21,17 @@ def get_hidden_imports():
     yield from ('yt_dlp.compat._legacy', 'yt_dlp.compat._deprecated')
     yield from ('yt_dlp.utils._legacy', 'yt_dlp.utils._deprecated')
     yield pycryptodome_module()
-    yield from collect_submodules('websockets')
+    # Only `websockets` is required, others are collected just in case
+    for module in ('websockets', 'requests', 'urllib3'):
+        yield from collect_submodules(module)
     # These are auto-detected, but explicitly add them just in case
-    yield from ('mutagen', 'brotli', 'certifi')
+    yield from ('mutagen', 'brotli', 'certifi', 'secretstorage', 'curl_cffi')


 hiddenimports = list(get_hidden_imports())
 print(f'Adding imports: {hiddenimports}')

-excludedimports = ['youtube_dl', 'youtube_dlc', 'test', 'ytdlp_plugins', 'devscripts']
+excludedimports = ['youtube_dl', 'youtube_dlc', 'test', 'ytdlp_plugins', 'devscripts', 'bundle']
+
+datas = collect_data_files('curl_cffi', includes=['cacert.pem'])
+datas += collect_data_files('yt_dlp_ejs', includes=['**/*.js'])
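For context on the `datas` addition in the hook above: PyInstaller's `collect_data_files` returns `(source_path, destination_dir)` tuples that get copied into the frozen bundle. A minimal sketch, assuming the `curl_cffi` package is installed in the build environment:

```python
from PyInstaller.utils.hooks import collect_data_files

# Each entry looks like ('.../site-packages/curl_cffi/cacert.pem', 'curl_cffi')
datas = collect_data_files('curl_cffi', includes=['cacert.pem'])
print(datas)
```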
@@ -3,7 +3,6 @@ from math import ceil

 from .compat import compat_ord
 from .dependencies import Cryptodome
-from .utils import bytes_to_intlist, intlist_to_bytes

 if Cryptodome.AES:
     def aes_cbc_decrypt_bytes(data, key, iv):
@@ -17,15 +16,15 @@ if Cryptodome.AES:
 else:
     def aes_cbc_decrypt_bytes(data, key, iv):
         """ Decrypt bytes with AES-CBC using native implementation since pycryptodome is unavailable """
-        return intlist_to_bytes(aes_cbc_decrypt(*map(bytes_to_intlist, (data, key, iv))))
+        return bytes(aes_cbc_decrypt(*map(list, (data, key, iv))))

     def aes_gcm_decrypt_and_verify_bytes(data, key, tag, nonce):
         """ Decrypt bytes with AES-GCM using native implementation since pycryptodome is unavailable """
-        return intlist_to_bytes(aes_gcm_decrypt_and_verify(*map(bytes_to_intlist, (data, key, tag, nonce))))
+        return bytes(aes_gcm_decrypt_and_verify(*map(list, (data, key, tag, nonce))))


 def aes_cbc_encrypt_bytes(data, key, iv, **kwargs):
-    return intlist_to_bytes(aes_cbc_encrypt(*map(bytes_to_intlist, (data, key, iv)), **kwargs))
+    return bytes(aes_cbc_encrypt(*map(list, (data, key, iv)), **kwargs))


 BLOCK_SIZE_BYTES = 16
@@ -68,7 +67,7 @@ def pad_block(block, padding_mode):
         raise NotImplementedError(f'Padding mode {padding_mode} is not implemented')

     if padding_mode == 'iso7816' and padding_size:
-        block = block + [0x80]  # NB: += mutates list
+        block = [*block, 0x80]  # NB: += mutates list
         padding_size -= 1

     return block + [PADDING_BYTE[padding_mode]] * padding_size
@@ -84,7 +83,7 @@ def aes_ecb_encrypt(data, key, iv=None):
     @returns {int[]} encrypted data
     """
     expanded_key = key_expansion(key)
-    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
+    block_count = ceil(len(data) / BLOCK_SIZE_BYTES)

     encrypted_data = []
     for i in range(block_count):
@@ -104,15 +103,13 @@ def aes_ecb_decrypt(data, key, iv=None):
     @returns {int[]} decrypted data
     """
     expanded_key = key_expansion(key)
-    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
+    block_count = ceil(len(data) / BLOCK_SIZE_BYTES)

     encrypted_data = []
     for i in range(block_count):
         block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES]
         encrypted_data += aes_decrypt(block, expanded_key)
-    encrypted_data = encrypted_data[:len(data)]
-
-    return encrypted_data
+    return encrypted_data[:len(data)]


 def aes_ctr_decrypt(data, key, iv):
@@ -137,7 +134,7 @@ def aes_ctr_encrypt(data, key, iv):
     @returns {int[]} encrypted data
     """
     expanded_key = key_expansion(key)
-    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
+    block_count = ceil(len(data) / BLOCK_SIZE_BYTES)
     counter = iter_vector(iv)

     encrypted_data = []
@@ -148,9 +145,7 @@ def aes_ctr_encrypt(data, key, iv):

         cipher_counter_block = aes_encrypt(counter_block, expanded_key)
         encrypted_data += xor(block, cipher_counter_block)
-    encrypted_data = encrypted_data[:len(data)]
-
-    return encrypted_data
+    return encrypted_data[:len(data)]


 def aes_cbc_decrypt(data, key, iv):
@@ -163,7 +158,7 @@ def aes_cbc_decrypt(data, key, iv):
     @returns {int[]} decrypted data
     """
     expanded_key = key_expansion(key)
-    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
+    block_count = ceil(len(data) / BLOCK_SIZE_BYTES)

     decrypted_data = []
     previous_cipher_block = iv
@@ -174,9 +169,7 @@ def aes_cbc_decrypt(data, key, iv):
         decrypted_block = aes_decrypt(block, expanded_key)
         decrypted_data += xor(decrypted_block, previous_cipher_block)
         previous_cipher_block = block
-    decrypted_data = decrypted_data[:len(data)]
-
-    return decrypted_data
+    return decrypted_data[:len(data)]


 def aes_cbc_encrypt(data, key, iv, *, padding_mode='pkcs7'):
@@ -190,7 +183,7 @@ def aes_cbc_encrypt(data, key, iv, *, padding_mode='pkcs7'):
     @returns {int[]} encrypted data
     """
     expanded_key = key_expansion(key)
-    block_count = int(ceil(float(len(data)) / BLOCK_SIZE_BYTES))
+    block_count = ceil(len(data) / BLOCK_SIZE_BYTES)

     encrypted_data = []
     previous_cipher_block = iv
@@ -224,10 +217,10 @@ def aes_gcm_decrypt_and_verify(data, key, tag, nonce):
     hash_subkey = aes_encrypt([0] * BLOCK_SIZE_BYTES, key_expansion(key))

     if len(nonce) == 12:
-        j0 = nonce + [0, 0, 0, 1]
+        j0 = [*nonce, 0, 0, 0, 1]
     else:
         fill = (BLOCK_SIZE_BYTES - (len(nonce) % BLOCK_SIZE_BYTES)) % BLOCK_SIZE_BYTES + 8
-        ghash_in = nonce + [0] * fill + bytes_to_intlist((8 * len(nonce)).to_bytes(8, 'big'))
+        ghash_in = nonce + [0] * fill + list((8 * len(nonce)).to_bytes(8, 'big'))
         j0 = ghash(hash_subkey, ghash_in)

     # TODO: add nonce support to aes_ctr_decrypt
@@ -236,17 +229,17 @@ def aes_gcm_decrypt_and_verify(data, key, tag, nonce):
     iv_ctr = inc(j0)

     decrypted_data = aes_ctr_decrypt(data, key, iv_ctr + [0] * (BLOCK_SIZE_BYTES - len(iv_ctr)))
-    pad_len = len(data) // 16 * 16
+    pad_len = (BLOCK_SIZE_BYTES - (len(data) % BLOCK_SIZE_BYTES)) % BLOCK_SIZE_BYTES
     s_tag = ghash(
         hash_subkey,
         data
-        + [0] * (BLOCK_SIZE_BYTES - len(data) + pad_len)  # pad
-        + bytes_to_intlist((0 * 8).to_bytes(8, 'big')  # length of associated data
-        + ((len(data) * 8).to_bytes(8, 'big')))  # length of data
+        + [0] * pad_len  # pad
+        + list((0 * 8).to_bytes(8, 'big')  # length of associated data
+        + ((len(data) * 8).to_bytes(8, 'big'))),  # length of data
     )

     if tag != aes_ctr_encrypt(s_tag, key, j0):
-        raise ValueError("Mismatching authentication tag")
+        raise ValueError('Mismatching authentication tag')

     return decrypted_data

@@ -288,9 +281,7 @@ def aes_decrypt(data, expanded_key):
     data = list(iter_mix_columns(data, MIX_COLUMN_MATRIX_INV))
     data = shift_rows_inv(data)
     data = sub_bytes_inv(data)
-    data = xor(data, expanded_key[:BLOCK_SIZE_BYTES])
-
-    return data
+    return xor(data, expanded_key[:BLOCK_SIZE_BYTES])


 def aes_decrypt_text(data, password, key_size_bytes):
@@ -308,8 +299,8 @@ def aes_decrypt_text(data, password, key_size_bytes):
     """
     NONCE_LENGTH_BYTES = 8

-    data = bytes_to_intlist(base64.b64decode(data))
-    password = bytes_to_intlist(password.encode())
+    data = list(base64.b64decode(data))
+    password = list(password.encode())

     key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password))
     key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES)
@@ -318,9 +309,7 @@ def aes_decrypt_text(data, password, key_size_bytes):
     cipher = data[NONCE_LENGTH_BYTES:]

     decrypted_data = aes_ctr_decrypt(cipher, key, nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES))
-    plaintext = intlist_to_bytes(decrypted_data)
-
-    return plaintext
+    return bytes(decrypted_data)


 RCON = (0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36)
@@ -428,9 +417,7 @@ def key_expansion(data):
     for _ in range(3 if key_size_bytes == 32 else 2 if key_size_bytes == 24 else 0):
         temp = data[-4:]
         data += xor(temp, data[-key_size_bytes: 4 - key_size_bytes])
-    data = data[:expanded_key_size_bytes]
-
-    return data
+    return data[:expanded_key_size_bytes]


 def iter_vector(iv):
@@ -448,7 +435,7 @@ def sub_bytes_inv(data):


 def rotate(data):
-    return data[1:] + [data[0]]
+    return [*data[1:], data[0]]


 def key_schedule_core(data, rcon_iteration):
@@ -460,7 +447,7 @@ def key_schedule_core(data, rcon_iteration):


 def xor(data1, data2):
-    return [x ^ y for x, y in zip(data1, data2)]
+    return [x ^ y for x, y in zip(data1, data2, strict=False)]


 def iter_mix_columns(data, matrix):
@@ -511,7 +498,7 @@ def block_product(block_x, block_y):
     # NIST SP 800-38D, Algorithm 1

     if len(block_x) != BLOCK_SIZE_BYTES or len(block_y) != BLOCK_SIZE_BYTES:
-        raise ValueError("Length of blocks need to be %d bytes" % BLOCK_SIZE_BYTES)
+        raise ValueError(f'Length of blocks need to be {BLOCK_SIZE_BYTES} bytes')

     block_r = [0xE1] + [0] * (BLOCK_SIZE_BYTES - 1)
     block_v = block_y[:]
@@ -534,7 +521,7 @@ def ghash(subkey, data):
     # NIST SP 800-38D, Algorithm 2

     if len(data) % BLOCK_SIZE_BYTES:
-        raise ValueError("Length of data should be %d bytes" % BLOCK_SIZE_BYTES)
+        raise ValueError(f'Length of data should be {BLOCK_SIZE_BYTES} bytes')

     last_y = [0] * BLOCK_SIZE_BYTES
     for i in range(0, len(data), BLOCK_SIZE_BYTES):
@@ -547,19 +534,17 @@ def ghash(subkey, data):
 __all__ = [
     'aes_cbc_decrypt',
     'aes_cbc_decrypt_bytes',
-    'aes_ctr_decrypt',
-    'aes_decrypt_text',
-    'aes_decrypt',
-    'aes_ecb_decrypt',
-    'aes_gcm_decrypt_and_verify',
-    'aes_gcm_decrypt_and_verify_bytes',
-
     'aes_cbc_encrypt',
     'aes_cbc_encrypt_bytes',
+    'aes_ctr_decrypt',
     'aes_ctr_encrypt',
+    'aes_decrypt',
+    'aes_decrypt_text',
+    'aes_ecb_decrypt',
     'aes_ecb_encrypt',
     'aes_encrypt',
+    'aes_gcm_decrypt_and_verify',
+    'aes_gcm_decrypt_and_verify_bytes',
     'key_expansion',
     'pad_block',
     'pkcs7_padding',
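The AES hunks above replace the `bytes_to_intlist`/`intlist_to_bytes` helpers with the built-in `list()`/`bytes()` conversions, which do the same thing. A quick standalone check of that equivalence (values are illustrative):

```python
# bytes -> list of ints, formerly bytes_to_intlist()
assert list(b'\x01\x02\xff') == [1, 2, 255]

# list of ints -> bytes, formerly intlist_to_bytes()
assert bytes([1, 2, 255]) == b'\x01\x02\xff'
```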
@@ -81,10 +81,10 @@ class Cache:

         cachedir = self._get_root_dir()
         if not any((term in cachedir) for term in ('cache', 'tmp')):
-            raise Exception('Not removing directory %s - this does not look like a cache dir' % cachedir)
+            raise Exception(f'Not removing directory {cachedir} - this does not look like a cache dir')

         self._ydl.to_screen(
-            'Removing cache dir %s .' % cachedir, skip_eol=True)
+            f'Removing cache dir {cachedir} .', skip_eol=True)
         if os.path.exists(cachedir):
             self._ydl.to_screen('.', skip_eol=True)
             shutil.rmtree(cachedir)
@@ -1,5 +0,0 @@
-import warnings
-
-warnings.warn(DeprecationWarning(f'{__name__} is deprecated'))
-
-casefold = str.casefold
@@ -1,5 +1,5 @@
+import datetime as dt
 import os
-import sys
 import xml.etree.ElementTree as etree

 from .compat_utils import passthrough_module
@@ -24,36 +24,21 @@ def compat_etree_fromstring(text):
     return etree.XML(text, parser=etree.XMLParser(target=_TreeBuilder()))


-compat_os_name = os._name if os.name == 'java' else os.name
-
-
-if compat_os_name == 'nt':
-    def compat_shlex_quote(s):
-        import re
-        return s if re.match(r'^[-_\w./]+$', s) else '"%s"' % s.replace('"', '\\"')
-else:
-    from shlex import quote as compat_shlex_quote  # noqa: F401
-
-
 def compat_ord(c):
     return c if isinstance(c, int) else ord(c)


-if compat_os_name == 'nt' and sys.version_info < (3, 8):
-    # os.path.realpath on Windows does not follow symbolic links
-    # prior to Python 3.8 (see https://bugs.python.org/issue9949)
-    def compat_realpath(path):
-        while os.path.islink(path):
-            path = os.path.abspath(os.readlink(path))
-        return os.path.realpath(path)
-else:
-    compat_realpath = os.path.realpath
+def compat_datetime_from_timestamp(timestamp):
+    # Calling dt.datetime.fromtimestamp with negative timestamps throws error in Windows
+    # Ref: https://github.com/yt-dlp/yt-dlp/issues/5185, https://github.com/python/cpython/issues/81708,
+    # https://github.com/yt-dlp/yt-dlp/issues/6706#issuecomment-1496842642
+    return (dt.datetime.fromtimestamp(0, dt.timezone.utc) + dt.timedelta(seconds=timestamp))


 # Python 3.8+ does not honor %HOME% on windows, but this breaks compatibility with youtube-dl
 # See https://github.com/yt-dlp/yt-dlp/issues/792
 # https://docs.python.org/3/library/os.path.html#os.path.expanduser
-if compat_os_name in ('nt', 'ce'):
+if os.name in ('nt', 'ce'):
     def compat_expanduser(path):
         HOME = os.environ.get('HOME')
         if not HOME:
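The `compat_datetime_from_timestamp` helper added above sidesteps the Windows limitation by adding a timedelta to the UTC epoch instead of calling `fromtimestamp` with a negative value directly. A quick sketch of the behaviour:

```python
import datetime as dt

def compat_datetime_from_timestamp(timestamp):
    # Equivalent to dt.datetime.fromtimestamp(timestamp, dt.timezone.utc),
    # but avoids the OSError Windows raises for negative (pre-1970) timestamps
    return dt.datetime.fromtimestamp(0, dt.timezone.utc) + dt.timedelta(seconds=timestamp)

print(compat_datetime_from_timestamp(-86400))  # 1969-12-31 00:00:00+00:00
```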
@@ -8,16 +8,14 @@ passthrough_module(__name__, '.._legacy', callback=lambda attr: warnings.warn(
     DeprecationWarning(f'{__name__}.{attr} is deprecated'), stacklevel=6))
 del passthrough_module

-import base64
-import urllib.error
-import urllib.parse
-
-compat_str = str
-
-compat_b64decode = base64.b64decode
-
-compat_urlparse = urllib.parse
-compat_parse_qs = urllib.parse.parse_qs
-compat_urllib_parse_unquote = urllib.parse.unquote
-compat_urllib_parse_urlencode = urllib.parse.urlencode
-compat_urllib_parse_urlparse = urllib.parse.urlparse
+import functools  # noqa: F401
+import os
+
+compat_os_name = os.name
+compat_realpath = os.path.realpath
+
+
+def compat_shlex_quote(s):
+    from ..utils import shell_quote
+    return shell_quote(s)
@@ -30,13 +30,14 @@ from asyncio import run as compat_asyncio_run  # noqa: F401
 from re import Pattern as compat_Pattern  # noqa: F401
 from re import match as compat_Match  # noqa: F401

-from . import compat_expanduser, compat_HTMLParseError, compat_realpath
+from . import compat_expanduser, compat_HTMLParseError
 from .compat_utils import passthrough_module
 from ..dependencies import brotli as compat_brotli  # noqa: F401
 from ..dependencies import websockets as compat_websockets  # noqa: F401
 from ..dependencies.Cryptodome import AES as compat_pycrypto_AES  # noqa: F401
+from ..networking.exceptions import HTTPError as compat_HTTPError

-passthrough_module(__name__, '...utils', ('WINDOWS_VT_MODE', 'windows_enable_vt_mode'))
+passthrough_module(__name__, '...utils', ('windows_enable_vt_mode',))


 # compat_ctypes_WINFUNCTYPE = ctypes.WINFUNCTYPE
@@ -70,7 +71,6 @@ compat_html_parser_HTMLParseError = compat_HTMLParseError
 compat_HTMLParser = compat_html_parser_HTMLParser = html.parser.HTMLParser
 compat_http_client = http.client
 compat_http_server = http.server
-compat_HTTPError = urllib.error.HTTPError
 compat_input = input
 compat_integer_types = (int, )
 compat_itertools_count = itertools.count
@@ -78,7 +78,7 @@ compat_kwargs = lambda kwargs: kwargs
 compat_map = map
 compat_numeric_types = (int, float, complex)
 compat_os_path_expanduser = compat_expanduser
-compat_os_path_realpath = compat_realpath
+compat_os_path_realpath = os.path.realpath
 compat_print = print
 compat_shlex_split = shlex.split
 compat_socket_create_connection = socket.create_connection
@@ -88,7 +88,7 @@ compat_struct_unpack = struct.unpack
 compat_subprocess_get_DEVNULL = lambda: subprocess.DEVNULL
 compat_tokenize_tokenize = tokenize.tokenize
 compat_urllib_error = urllib.error
-compat_urllib_HTTPError = urllib.error.HTTPError
+compat_urllib_HTTPError = compat_HTTPError
 compat_urllib_parse = urllib.parse
 compat_urllib_parse_parse_qs = urllib.parse.parse_qs
 compat_urllib_parse_quote = urllib.parse.quote
@@ -104,5 +104,12 @@ compat_xml_parse_error = compat_xml_etree_ElementTree_ParseError = etree.ParseEr
 compat_xpath = lambda xpath: xpath
 compat_zip = zip
 workaround_optparse_bug9161 = lambda: None
+compat_str = str
+compat_b64decode = base64.b64decode
+compat_urlparse = urllib.parse
+compat_parse_qs = urllib.parse.parse_qs
+compat_urllib_parse_unquote = urllib.parse.unquote
+compat_urllib_parse_urlencode = urllib.parse.urlencode
+compat_urllib_parse_urlparse = urllib.parse.urlparse

 legacy = []
@@ -15,7 +15,7 @@ def get_package_info(module):
         name=getattr(module, '_yt_dlp__identifier', module.__name__),
         version=str(next(filter(None, (
             getattr(module, attr, None)
-            for attr in ('__version__', 'version_string', 'version')
+            for attr in ('_yt_dlp__version', '__version__', 'version_string', 'version')
         )), None)))


@@ -57,7 +57,7 @@ def passthrough_module(parent, child, allowed_attributes=(..., ), *, callback=la
         callback(attr)
         return ret

-    @functools.lru_cache(maxsize=None)
+    @functools.cache
     def from_child(attr):
         nonlocal child
         if attr not in allowed_attributes:
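The decorator swap above is behaviour-preserving: `functools.cache` (Python 3.9+) is defined as `lru_cache(maxsize=None)`. A tiny standalone check, with an illustrative memoised function:

```python
import functools

@functools.cache  # same as @functools.lru_cache(maxsize=None)
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(30))           # 832040, computed with memoisation
print(fib.cache_info())  # hit/miss statistics, as with lru_cache
```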
@@ -1,26 +0,0 @@
-# flake8: noqa: F405
-from functools import *  # noqa: F403
-
-from .compat_utils import passthrough_module
-
-passthrough_module(__name__, 'functools')
-del passthrough_module
-
-try:
-    cache  # >= 3.9
-except NameError:
-    cache = lru_cache(maxsize=None)
-
-try:
-    cached_property  # >= 3.8
-except NameError:
-    class cached_property:
-        def __init__(self, func):
-            update_wrapper(self, func)
-            self.func = func
-
-        def __get__(self, instance, _):
-            if instance is None:
-                return self
-            setattr(instance, self.func.__name__, self.func(instance))
-            return getattr(instance, self.func.__name__)
@@ -1,16 +1,22 @@
-tests = {
-    'webp': lambda h: h[0:4] == b'RIFF' and h[8:] == b'WEBP',
-    'png': lambda h: h[:8] == b'\211PNG\r\n\032\n',
-    'jpeg': lambda h: h[6:10] in (b'JFIF', b'Exif'),
-    'gif': lambda h: h[:6] in (b'GIF87a', b'GIF89a'),
-}
-
-
 def what(file=None, h=None):
     """Detect format of image (Currently supports jpeg, png, webp, gif only)
-    Ref: https://github.com/python/cpython/blob/3.10/Lib/imghdr.py
+    Ref: https://github.com/python/cpython/blob/3.11/Lib/imghdr.py
+    Ref: https://www.w3.org/Graphics/JPEG/itu-t81.pdf
     """
     if h is None:
         with open(file, 'rb') as f:
             h = f.read(12)
-    return next((type_ for type_, test in tests.items() if test(h)), None)
+
+    if h.startswith(b'RIFF') and h.startswith(b'WEBP', 8):
+        return 'webp'
+
+    if h.startswith(b'\x89PNG'):
+        return 'png'
+
+    if h.startswith(b'\xFF\xD8\xFF'):
+        return 'jpeg'
+
+    if h.startswith(b'GIF'):
+        return 'gif'
+
+    return None
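The rewritten `what()` above checks the well-known magic bytes directly instead of a table of lambdas. A self-contained sketch of the same checks; the sample headers are illustrative, not real files:

```python
# Magic-byte checks mirroring the rewritten what() above
png_header = b'\x89PNG\r\n\x1a\n\x00\x00\x00\x0d'
gif_header = b'GIF89a\x01\x00\x01\x00\x00\x00'

assert png_header.startswith(b'\x89PNG')
assert gif_header.startswith(b'GIF')
assert b'RIFF....WEBPVP8 '.startswith(b'WEBP', 8)  # the WEBP marker sits at offset 8
```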
@@ -1,13 +0,0 @@
-# flake8: noqa: F405
-from types import *  # noqa: F403
-
-from .compat_utils import passthrough_module
-
-passthrough_module(__name__, 'types')
-del passthrough_module
-
-try:
-    # NB: pypy has builtin NoneType, so checking NameError won't work
-    from types import NoneType  # >= 3.10
-except ImportError:
-    NoneType = type(None)
@@ -1,7 +1,7 @@
 # flake8: noqa: F405
 from urllib import *  # noqa: F403

-del request
+del request  # noqa: F821
 from . import request  # noqa: F401

 from ..compat_utils import passthrough_module
@@ -7,13 +7,13 @@ passthrough_module(__name__, 'urllib.request')
 del passthrough_module


-from .. import compat_os_name
+import os

-if compat_os_name == 'nt':
-    # On older python versions, proxies are extracted from Windows registry erroneously. [1]
+if os.name == 'nt':
+    # On older Python versions, proxies are extracted from Windows registry erroneously. [1]
     # If the https proxy in the registry does not have a scheme, urllib will incorrectly add https:// to it. [2]
     # It is unlikely that the user has actually set it to be https, so we should be fine to safely downgrade
-    # it to http on these older python versions to avoid issues
+    # it to http on these older Python versions to avoid issues
     # This also applies for ftp proxy type, as ftp:// proxy scheme is not supported.
     # 1: https://github.com/python/cpython/issues/86793
     # 2: https://github.com/python/cpython/blob/51f1ae5ceb0673316c4e4b0175384e892e33cc6e/Lib/urllib/request.py#L2683-L2698
@@ -22,12 +22,8 @@ if compat_os_name == 'nt':

     def getproxies_registry_patched():
         proxies = getproxies_registry()
-        if (
-            sys.version_info >= (3, 10, 5)  # https://docs.python.org/3.10/whatsnew/changelog.html#python-3-10-5-final
-            or (3, 9, 13) <= sys.version_info < (3, 10)  # https://docs.python.org/3.9/whatsnew/changelog.html#python-3-9-13-final
-        ):
-            return proxies
-
+        if sys.version_info < (3, 10, 5):  # https://docs.python.org/3.10/whatsnew/changelog.html#python-3-10-5-final
             for scheme in ('https', 'ftp'):
                 if scheme in proxies and proxies[scheme].startswith(f'{scheme}://'):
                     proxies[scheme] = 'http' + proxies[scheme][len(scheme):]
@@ -37,4 +33,4 @@ if compat_os_name == 'nt':
     def getproxies():
         return getproxies_environment() or getproxies_registry_patched()

-del compat_os_name
+del os
@@ -1,6 +1,10 @@
 import base64
 import collections
 import contextlib
+import datetime as dt
+import functools
+import glob
+import hashlib
 import http.cookiejar
 import http.cookies
 import io
@@ -14,16 +18,13 @@ import sys
 import tempfile
 import time
 import urllib.request
-from datetime import datetime, timedelta, timezone
 from enum import Enum, auto
-from hashlib import pbkdf2_hmac

 from .aes import (
     aes_cbc_decrypt_bytes,
     aes_gcm_decrypt_and_verify_bytes,
     unpad_pkcs7,
 )
-from .compat import functools
 from .dependencies import (
     _SECRETSTORAGE_UNAVAILABLE_REASON,
     secretstorage,
@@ -31,6 +32,8 @@ from .dependencies import (
 )
 from .minicurses import MultilinePrinter, QuietMultilinePrinter
 from .utils import (
+    DownloadError,
+    YoutubeDLError,
     Popen,
     error_to_str,
     expand_path,
@@ -43,7 +46,7 @@ from .utils import (
 from .utils._utils import _YDLLogger
 from .utils.networking import normalize_url

-CHROMIUM_BASED_BROWSERS = {'brave', 'chrome', 'chromium', 'edge', 'opera', 'vivaldi'}
+CHROMIUM_BASED_BROWSERS = {'brave', 'chrome', 'chromium', 'edge', 'opera', 'vivaldi', 'whale'}
 SUPPORTED_BROWSERS = CHROMIUM_BASED_BROWSERS | {'firefox', 'safari'}
@@ -83,7 +86,12 @@ def _create_progress_bar(logger):
     return printer


+class CookieLoadError(YoutubeDLError):
+    pass
+
+
 def load_cookies(cookie_file, browser_specification, ydl):
+    try:
         cookie_jars = []
         if browser_specification is not None:
             browser_name, profile, keyring, container = _parse_browser_specification(*browser_specification)
@@ -101,6 +109,8 @@ def load_cookies(cookie_file, browser_specification, ydl):
         cookie_jars.append(jar)

         return _merge_cookie_jars(cookie_jars)
+    except Exception:
+        raise CookieLoadError('failed to load cookies')


 def extract_cookies_from_browser(browser_name, profile=None, logger=YDLLogger(), *, keyring=None, container=None):
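With the hunk above, any failure during cookie setup is re-raised as `CookieLoadError`, so callers can tell cookie problems apart from other errors. A hedged usage sketch (it assumes the upstream `yt_dlp` package layout; in this repo the module actually lives under the plugin's bundled copy):

```
# Sketch: cookie failures can now be caught as a distinct error type (illustrative).
from yt_dlp.cookies import CookieLoadError, load_cookies


def try_load(cookie_file, browser_spec, ydl):
    try:
        return load_cookies(cookie_file, browser_spec, ydl)
    except CookieLoadError:
        # e.g. an unreadable cookies.txt or an unsupported browser profile
        return None
```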
@@ -115,20 +125,23 @@ def extract_cookies_from_browser(browser_name, profile=None, logger=YDLLogger(),


 def _extract_firefox_cookies(profile, container, logger):
+    MAX_SUPPORTED_DB_SCHEMA_VERSION = 17
+
     logger.info('Extracting cookies from firefox')
     if not sqlite3:
         logger.warning('Cannot extract cookies from firefox without sqlite3 support. '
-                       'Please use a python interpreter compiled with sqlite3 support')
+                       'Please use a Python interpreter compiled with sqlite3 support')
         return YoutubeDLCookieJar()

     if profile is None:
-        search_root = _firefox_browser_dir()
+        search_roots = list(_firefox_browser_dirs())
     elif _is_path(profile):
-        search_root = profile
+        search_roots = [profile]
     else:
-        search_root = os.path.join(_firefox_browser_dir(), profile)
+        search_roots = [os.path.join(path, profile) for path in _firefox_browser_dirs()]
+    search_root = ', '.join(map(repr, search_roots))

-    cookie_database_path = _find_most_recently_used_file(search_root, 'cookies.sqlite', logger)
+    cookie_database_path = _newest(_firefox_cookie_dbs(search_roots))
     if cookie_database_path is None:
         raise FileNotFoundError(f'could not find firefox cookies database in {search_root}')
     logger.debug(f'Extracting cookies from: "{cookie_database_path}"')
@@ -142,15 +155,19 @@ def _extract_firefox_cookies(profile, container, logger):
             identities = json.load(containers).get('identities', [])
         container_id = next((context.get('userContextId') for context in identities if container in (
             context.get('name'),
-            try_call(lambda: re.fullmatch(r'userContext([^\.]+)\.label', context['l10nID']).group())
+            try_call(lambda: re.fullmatch(r'userContext([^\.]+)\.label', context['l10nID']).group()),
         )), None)
         if not isinstance(container_id, int):
             raise ValueError(f'could not find firefox container "{container}" in containers.json')

     with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir:
-        cursor = None
-        try:
         cursor = _open_database_copy(cookie_database_path, tmpdir)
+        with contextlib.closing(cursor.connection):
+            db_schema_version = cursor.execute('PRAGMA user_version;').fetchone()[0]
+            if db_schema_version > MAX_SUPPORTED_DB_SCHEMA_VERSION:
+                logger.warning(f'Possibly unsupported firefox cookies database version: {db_schema_version}')
+            else:
+                logger.debug(f'Firefox cookies database version: {db_schema_version}')
             if isinstance(container_id, int):
                 logger.debug(
                     f'Only loading cookies from firefox container "{container}", ID {container_id}')
@@ -169,6 +186,10 @@ def _extract_firefox_cookies(profile, container, logger):
                 total_cookie_count = len(table)
                 for i, (host, name, value, path, expiry, is_secure) in enumerate(table):
                     progress_bar.print(f'Loading cookie {i: 6d}/{total_cookie_count: 6d}')
+                    # FF142 upgraded cookies DB to schema version 16 and started using milliseconds for cookie expiry
+                    # Ref: https://github.com/mozilla-firefox/firefox/commit/5869af852cd20425165837f6c2d9971f3efba83d
+                    if db_schema_version >= 16 and expiry is not None:
+                        expiry /= 1000
                     cookie = http.cookiejar.Cookie(
                         version=0, name=name, value=value, port=None, port_specified=False,
                         domain=host, domain_specified=bool(host), domain_initial_dot=host.startswith('.'),
@@ -177,17 +198,37 @@ def _extract_firefox_cookies(profile, container, logger):
                     jar.set_cookie(cookie)
             logger.info(f'Extracted {len(jar)} cookies from firefox')
             return jar
-    finally:
-        if cursor is not None:
-            cursor.connection.close()


-def _firefox_browser_dir():
+def _firefox_browser_dirs():
     if sys.platform in ('cygwin', 'win32'):
-        return os.path.expandvars(R'%APPDATA%\Mozilla\Firefox\Profiles')
+        yield from map(os.path.expandvars, (
+            R'%APPDATA%\Mozilla\Firefox\Profiles',
+            R'%LOCALAPPDATA%\Packages\Mozilla.Firefox_n80bbvh6b1yt2\LocalCache\Roaming\Mozilla\Firefox\Profiles',
+        ))

     elif sys.platform == 'darwin':
-        return os.path.expanduser('~/Library/Application Support/Firefox')
+        yield os.path.expanduser('~/Library/Application Support/Firefox/Profiles')

-    return os.path.expanduser('~/.mozilla/firefox')
+    else:
+        yield from map(os.path.expanduser, (
+            # New installations of FF147+ respect the XDG base directory specification
+            # Ref: https://bugzilla.mozilla.org/show_bug.cgi?id=259356
+            os.path.join(_config_home(), 'mozilla/firefox'),
+            # Existing FF version<=146 installations
+            '~/.mozilla/firefox',
+            # Flatpak XDG: https://docs.flatpak.org/en/latest/conventions.html#xdg-base-directories
+            '~/.var/app/org.mozilla.firefox/config/mozilla/firefox',
+            '~/.var/app/org.mozilla.firefox/.mozilla/firefox',
+            # Snap installations do not respect the XDG base directory specification
+            '~/snap/firefox/common/.mozilla/firefox',
+        ))


+def _firefox_cookie_dbs(roots):
+    for root in map(os.path.abspath, roots):
+        for pattern in ('', '*/', 'Profiles/*/'):
+            yield from glob.iglob(os.path.join(root, pattern, 'cookies.sqlite'))


 def _get_chromium_based_browser_settings(browser_name):
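The new `_firefox_browser_dirs()` / `_firefox_cookie_dbs()` pair above turns profile discovery into generators: candidate roots are yielded lazily, globbed for `cookies.sqlite`, and the most recently modified match wins. A standalone sketch of the same pattern (the paths and function names here are illustrative, not the module's API):

```
import glob
import os


def candidate_roots():
    # Yield possible Firefox config roots lazily (a subset, for illustration)
    yield os.path.expanduser('~/.mozilla/firefox')
    yield os.path.expanduser('~/snap/firefox/common/.mozilla/firefox')


def cookie_dbs(roots):
    # Search each root directly, one level down, and under Profiles/*/
    for root in map(os.path.abspath, roots):
        for pattern in ('', '*/', 'Profiles/*/'):
            yield from glob.iglob(os.path.join(root, pattern, 'cookies.sqlite'))


def newest(files):
    # Most recently modified path, or None when nothing matched
    return max(files, key=lambda p: os.lstat(p).st_mtime, default=None)


print(newest(cookie_dbs(candidate_roots())))
```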
@@ -202,6 +243,7 @@ def _get_chromium_based_browser_settings(browser_name):
             'edge': os.path.join(appdata_local, R'Microsoft\Edge\User Data'),
             'opera': os.path.join(appdata_roaming, R'Opera Software\Opera Stable'),
             'vivaldi': os.path.join(appdata_local, R'Vivaldi\User Data'),
+            'whale': os.path.join(appdata_local, R'Naver\Naver Whale\User Data'),
         }[browser_name]

     elif sys.platform == 'darwin':
@@ -213,6 +255,7 @@ def _get_chromium_based_browser_settings(browser_name):
             'edge': os.path.join(appdata, 'Microsoft Edge'),
             'opera': os.path.join(appdata, 'com.operasoftware.Opera'),
             'vivaldi': os.path.join(appdata, 'Vivaldi'),
+            'whale': os.path.join(appdata, 'Naver/Whale'),
         }[browser_name]

     else:
@@ -224,6 +267,7 @@ def _get_chromium_based_browser_settings(browser_name):
             'edge': os.path.join(config, 'microsoft-edge'),
             'opera': os.path.join(config, 'opera'),
             'vivaldi': os.path.join(config, 'vivaldi'),
+            'whale': os.path.join(config, 'naver-whale'),
         }[browser_name]

     # Linux keyring names can be determined by snooping on dbus while opening the browser in KDE:
@@ -235,6 +279,7 @@ def _get_chromium_based_browser_settings(browser_name):
         'edge': 'Microsoft Edge' if sys.platform == 'darwin' else 'Chromium',
         'opera': 'Opera' if sys.platform == 'darwin' else 'Chromium',
         'vivaldi': 'Vivaldi' if sys.platform == 'darwin' else 'Chrome',
+        'whale': 'Whale',
     }[browser_name]

     browsers_without_profiles = {'opera'}
@@ -242,7 +287,7 @@ def _get_chromium_based_browser_settings(browser_name):
     return {
         'browser_dir': browser_dir,
         'keyring_name': keyring_name,
-        'supports_profiles': browser_name not in browsers_without_profiles
+        'supports_profiles': browser_name not in browsers_without_profiles,
     }
@@ -251,7 +296,7 @@ def _extract_chrome_cookies(browser_name, profile, keyring, logger):

     if not sqlite3:
         logger.warning(f'Cannot extract cookies from {browser_name} without sqlite3 support. '
-                       'Please use a python interpreter compiled with sqlite3 support')
+                       'Please use a Python interpreter compiled with sqlite3 support')
         return YoutubeDLCookieJar()

     config = _get_chromium_based_browser_settings(browser_name)
@@ -268,17 +313,23 @@ def _extract_chrome_cookies(browser_name, profile, keyring, logger):
             logger.error(f'{browser_name} does not support profiles')
         search_root = config['browser_dir']

-    cookie_database_path = _find_most_recently_used_file(search_root, 'Cookies', logger)
+    cookie_database_path = _newest(_find_files(search_root, 'Cookies', logger))
     if cookie_database_path is None:
         raise FileNotFoundError(f'could not find {browser_name} cookies database in "{search_root}"')
     logger.debug(f'Extracting cookies from: "{cookie_database_path}"')

-    decryptor = get_cookie_decryptor(config['browser_dir'], config['keyring_name'], logger, keyring=keyring)
-
     with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir:
         cursor = None
         try:
             cursor = _open_database_copy(cookie_database_path, tmpdir)
+
+            # meta_version is necessary to determine if we need to trim the hash prefix from the cookies
+            # Ref: https://chromium.googlesource.com/chromium/src/+/b02dcebd7cafab92770734dc2bc317bd07f1d891/net/extras/sqlite/sqlite_persistent_cookie_store.cc#223
+            meta_version = int(cursor.execute('SELECT value FROM meta WHERE key = "version"').fetchone()[0])
+            decryptor = get_cookie_decryptor(
+                config['browser_dir'], config['keyring_name'], logger,
+                keyring=keyring, meta_version=meta_version)
+
             cursor.connection.text_factory = bytes
             column_names = _get_column_names(cursor, 'cookies')
             secure_column = 'is_secure' if 'is_secure' in column_names else 'secure'
@@ -307,6 +358,12 @@ def _extract_chrome_cookies(browser_name, profile, keyring, logger):
             counts['unencrypted'] = unencrypted_cookies
             logger.debug(f'cookie version breakdown: {counts}')
             return jar
+        except PermissionError as error:
+            if os.name == 'nt' and error.errno == 13:
+                message = 'Could not copy Chrome cookie database. See https://github.com/yt-dlp/yt-dlp/issues/7271 for more info'
+                logger.error(message)
+                raise DownloadError(message)  # force exit
+            raise
         finally:
             if cursor is not None:
                 cursor.connection.close()
@@ -324,6 +381,11 @@ def _process_chrome_cookie(decryptor, host_key, name, value, encrypted_value, pa
     if value is None:
         return is_encrypted, None

+    # In chrome, session cookies have expires_utc set to 0
+    # In our cookie-store, cookies that do not expire should have expires set to None
+    if not expires_utc:
+        expires_utc = None
+
     return is_encrypted, http.cookiejar.Cookie(
         version=0, name=name, value=value, port=None, port_specified=False,
         domain=host_key, domain_specified=bool(host_key), domain_initial_dot=host_key.startswith('.'),
@@ -365,22 +427,23 @@ class ChromeCookieDecryptor:
         raise NotImplementedError('Must be implemented by sub classes')


-def get_cookie_decryptor(browser_root, browser_keyring_name, logger, *, keyring=None):
+def get_cookie_decryptor(browser_root, browser_keyring_name, logger, *, keyring=None, meta_version=None):
     if sys.platform == 'darwin':
-        return MacChromeCookieDecryptor(browser_keyring_name, logger)
+        return MacChromeCookieDecryptor(browser_keyring_name, logger, meta_version=meta_version)
     elif sys.platform in ('win32', 'cygwin'):
-        return WindowsChromeCookieDecryptor(browser_root, logger)
+        return WindowsChromeCookieDecryptor(browser_root, logger, meta_version=meta_version)
-    return LinuxChromeCookieDecryptor(browser_keyring_name, logger, keyring=keyring)
+    return LinuxChromeCookieDecryptor(browser_keyring_name, logger, keyring=keyring, meta_version=meta_version)


 class LinuxChromeCookieDecryptor(ChromeCookieDecryptor):
-    def __init__(self, browser_keyring_name, logger, *, keyring=None):
+    def __init__(self, browser_keyring_name, logger, *, keyring=None, meta_version=None):
         self._logger = logger
         self._v10_key = self.derive_key(b'peanuts')
         self._empty_key = self.derive_key(b'')
         self._cookie_counts = {'v10': 0, 'v11': 0, 'other': 0}
         self._browser_keyring_name = browser_keyring_name
         self._keyring = keyring
+        self._meta_version = meta_version or 0

     @functools.cached_property
     def _v11_key(self):
@@ -409,14 +472,18 @@ class LinuxChromeCookieDecryptor(ChromeCookieDecryptor):

         if version == b'v10':
             self._cookie_counts['v10'] += 1
-            return _decrypt_aes_cbc_multi(ciphertext, (self._v10_key, self._empty_key), self._logger)
+            return _decrypt_aes_cbc_multi(
+                ciphertext, (self._v10_key, self._empty_key), self._logger,
+                hash_prefix=self._meta_version >= 24)

         elif version == b'v11':
             self._cookie_counts['v11'] += 1
             if self._v11_key is None:
                 self._logger.warning('cannot decrypt v11 cookies: no key found', only_once=True)
                 return None
-            return _decrypt_aes_cbc_multi(ciphertext, (self._v11_key, self._empty_key), self._logger)
+            return _decrypt_aes_cbc_multi(
+                ciphertext, (self._v11_key, self._empty_key), self._logger,
+                hash_prefix=self._meta_version >= 24)

         else:
             self._logger.warning(f'unknown cookie version: "{version}"', only_once=True)
@@ -425,11 +492,12 @@ class LinuxChromeCookieDecryptor(ChromeCookieDecryptor):


 class MacChromeCookieDecryptor(ChromeCookieDecryptor):
-    def __init__(self, browser_keyring_name, logger):
+    def __init__(self, browser_keyring_name, logger, meta_version=None):
         self._logger = logger
         password = _get_mac_keyring_password(browser_keyring_name, logger)
         self._v10_key = None if password is None else self.derive_key(password)
         self._cookie_counts = {'v10': 0, 'other': 0}
+        self._meta_version = meta_version or 0

     @staticmethod
     def derive_key(password):
@@ -447,7 +515,8 @@ class MacChromeCookieDecryptor(ChromeCookieDecryptor):
                 self._logger.warning('cannot decrypt v10 cookies: no key found', only_once=True)
                 return None

-            return _decrypt_aes_cbc_multi(ciphertext, (self._v10_key,), self._logger)
+            return _decrypt_aes_cbc_multi(
+                ciphertext, (self._v10_key,), self._logger, hash_prefix=self._meta_version >= 24)

         else:
             self._cookie_counts['other'] += 1
@@ -457,10 +526,11 @@ class MacChromeCookieDecryptor(ChromeCookieDecryptor):


 class WindowsChromeCookieDecryptor(ChromeCookieDecryptor):
-    def __init__(self, browser_root, logger):
+    def __init__(self, browser_root, logger, meta_version=None):
         self._logger = logger
         self._v10_key = _get_windows_v10_key(browser_root, logger)
         self._cookie_counts = {'v10': 0, 'other': 0}
+        self._meta_version = meta_version or 0

     def decrypt(self, encrypted_value):
         version = encrypted_value[:3]
@@ -484,7 +554,9 @@ class WindowsChromeCookieDecryptor(ChromeCookieDecryptor):
             ciphertext = raw_ciphertext[nonce_length:-authentication_tag_length]
             authentication_tag = raw_ciphertext[-authentication_tag_length:]

-            return _decrypt_aes_gcm(ciphertext, self._v10_key, nonce, authentication_tag, self._logger)
+            return _decrypt_aes_gcm(
+                ciphertext, self._v10_key, nonce, authentication_tag, self._logger,
+                hash_prefix=self._meta_version >= 24)

         else:
             self._cookie_counts['other'] += 1
@@ -494,7 +566,7 @@ class WindowsChromeCookieDecryptor(ChromeCookieDecryptor):


 def _extract_safari_cookies(profile, logger):
-    if sys.platform != 'darwin':
+    if sys.platform not in ('darwin', 'ios'):
         raise ValueError(f'unsupported platform: {sys.platform}')

     if profile:
@@ -575,7 +647,7 @@ class DataParser:


 def _mac_absolute_time_to_posix(timestamp):
-    return int((datetime(2001, 1, 1, 0, 0, tzinfo=timezone.utc) + timedelta(seconds=timestamp)).timestamp())
+    return int((dt.datetime(2001, 1, 1, 0, 0, tzinfo=dt.timezone.utc) + dt.timedelta(seconds=timestamp)).timestamp())


 def _parse_safari_cookies_header(data, logger):
@@ -706,22 +778,21 @@ def _get_linux_desktop_environment(env, logger):
     GetDesktopEnvironment
     """
     xdg_current_desktop = env.get('XDG_CURRENT_DESKTOP', None)
-    desktop_session = env.get('DESKTOP_SESSION', None)
+    desktop_session = env.get('DESKTOP_SESSION', '')
     if xdg_current_desktop is not None:
-        xdg_current_desktop = xdg_current_desktop.split(':')[0].strip()
-
-        if xdg_current_desktop == 'Unity':
-            if desktop_session is not None and 'gnome-fallback' in desktop_session:
+        for part in map(str.strip, xdg_current_desktop.split(':')):
+            if part == 'Unity':
+                if 'gnome-fallback' in desktop_session:
                     return _LinuxDesktopEnvironment.GNOME
                 else:
                     return _LinuxDesktopEnvironment.UNITY
-        elif xdg_current_desktop == 'Deepin':
+            elif part == 'Deepin':
                 return _LinuxDesktopEnvironment.DEEPIN
-        elif xdg_current_desktop == 'GNOME':
+            elif part == 'GNOME':
                 return _LinuxDesktopEnvironment.GNOME
-        elif xdg_current_desktop == 'X-Cinnamon':
+            elif part == 'X-Cinnamon':
                 return _LinuxDesktopEnvironment.CINNAMON
-        elif xdg_current_desktop == 'KDE':
+            elif part == 'KDE':
                 kde_version = env.get('KDE_SESSION_VERSION', None)
                 if kde_version == '5':
                     return _LinuxDesktopEnvironment.KDE5
@@ -732,18 +803,16 @@ def _get_linux_desktop_environment(env, logger):
                 else:
                     logger.info(f'unknown KDE version: "{kde_version}". Assuming KDE4')
                     return _LinuxDesktopEnvironment.KDE4
-        elif xdg_current_desktop == 'Pantheon':
+            elif part == 'Pantheon':
                 return _LinuxDesktopEnvironment.PANTHEON
-        elif xdg_current_desktop == 'XFCE':
+            elif part == 'XFCE':
                 return _LinuxDesktopEnvironment.XFCE
-        elif xdg_current_desktop == 'UKUI':
+            elif part == 'UKUI':
                 return _LinuxDesktopEnvironment.UKUI
-        elif xdg_current_desktop == 'LXQt':
+            elif part == 'LXQt':
                 return _LinuxDesktopEnvironment.LXQT
-        else:
-            logger.info(f'XDG_CURRENT_DESKTOP is set to an unknown value: "{xdg_current_desktop}"')
+            logger.debug(f'XDG_CURRENT_DESKTOP is set to an unknown value: "{xdg_current_desktop}"')

-    elif desktop_session is not None:
     if desktop_session == 'deepin':
         return _LinuxDesktopEnvironment.DEEPIN
     elif desktop_session in ('mate', 'gnome'):
@@ -760,9 +829,8 @@ def _get_linux_desktop_environment(env, logger):
     elif desktop_session == 'ukui':
         return _LinuxDesktopEnvironment.UKUI
     else:
-        logger.info(f'DESKTOP_SESSION is set to an unknown value: "{desktop_session}"')
+        logger.debug(f'DESKTOP_SESSION is set to an unknown value: "{desktop_session}"')

-    else:
     if 'GNOME_DESKTOP_SESSION_ID' in env:
         return _LinuxDesktopEnvironment.GNOME
     elif 'KDE_FULL_SESSION' in env:
@@ -770,6 +838,7 @@ def _get_linux_desktop_environment(env, logger):
         return _LinuxDesktopEnvironment.KDE4
     else:
         return _LinuxDesktopEnvironment.KDE3
+
     return _LinuxDesktopEnvironment.OTHER
@@ -794,7 +863,7 @@ def _choose_linux_keyring(logger):
     elif desktop_environment == _LinuxDesktopEnvironment.KDE6:
        linux_keyring = _LinuxKeyring.KWALLET6
     elif desktop_environment in (
-        _LinuxDesktopEnvironment.KDE3, _LinuxDesktopEnvironment.LXQT, _LinuxDesktopEnvironment.OTHER
+        _LinuxDesktopEnvironment.KDE3, _LinuxDesktopEnvironment.LXQT, _LinuxDesktopEnvironment.OTHER,
     ):
         linux_keyring = _LinuxKeyring.BASICTEXT
     else:
@@ -829,7 +898,7 @@ def _get_kwallet_network_wallet(keyring, logger):
            'dbus-send', '--session', '--print-reply=literal',
            f'--dest={service_name}',
            wallet_path,
-           'org.kde.KWallet.networkWallet'
+           'org.kde.KWallet.networkWallet',
        ], text=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)

        if returncode:
@@ -859,7 +928,7 @@ def _get_kwallet_password(browser_keyring_name, keyring, logger):
            'kwallet-query',
            '--read-password', f'{browser_keyring_name} Safe Storage',
            '--folder', f'{browser_keyring_name} Keys',
-           network_wallet
+           network_wallet,
        ], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)

        if returncode:
@@ -899,7 +968,6 @@ def _get_gnome_keyring_password(browser_keyring_name, logger):
        for item in col.get_all_items():
            if item.get_label() == f'{browser_keyring_name} Safe Storage':
                return item.get_secret()
-       else:
        logger.error('failed to read from keyring')
        return b''
@@ -947,7 +1015,7 @@ def _get_windows_v10_key(browser_root, logger):
     References:
         - [1] https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/sync/os_crypt_win.cc
     """
-    path = _find_most_recently_used_file(browser_root, 'Local State', logger)
+    path = _newest(_find_files(browser_root, 'Local State', logger))
     if path is None:
         logger.error('could not find local state file')
         return None
@@ -970,13 +1038,15 @@ def _get_windows_v10_key(browser_root, logger):


 def pbkdf2_sha1(password, salt, iterations, key_length):
-    return pbkdf2_hmac('sha1', password, salt, iterations, key_length)
+    return hashlib.pbkdf2_hmac('sha1', password, salt, iterations, key_length)


-def _decrypt_aes_cbc_multi(ciphertext, keys, logger, initialization_vector=b' ' * 16):
+def _decrypt_aes_cbc_multi(ciphertext, keys, logger, initialization_vector=b' ' * 16, hash_prefix=False):
     for key in keys:
         plaintext = unpad_pkcs7(aes_cbc_decrypt_bytes(ciphertext, key, initialization_vector))
         try:
+            if hash_prefix:
+                return plaintext[32:].decode()
             return plaintext.decode()
         except UnicodeDecodeError:
             pass
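The `hash_prefix` flag added above (and to `_decrypt_aes_gcm` in the next hunk) exists because, per the Chromium source referenced earlier in the diff, cookie stores with meta version 24 and newer prepend a 32-byte SHA-256 hash of the host key to the cookie value before encryption, so it must be stripped after decryption. A standalone illustration of that framing (not the module's API; key handling and names are simplified assumptions):

```
import hashlib


def frame_value(host_key: str, value: str) -> bytes:
    # Newer Chromium stores sha256(host_key) + value as the plaintext to be encrypted
    return hashlib.sha256(host_key.encode()).digest() + value.encode()


def unframe_value(plaintext: bytes, hash_prefix: bool) -> str:
    # Mirrors the diff: drop the 32-byte prefix only when the DB is new enough
    return (plaintext[32:] if hash_prefix else plaintext).decode()


framed = frame_value('.example.com', 'SESSION=abc123')
assert unframe_value(framed, hash_prefix=True) == 'SESSION=abc123'
```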
@@ -984,7 +1054,7 @@ def _decrypt_aes_cbc_multi(ciphertext, keys, logger, initialization_vector=b' '
     return None


-def _decrypt_aes_gcm(ciphertext, key, nonce, authentication_tag, logger):
+def _decrypt_aes_gcm(ciphertext, key, nonce, authentication_tag, logger, hash_prefix=False):
     try:
         plaintext = aes_gcm_decrypt_and_verify_bytes(ciphertext, key, authentication_tag, nonce)
     except ValueError:
@@ -992,6 +1062,8 @@ def _decrypt_aes_gcm(ciphertext, key, nonce, authentication_tag, logger):
         return None

     try:
+        if hash_prefix:
+            return plaintext[32:].decode()
         return plaintext.decode()
     except UnicodeDecodeError:
         logger.warning('failed to decrypt cookie (AES-GCM) because UTF-8 decoding failed. Possibly the key is wrong?', only_once=True)
@@ -1021,11 +1093,12 @@ def _decrypt_windows_dpapi(ciphertext, logger):
         None,  # pvReserved: must be NULL
         None,  # pPromptStruct: information about prompts to display
         0,  # dwFlags
-        ctypes.byref(blob_out)  # pDataOut
+        ctypes.byref(blob_out),  # pDataOut
     )
     if not ret:
-        logger.warning('failed to decrypt with DPAPI', only_once=True)
-        return None
+        message = 'Failed to decrypt with DPAPI. See https://github.com/yt-dlp/yt-dlp/issues/10927 for more info'
+        logger.error(message)
+        raise DownloadError(message)  # force exit

     result = ctypes.string_at(blob_out.pbData, blob_out.cbData)
     ctypes.windll.kernel32.LocalFree(blob_out.pbData)
@@ -1049,17 +1122,20 @@ def _get_column_names(cursor, table_name):
     return [row[1].decode() for row in table_info]


-def _find_most_recently_used_file(root, filename, logger):
+def _newest(files):
+    return max(files, key=lambda path: os.lstat(path).st_mtime, default=None)
+
+
+def _find_files(root, filename, logger):
     # if there are multiple browser profiles, take the most recently used one
-    i, paths = 0, []
+    i = 0
     with _create_progress_bar(logger) as progress_bar:
-        for curr_root, dirs, files in os.walk(root):
+        for curr_root, _, files in os.walk(root):
             for file in files:
                 i += 1
                 progress_bar.print(f'Searching for "{filename}": {i: 6d} files searched')
                 if file == filename:
-                    paths.append(os.path.join(curr_root, file))
-    return None if not paths else max(paths, key=lambda path: os.lstat(path).st_mtime)
+                    yield os.path.join(curr_root, file)


 def _merge_cookie_jars(jars):
@@ -1073,7 +1149,7 @@ def _merge_cookie_jars(jars):


 def _is_path(value):
-    return os.path.sep in value
+    return any(sep in value for sep in (os.path.sep, os.path.altsep) if sep)


 def _parse_browser_specification(browser_name, profile=None, keyring=None, container=None):
@@ -1094,24 +1170,24 @@ class LenientSimpleCookie(http.cookies.SimpleCookie):
     _LEGAL_VALUE_CHARS = _LEGAL_KEY_CHARS + re.escape('(),/<=>?@[]{}')

     _RESERVED = {
-        "expires",
-        "path",
-        "comment",
-        "domain",
-        "max-age",
-        "secure",
-        "httponly",
-        "version",
-        "samesite",
+        'expires',
+        'path',
+        'comment',
+        'domain',
+        'max-age',
+        'secure',
+        'httponly',
+        'version',
+        'samesite',
     }

-    _FLAGS = {"secure", "httponly"}
+    _FLAGS = {'secure', 'httponly'}

     # Added 'bad' group to catch the remaining value
-    _COOKIE_PATTERN = re.compile(r"""
+    _COOKIE_PATTERN = re.compile(r'''
         \s*  # Optional whitespace at start of cookie
         (?P<key>  # Start of group 'key'
-        [""" + _LEGAL_KEY_CHARS + r"""]+?# Any word of at least one letter
+        [''' + _LEGAL_KEY_CHARS + r''']+?# Any word of at least one letter
         )  # End of group 'key'
         (  # Optional group: there may not be a value.
         \s*=\s*  # Equal Sign
@@ -1121,7 +1197,7 @@ class LenientSimpleCookie(http.cookies.SimpleCookie):
         |  # or
         \w{3},\s[\w\d\s-]{9,11}\s[\d:]{8}\sGMT  # Special case for "expires" attr
         |  # or
-        [""" + _LEGAL_VALUE_CHARS + r"""]*  # Any word or empty string
+        [''' + _LEGAL_VALUE_CHARS + r''']*  # Any word or empty string
         )  # End of group 'val'
         |  # or
         (?P<bad>(?:\\;|[^;])*?)  # 'bad' group fallback for invalid values
@@ -1129,7 +1205,7 @@ class LenientSimpleCookie(http.cookies.SimpleCookie):
         )?  # End of optional value group
         \s*  # Any number of spaces.
         (\s+|;|$)  # Ending either at space, semicolon, or EOS.
-        """, re.ASCII | re.VERBOSE)
+        ''', re.ASCII | re.VERBOSE)

     def load(self, data):
         # Workaround for https://github.com/yt-dlp/yt-dlp/issues/4776
@@ -1216,8 +1292,8 @@ class YoutubeDLCookieJar(http.cookiejar.MozillaCookieJar):
     def _really_save(self, f, ignore_discard, ignore_expires):
         now = time.time()
         for cookie in self:
-            if (not ignore_discard and cookie.discard
-                    or not ignore_expires and cookie.is_expired(now)):
+            if ((not ignore_discard and cookie.discard)
+                    or (not ignore_expires and cookie.is_expired(now))):
                 continue
             name, value = cookie.name, cookie.value
             if value is None:
@@ -1225,14 +1301,14 @@ class YoutubeDLCookieJar(http.cookiejar.MozillaCookieJar):
                 # with no name, whereas http.cookiejar regards it as a
                 # cookie with no value.
                 name, value = '', name
-            f.write('%s\n' % '\t'.join((
+            f.write('{}\n'.format('\t'.join((
                 cookie.domain,
                 self._true_or_false(cookie.domain.startswith('.')),
                 cookie.path,
                 self._true_or_false(cookie.secure),
                 str_or_none(cookie.expires, default=''),
-                name, value
-            )))
+                name, value,
+            ))))

     def save(self, filename=None, ignore_discard=True, ignore_expires=True):
         """
@@ -1271,10 +1347,10 @@ class YoutubeDLCookieJar(http.cookiejar.MozillaCookieJar):
                 return line
             cookie_list = line.split('\t')
             if len(cookie_list) != self._ENTRY_LEN:
-                raise http.cookiejar.LoadError('invalid length %d' % len(cookie_list))
+                raise http.cookiejar.LoadError(f'invalid length {len(cookie_list)}')
             cookie = self._CookieFileEntry(*cookie_list)
-            if cookie.expires_at and not cookie.expires_at.isdigit():
-                raise http.cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
+            if cookie.expires_at and not re.fullmatch(r'[0-9]+(?:\.[0-9]+)?', cookie.expires_at):
+                raise http.cookiejar.LoadError(f'invalid expires at {cookie.expires_at}')
             return line

         cf = io.StringIO()
@@ -24,7 +24,7 @@ try:
     from Crypto.Cipher import AES, PKCS1_OAEP, Blowfish, PKCS1_v1_5  # noqa: F401
     from Crypto.Hash import CMAC, SHA1  # noqa: F401
     from Crypto.PublicKey import RSA  # noqa: F401
-except ImportError:
+except (ImportError, OSError):
     __version__ = f'broken {__version__}'.strip()
@@ -43,19 +43,28 @@ except Exception as _err:

 try:
     import sqlite3
+    # We need to get the underlying `sqlite` version, see https://github.com/yt-dlp/yt-dlp/issues/8152
+    sqlite3._yt_dlp__version = sqlite3.sqlite_version
 except ImportError:
-    # although sqlite3 is part of the standard library, it is possible to compile python without
+    # although sqlite3 is part of the standard library, it is possible to compile Python without
     # sqlite support. See: https://github.com/yt-dlp/yt-dlp/issues/544
     sqlite3 = None


 try:
     import websockets
-except (ImportError, SyntaxError):
-    # websockets 3.10 on python 3.6 causes SyntaxError
-    # See https://github.com/yt-dlp/yt-dlp/issues/2633
+except ImportError:
     websockets = None

+try:
+    import urllib3
+except ImportError:
+    urllib3 = None
+
+try:
+    import requests
+except ImportError:
+    requests = None
+
 try:
     import xattr  # xattr or pyxattr
@@ -65,9 +74,19 @@ else:
     if hasattr(xattr, 'set'):  # pyxattr
         xattr._yt_dlp__identifier = 'pyxattr'

+try:
+    import curl_cffi
+except ImportError:
+    curl_cffi = None
+
 from . import Cryptodome

+try:
+    import yt_dlp_ejs
+except ImportError:
+    yt_dlp_ejs = None
+

 all_dependencies = {k: v for k, v in globals().items() if not k.startswith('_')}
 available_dependencies = {k: v for k, v in all_dependencies.items() if v}
@@ -30,11 +30,12 @@ from .hls import HlsFD
 from .http import HttpFD
 from .ism import IsmFD
 from .mhtml import MhtmlFD
-from .niconico import NiconicoDmcFD, NiconicoLiveFD
+from .niconico import NiconicoLiveFD
 from .rtmp import RtmpFD
 from .rtsp import RtspFD
 from .websocket import WebSocketFragmentFD
 from .youtube_live_chat import YoutubeLiveChatFD
+from .bunnycdn import BunnyCdnFD

 PROTOCOL_MAP = {
     'rtmp': RtmpFD,
@@ -49,12 +50,12 @@ PROTOCOL_MAP = {
     'http_dash_segments_generator': DashSegmentsFD,
     'ism': IsmFD,
     'mhtml': MhtmlFD,
-    'niconico_dmc': NiconicoDmcFD,
     'niconico_live': NiconicoLiveFD,
     'fc2_live': FC2LiveFD,
     'websocket_frag': WebSocketFragmentFD,
     'youtube_live_chat': YoutubeLiveChatFD,
     'youtube_live_chat_replay': YoutubeLiveChatFD,
+    'bunnycdn': BunnyCdnFD,
 }
@@ -65,7 +66,6 @@ def shorten_protocol_name(proto, simplify=False):
         'rtmp_ffmpeg': 'rtmpF',
         'http_dash_segments': 'dash',
         'http_dash_segments_generator': 'dashG',
-        'niconico_dmc': 'dmc',
         'websocket_frag': 'WSfrag',
     }
     if simplify:
@@ -99,7 +99,7 @@ def _get_suitable_downloader(info_dict, protocol, params, default):
     if external_downloader is None:
         if info_dict['to_stdout'] and FFmpegFD.can_merge_formats(info_dict, params):
             return FFmpegFD
-    elif external_downloader.lower() != 'native':
+    elif external_downloader.lower() != 'native' and info_dict.get('impersonate') is None:
         ed = get_external_downloader(external_downloader)
         if ed.can_download(info_dict, external_downloader):
             return ed
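For context, `PROTOCOL_MAP` above is how a protocol string (now including `'bunnycdn'`) is resolved to a downloader class such as the `BunnyCdnFD` added in the new file below. A reduced sketch of that dispatch idea (illustrative only, not the project's exact resolution logic, which also considers external downloaders and format merging):

```
# Minimal protocol -> downloader dispatch, mirroring the mapping idea above (illustrative).
class RtmpFD: ...
class HlsFD: ...
class BunnyCdnFD: ...


PROTOCOL_MAP = {
    'rtmp': RtmpFD,
    'm3u8_native': HlsFD,
    'bunnycdn': BunnyCdnFD,
}


def get_downloader(protocol, default=HlsFD):
    # Unknown protocols fall back to a default downloader class
    return PROTOCOL_MAP.get(protocol, default)


print(get_downloader('bunnycdn').__name__)  # BunnyCdnFD
```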
plugins/youtube_download/yt_dlp/downloader/bunnycdn.py (new file, 50 lines)
@@ -0,0 +1,50 @@
+import hashlib
+import random
+import threading
+
+from .common import FileDownloader
+from . import HlsFD
+from ..networking import Request
+from ..networking.exceptions import network_exceptions
+
+
+class BunnyCdnFD(FileDownloader):
+    """
+    Downloads from BunnyCDN with required pings
+    Note, this is not a part of public API, and will be removed without notice.
+    DO NOT USE
+    """
+
+    def real_download(self, filename, info_dict):
+        self.to_screen(f'[{self.FD_NAME}] Downloading from BunnyCDN')
+
+        fd = HlsFD(self.ydl, self.params)
+
+        stop_event = threading.Event()
+        ping_thread = threading.Thread(target=self.ping_thread, args=(stop_event,), kwargs=info_dict['_bunnycdn_ping_data'])
+        ping_thread.start()
+
+        try:
+            return fd.real_download(filename, info_dict)
+        finally:
+            stop_event.set()
+
+    def ping_thread(self, stop_event, url, headers, secret, context_id):
+        # Site sends ping every 4 seconds, but this throttles the download. Pinging every 2 seconds seems to work.
+        ping_interval = 2
+        # Hard coded resolution as it doesn't seem to matter
+        res = 1080
+        paused = 'false'
+        current_time = 0
+
+        while not stop_event.wait(ping_interval):
+            current_time += ping_interval
+
+            time = current_time + round(random.random(), 6)
+            md5_hash = hashlib.md5(f'{secret}_{context_id}_{time}_{paused}_{res}'.encode()).hexdigest()
+            ping_url = f'{url}?hash={md5_hash}&time={time}&paused={paused}&resolution={res}'
+
+            try:
+                self.ydl.urlopen(Request(ping_url, headers=headers)).read()
+            except network_exceptions as e:
+                self.to_screen(f'[{self.FD_NAME}] Ping failed: {e}')
@@ -4,6 +4,7 @@ import functools
 import os
 import random
 import re
+import threading
 import time

 from ..minicurses import (
@@ -19,9 +20,7 @@ from ..utils import (
     Namespace,
     RetryManager,
     classproperty,
-    decodeArgument,
     deprecation_warning,
-    encodeFilename,
     format_bytes,
     join_nonempty,
     parse_bytes,
@@ -32,6 +31,7 @@ from ..utils import (
     timetuple_from_msec,
     try_call,
 )
+from ..utils._utils import _ProgressState


 class FileDownloader:
@@ -62,7 +62,7 @@ class FileDownloader:
     test:               Download only first bytes to test the downloader.
     min_filesize:       Skip files smaller than this size
     max_filesize:       Skip files larger than this size
-    xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
+    progress_delta:     The minimum time between progress output, in seconds
     external_downloader_args:  A dictionary of downloader keys (in lower case)
                         and a list of additional command-line arguments for the
                         executable. Use 'default' as the name for arguments to be
@@ -88,6 +88,9 @@ class FileDownloader:
         self.params = params
         self._prepare_multiline_status()
         self.add_progress_hook(self.report_progress)
+        if self.params.get('progress_delta'):
+            self._progress_delta_lock = threading.Lock()
+            self._progress_delta_time = time.monotonic()

     def _set_ydl(self, ydl):
         self.ydl = ydl
@@ -214,7 +217,7 @@ class FileDownloader:
     def temp_name(self, filename):
         """Returns a temporary filename for the given filename."""
         if self.params.get('nopart', False) or filename == '-' or \
-                (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
+                (os.path.exists(filename) and not os.path.isfile(filename)):
            return filename
        return filename + '.part'

@@ -268,7 +271,7 @@ class FileDownloader:
         """Try to set the last-modified time of the given file."""
         if last_modified_hdr is None:
             return
-        if not os.path.isfile(encodeFilename(filename)):
+        if not os.path.isfile(filename):
             return
         timestr = last_modified_hdr
         if timestr is None:
@@ -330,7 +333,7 @@ class FileDownloader:
                 progress_dict), s.get('progress_idx') or 0)
             self.to_console_title(self.ydl.evaluate_outtmpl(
                 progress_template.get('download-title') or 'yt-dlp %(progress._default_template)s',
-                progress_dict))
+                progress_dict), _ProgressState.from_dict(s), s.get('_percent'))

     def _format_progress(self, *args, **kwargs):
         return self.ydl._format_text(
@@ -354,6 +357,7 @@ class FileDownloader:
                 '_speed_str': self.format_speed(speed).strip(),
                 '_total_bytes_str': _format_bytes('total_bytes'),
                 '_elapsed_str': self.format_seconds(s.get('elapsed')),
+                '_percent': 100.0,
                 '_percent_str': self.format_percent(100),
             })
             self._report_progress_status(s, join_nonempty(
@@ -366,13 +370,21 @@ class FileDownloader:
         if s['status'] != 'downloading':
             return

+        if update_delta := self.params.get('progress_delta'):
+            with self._progress_delta_lock:
+                if time.monotonic() < self._progress_delta_time:
+                    return
+                self._progress_delta_time += update_delta
+
+        progress = try_call(
+            lambda: 100 * s['downloaded_bytes'] / s['total_bytes'],
+            lambda: 100 * s['downloaded_bytes'] / s['total_bytes_estimate'],
+            lambda: s['downloaded_bytes'] == 0 and 0)
         s.update({
             '_eta_str': self.format_eta(s.get('eta')).strip(),
             '_speed_str': self.format_speed(s.get('speed')),
-            '_percent_str': self.format_percent(try_call(
-                lambda: 100 * s['downloaded_bytes'] / s['total_bytes'],
-                lambda: 100 * s['downloaded_bytes'] / s['total_bytes_estimate'],
-                lambda: s['downloaded_bytes'] == 0 and 0)),
+            '_percent': progress,
+            '_percent_str': self.format_percent(progress),
             '_total_bytes_str': _format_bytes('total_bytes'),
             '_total_bytes_estimate_str': _format_bytes('total_bytes_estimate'),
             '_downloaded_bytes_str': _format_bytes('downloaded_bytes'),
@@ -393,7 +405,7 @@ class FileDownloader:

     def report_resuming_byte(self, resume_len):
         """Report attempt to resume at given byte."""
-        self.to_screen('[download] Resuming download at byte %s' % resume_len)
+        self.to_screen(f'[download] Resuming download at byte {resume_len}')

     def report_retry(self, err, count, retries, frag_index=NO_DEFAULT, fatal=True):
         """Report retry"""
@@ -421,13 +433,13 @@ class FileDownloader:
         """
         nooverwrites_and_exists = (
             not self.params.get('overwrites', True)
-            and os.path.exists(encodeFilename(filename))
+            and os.path.exists(filename)
         )

         if not hasattr(filename, 'write'):
             continuedl_and_exists = (
                 self.params.get('continuedl', True)
-                and os.path.isfile(encodeFilename(filename))
+                and os.path.isfile(filename)
                 and not self.params.get('nopart', False)
             )

@@ -437,19 +449,32 @@ class FileDownloader:
                 self._hook_progress({
                     'filename': filename,
                     'status': 'finished',
-                    'total_bytes': os.path.getsize(encodeFilename(filename)),
+                    'total_bytes': os.path.getsize(filename),
                 }, info_dict)
                 self._finish_multiline_status()
                 return True, False

+        sleep_note = ''
         if subtitle:
             sleep_interval = self.params.get('sleep_interval_subtitles') or 0
         else:
             min_sleep_interval = self.params.get('sleep_interval') or 0
+            max_sleep_interval = self.params.get('max_sleep_interval') or 0
+
+            requested_formats = info_dict.get('requested_formats') or [info_dict]
+            if available_at := max(f.get('available_at') or 0 for f in requested_formats):
+                forced_sleep_interval = available_at - int(time.time())
+                if forced_sleep_interval > min_sleep_interval:
+                    sleep_note = 'as required by the site'
+                    min_sleep_interval = forced_sleep_interval
+                    if forced_sleep_interval > max_sleep_interval:
+                        max_sleep_interval = forced_sleep_interval
+
             sleep_interval = random.uniform(
-                min_sleep_interval, self.params.get('max_sleep_interval') or min_sleep_interval)
+                min_sleep_interval, max_sleep_interval or min_sleep_interval)

         if sleep_interval > 0:
-            self.to_screen(f'[download] Sleeping {sleep_interval:.2f} seconds ...')
+            self.to_screen(f'[download] Sleeping {sleep_interval:.2f} seconds {sleep_note}...')
             time.sleep(sleep_interval)

         ret = self.real_download(filename, info_dict)
@@ -478,9 +503,18 @@ class FileDownloader:
         if not self.params.get('verbose', False):
             return

-        str_args = [decodeArgument(a) for a in args]
-
         if exe is None:
-            exe = os.path.basename(str_args[0])
+            exe = os.path.basename(args[0])

-        self.write_debug(f'{exe} command line: {shell_quote(str_args)}')
+        self.write_debug(f'{exe} command line: {shell_quote(args)}')
+
+    def _get_impersonate_target(self, info_dict):
+        impersonate = info_dict.get('impersonate')
+        if impersonate is None:
+            return None
+        available_target, requested_targets = self.ydl._parse_impersonate_targets(impersonate)
+        if available_target:
+            return available_target
+        elif requested_targets:
+            self.report_warning(self.ydl._unavailable_targets_message(requested_targets))
+        return None
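A note on the `progress_delta` change above: the progress hook simply drops updates that arrive before the next scheduled output time, then advances that deadline by the configured delta under a lock. The following is a minimal standalone sketch of the same throttling pattern; the class name and values here are illustrative, not yt-dlp API.

```python
import threading
import time


class ProgressThrottle:
    """Drop progress updates that arrive before the next scheduled output time."""

    def __init__(self, delta):
        self._delta = delta
        self._lock = threading.Lock()
        self._next_time = time.monotonic()

    def should_report(self):
        with self._lock:
            if time.monotonic() < self._next_time:
                return False               # too soon, skip this update
            self._next_time += self._delta  # schedule the next allowed output
            return True


throttle = ProgressThrottle(delta=0.5)
for _ in range(10):
    if throttle.should_report():
        print('progress line')
    time.sleep(0.1)
```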
@@ -3,7 +3,7 @@ import urllib.parse

 from . import get_suitable_downloader
 from .fragment import FragmentFD
-from ..utils import update_url_query, urljoin
+from ..utils import ReExtractInfo, update_url_query, urljoin


 class DashSegmentsFD(FragmentFD):
@@ -15,16 +15,24 @@ class DashSegmentsFD(FragmentFD):
     FD_NAME = 'dashsegments'

     def real_download(self, filename, info_dict):
-        if info_dict.get('is_live') and set(info_dict['protocol'].split('+')) != {'http_dash_segments_generator'}:
+        if 'http_dash_segments_generator' in info_dict['protocol'].split('+'):
+            real_downloader = None  # No external FD can support --live-from-start
+        else:
+            if info_dict.get('is_live'):
                 self.report_error('Live DASH videos are not supported')
-
-        real_start = time.time()
             real_downloader = get_suitable_downloader(
                 info_dict, self.params, None, protocol='dash_frag_urls', to_stdout=(filename == '-'))

+        real_start = time.time()
+
         requested_formats = [{**info_dict, **fmt} for fmt in info_dict.get('requested_formats', [])]
         args = []
         for fmt in requested_formats or [info_dict]:
+            # Re-extract if --load-info-json is used and 'fragments' was originally a generator
+            # See https://github.com/yt-dlp/yt-dlp/issues/13906
+            if isinstance(fmt['fragments'], str):
+                raise ReExtractInfo('the stream needs to be re-extracted', expected=True)
+
             try:
                 fragment_count = 1 if self.params.get('test') else len(fmt['fragments'])
             except TypeError:
@@ -1,4 +1,5 @@
 import enum
+import functools
 import json
 import os
 import re
@@ -9,7 +10,6 @@ import time
 import uuid

 from .fragment import FragmentFD
-from ..compat import functools
 from ..networking import Request
 from ..postprocessor.ffmpeg import EXT_TO_OUT_FORMATS, FFmpegPostProcessor
 from ..utils import (
@@ -23,7 +23,6 @@ from ..utils import (
     cli_valueless_option,
     determine_ext,
     encodeArgument,
-    encodeFilename,
     find_available_port,
     remove_end,
     traverse_obj,
@@ -55,7 +54,7 @@ class ExternalFD(FragmentFD):
                 # correct and expected termination thus all postprocessing
                 # should take place
                 retval = 0
-                self.to_screen('[%s] Interrupted by user' % self.get_basename())
+                self.to_screen(f'[{self.get_basename()}] Interrupted by user')
             finally:
                 if self._cookies_tempfile:
                     self.try_remove(self._cookies_tempfile)
@@ -67,7 +66,7 @@ class ExternalFD(FragmentFD):
            'elapsed': time.time() - started,
         }
         if filename != '-':
-            fsize = os.path.getsize(encodeFilename(tmpfilename))
+            fsize = os.path.getsize(tmpfilename)
             self.try_rename(tmpfilename, filename)
             status.update({
                 'downloaded_bytes': fsize,
@@ -108,7 +107,7 @@ class ExternalFD(FragmentFD):
         return all((
             not info_dict.get('to_stdout') or Features.TO_STDOUT in cls.SUPPORTED_FEATURES,
             '+' not in info_dict['protocol'] or Features.MULTIPLE_FORMATS in cls.SUPPORTED_FEATURES,
-            not traverse_obj(info_dict, ('hls_aes', ...), 'extra_param_to_segment_url'),
+            not traverse_obj(info_dict, ('hls_aes', ...), 'extra_param_to_segment_url', 'extra_param_to_key_url'),
             all(proto in cls.SUPPORTED_PROTOCOLS for proto in info_dict['protocol'].split('+')),
         ))

@@ -172,7 +171,7 @@ class ExternalFD(FragmentFD):
         decrypt_fragment = self.decrypter(info_dict)
         dest, _ = self.sanitize_open(tmpfilename, 'wb')
         for frag_index, fragment in enumerate(info_dict['fragments']):
-            fragment_filename = '%s-Frag%d' % (tmpfilename, frag_index)
+            fragment_filename = f'{tmpfilename}-Frag{frag_index}'
             try:
                 src, _ = self.sanitize_open(fragment_filename, 'rb')
             except OSError as err:
@@ -184,9 +183,9 @@ class ExternalFD(FragmentFD):
             dest.write(decrypt_fragment(fragment, src.read()))
             src.close()
             if not self.params.get('keep_fragments', False):
-                self.try_remove(encodeFilename(fragment_filename))
+                self.try_remove(fragment_filename)
         dest.close()
-        self.try_remove(encodeFilename('%s.frag.urls' % tmpfilename))
+        self.try_remove(f'{tmpfilename}.frag.urls')
         return 0

     def _call_process(self, cmd, info_dict):
@@ -335,12 +334,12 @@ class Aria2cFD(ExternalFD):
         cmd += ['--auto-file-renaming=false']

         if 'fragments' in info_dict:
-            cmd += ['--file-allocation=none', '--uri-selector=inorder']
-            url_list_file = '%s.frag.urls' % tmpfilename
+            cmd += ['--uri-selector=inorder']
+            url_list_file = f'{tmpfilename}.frag.urls'
             url_list = []
             for frag_index, fragment in enumerate(info_dict['fragments']):
-                fragment_filename = '%s-Frag%d' % (os.path.basename(tmpfilename), frag_index)
-                url_list.append('%s\n\tout=%s' % (fragment['url'], self._aria2c_filename(fragment_filename)))
+                fragment_filename = f'{os.path.basename(tmpfilename)}-Frag{frag_index}'
+                url_list.append('{}\n\tout={}'.format(fragment['url'], self._aria2c_filename(fragment_filename)))
             stream, _ = self.sanitize_open(url_list_file, 'wb')
             stream.write('\n'.join(url_list).encode())
             stream.close()
@@ -357,7 +356,7 @@ class Aria2cFD(ExternalFD):
             'id': sanitycheck,
             'method': method,
             'params': [f'token:{rpc_secret}', *params],
-        }).encode('utf-8')
+        }).encode()
         request = Request(
             f'http://localhost:{rpc_port}/jsonrpc',
             data=d, headers={
@@ -416,7 +415,7 @@ class Aria2cFD(ExternalFD):
                     'total_bytes_estimate': total,
                     'eta': (total - downloaded) / (speed or 1),
                     'fragment_index': min(frag_count, len(completed) + 1) if fragmented else None,
-                    'elapsed': time.time() - started
+                    'elapsed': time.time() - started,
                 })
                 self._hook_progress(status, info_dict)

@@ -491,30 +490,16 @@ class FFmpegFD(ExternalFD):
         if not self.params.get('verbose'):
             args += ['-hide_banner']

-        args += traverse_obj(info_dict, ('downloader_options', 'ffmpeg_args'), default=[])
-
-        # These exists only for compatibility. Extractors should use
-        # info_dict['downloader_options']['ffmpeg_args'] instead
-        args += info_dict.get('_ffmpeg_args') or []
-        seekable = info_dict.get('_seekable')
-        if seekable is not None:
-            # setting -seekable prevents ffmpeg from guessing if the server
-            # supports seeking(by adding the header `Range: bytes=0-`), which
-            # can cause problems in some cases
-            # https://github.com/ytdl-org/youtube-dl/issues/11800#issuecomment-275037127
-            # http://trac.ffmpeg.org/ticket/6125#comment:10
-            args += ['-seekable', '1' if seekable else '0']
-
         env = None
         proxy = self.params.get('proxy')
         if proxy:
-            if not re.match(r'^[\da-zA-Z]+://', proxy):
-                proxy = 'http://%s' % proxy
+            if not re.match(r'[\da-zA-Z]+://', proxy):
+                proxy = f'http://{proxy}'

             if proxy.startswith('socks'):
                 self.report_warning(
-                    '%s does not support SOCKS proxies. Downloading is likely to fail. '
-                    'Consider adding --hls-prefer-native to your command.' % self.get_basename())
+                    f'{self.get_basename()} does not support SOCKS proxies. Downloading is likely to fail. '
+                    'Consider adding --hls-prefer-native to your command.')

             # Since December 2015 ffmpeg supports -http_proxy option (see
             # http://git.videolan.org/?p=ffmpeg.git;a=commit;h=b4eb1f29ebddd60c41a2eb39f5af701e38e0d3fd)
@@ -524,17 +509,39 @@ class FFmpegFD(ExternalFD):
             env['HTTP_PROXY'] = proxy
             env['http_proxy'] = proxy

-        protocol = info_dict.get('protocol')
+        start_time, end_time = info_dict.get('section_start') or 0, info_dict.get('section_end')

+        fallback_input_args = traverse_obj(info_dict, ('downloader_options', 'ffmpeg_args', ...))
+
+        selected_formats = info_dict.get('requested_formats') or [info_dict]
+        for i, fmt in enumerate(selected_formats):
+            is_http = re.match(r'https?://', fmt['url'])
+            cookies = self.ydl.cookiejar.get_cookies_for_url(fmt['url']) if is_http else []
+            if cookies:
+                args.extend(['-cookies', ''.join(
+                    f'{cookie.name}={cookie.value}; path={cookie.path}; domain={cookie.domain};\r\n'
+                    for cookie in cookies)])
+            if fmt.get('http_headers') and is_http:
+                # Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg:
+                # [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
+                args.extend(['-headers', ''.join(f'{key}: {val}\r\n' for key, val in fmt['http_headers'].items())])
+
+            if start_time:
+                args += ['-ss', str(start_time)]
+            if end_time:
+                args += ['-t', str(end_time - start_time)]
+
+            protocol = fmt.get('protocol')
+
             if protocol == 'rtmp':
-                player_url = info_dict.get('player_url')
-                page_url = info_dict.get('page_url')
-                app = info_dict.get('app')
-                play_path = info_dict.get('play_path')
-                tc_url = info_dict.get('tc_url')
-                flash_version = info_dict.get('flash_version')
-                live = info_dict.get('rtmp_live', False)
-                conn = info_dict.get('rtmp_conn')
+                player_url = fmt.get('player_url')
+                page_url = fmt.get('page_url')
+                app = fmt.get('app')
+                play_path = fmt.get('play_path')
+                tc_url = fmt.get('tc_url')
+                flash_version = fmt.get('flash_version')
+                live = fmt.get('rtmp_live', False)
+                conn = fmt.get('rtmp_conn')
                 if player_url is not None:
                     args += ['-rtmp_swfverify', player_url]
                 if page_url is not None:
@@ -555,27 +562,29 @@ class FFmpegFD(ExternalFD):
                 elif isinstance(conn, str):
                     args += ['-rtmp_conn', conn]

-            start_time, end_time = info_dict.get('section_start') or 0, info_dict.get('section_end')
-
-            selected_formats = info_dict.get('requested_formats') or [info_dict]
-            for i, fmt in enumerate(selected_formats):
-                is_http = re.match(r'^https?://', fmt['url'])
-                cookies = self.ydl.cookiejar.get_cookies_for_url(fmt['url']) if is_http else []
-                if cookies:
-                    args.extend(['-cookies', ''.join(
-                        f'{cookie.name}={cookie.value}; path={cookie.path}; domain={cookie.domain};\r\n'
-                        for cookie in cookies)])
-                if fmt.get('http_headers') and is_http:
-                    # Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv:
-                    # [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
-                    args.extend(['-headers', ''.join(f'{key}: {val}\r\n' for key, val in fmt['http_headers'].items())])
-
-                if start_time:
-                    args += ['-ss', str(start_time)]
-                if end_time:
-                    args += ['-t', str(end_time - start_time)]
-
-                args += self._configuration_args((f'_i{i + 1}', '_i')) + ['-i', fmt['url']]
+            elif protocol == 'http_dash_segments' and info_dict.get('is_live'):
+                # ffmpeg may try to read past the latest available segments for
+                # live DASH streams unless we pass `-re`. In modern ffmpeg, this
+                # is an alias of `-readrate 1`, but `-readrate` was not added
+                # until ffmpeg 5.0, so we must stick to using `-re`
+                args += ['-re']
+
+            url = fmt['url']
+            if self.params.get('enable_file_urls') and url.startswith('file:'):
+                # The default protocol_whitelist is 'file,crypto,data' when reading local m3u8 URLs,
+                # so only local segments can be read unless we also include 'http,https,tcp,tls'
+                args += ['-protocol_whitelist', 'file,crypto,data,http,https,tcp,tls']
+                # ffmpeg incorrectly handles 'file:' URLs by only removing the
+                # 'file:' prefix and treating the rest as if it's a normal filepath.
+                # FFmpegPostProcessor also depends on this behavior, so we need to fixup the URLs:
+                # - On Windows/Cygwin, replace 'file:///' and 'file://localhost/' with 'file:'
+                # - On *nix, replace 'file://localhost/' with 'file:/'
+                # Ref: https://github.com/yt-dlp/yt-dlp/issues/13781
+                #      https://trac.ffmpeg.org/ticket/2702
+                url = re.sub(r'^file://(?:localhost)?/', 'file:' if os.name == 'nt' else 'file:/', url)
+
+            args += traverse_obj(fmt, ('downloader_options', 'ffmpeg_args', ...)) or fallback_input_args
+            args += [*self._configuration_args((f'_i{i + 1}', '_i')), '-i', url]

         if not (start_time or end_time) or not self.params.get('force_keyframes_at_cuts'):
             args += ['-c', 'copy']
@@ -615,10 +624,12 @@ class FFmpegFD(ExternalFD):
         else:
             args += ['-f', EXT_TO_OUT_FORMATS.get(ext, ext)]

+        args += traverse_obj(info_dict, ('downloader_options', 'ffmpeg_args_out', ...))
+
         args += self._configuration_args(('_o1', '_o', ''))

         args = [encodeArgument(opt) for opt in args]
-        args.append(encodeFilename(ffpp._ffmpeg_filename_argument(tmpfilename), True))
+        args.append(ffpp._ffmpeg_filename_argument(tmpfilename))
         self._debug_cmd(args)

         piped = any(fmt['url'] in ('-', 'pipe:') for fmt in selected_formats)
@@ -641,10 +652,6 @@ class FFmpegFD(ExternalFD):
         return retval


-class AVconvFD(FFmpegFD):
-    pass
-
-
 _BY_NAME = {
     klass.get_basename(): klass
     for name, klass in globals().items()
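For context on the cookie/header handling moved into the per-format loop above: each cookie and HTTP header is serialised with a trailing CRLF before being handed to ffmpeg's `-cookies` / `-headers` options, which avoids ffmpeg's "No trailing CRLF found in HTTP header" warning. A rough stand-alone sketch of that formatting outside of yt-dlp; the header values and the stand-in cookie object are made up for illustration.

```python
http_headers = {'User-Agent': 'Mozilla/5.0', 'Referer': 'https://example.com/'}

args = ['ffmpeg']
# Trailing \r\n after each header keeps ffmpeg from complaining about missing CRLF
args += ['-headers', ''.join(f'{key}: {val}\r\n' for key, val in http_headers.items())]


class FakeCookie:
    # Stand-in for cookiejar entries; the real code iterates cookiejar.get_cookies_for_url(url)
    name, value, path, domain = 'session', 'abc123', '/', 'example.com'


cookies = [FakeCookie()]
if cookies:
    args += ['-cookies', ''.join(
        f'{c.name}={c.value}; path={c.path}; domain={c.domain};\r\n' for c in cookies)]

print(args)
```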
@@ -67,12 +67,12 @@ class FlvReader(io.BytesIO):
         self.read_bytes(3)
         quality_entry_count = self.read_unsigned_char()
         # QualityEntryCount
-        for i in range(quality_entry_count):
+        for _ in range(quality_entry_count):
             self.read_string()

         segment_run_count = self.read_unsigned_int()
         segments = []
-        for i in range(segment_run_count):
+        for _ in range(segment_run_count):
             first_segment = self.read_unsigned_int()
             fragments_per_segment = self.read_unsigned_int()
             segments.append((first_segment, fragments_per_segment))
@@ -91,12 +91,12 @@ class FlvReader(io.BytesIO):

         quality_entry_count = self.read_unsigned_char()
         # QualitySegmentUrlModifiers
-        for i in range(quality_entry_count):
+        for _ in range(quality_entry_count):
             self.read_string()

         fragments_count = self.read_unsigned_int()
         fragments = []
-        for i in range(fragments_count):
+        for _ in range(fragments_count):
             first = self.read_unsigned_int()
             first_ts = self.read_unsigned_long_long()
             duration = self.read_unsigned_int()
@@ -135,11 +135,11 @@ class FlvReader(io.BytesIO):
         self.read_string()  # MovieIdentifier
         server_count = self.read_unsigned_char()
         # ServerEntryTable
-        for i in range(server_count):
+        for _ in range(server_count):
             self.read_string()
         quality_count = self.read_unsigned_char()
         # QualityEntryTable
-        for i in range(quality_count):
+        for _ in range(quality_count):
             self.read_string()
         # DrmData
         self.read_string()
@@ -148,15 +148,15 @@ class FlvReader(io.BytesIO):

         segments_count = self.read_unsigned_char()
         segments = []
-        for i in range(segments_count):
-            box_size, box_type, box_data = self.read_box_info()
+        for _ in range(segments_count):
+            _box_size, box_type, box_data = self.read_box_info()
             assert box_type == b'asrt'
             segment = FlvReader(box_data).read_asrt()
             segments.append(segment)
         fragments_run_count = self.read_unsigned_char()
         fragments = []
-        for i in range(fragments_run_count):
-            box_size, box_type, box_data = self.read_box_info()
+        for _ in range(fragments_run_count):
+            _box_size, box_type, box_data = self.read_box_info()
             assert box_type == b'afrt'
             fragments.append(FlvReader(box_data).read_afrt())

@@ -167,7 +167,7 @@ class FlvReader(io.BytesIO):
         }

     def read_bootstrap_info(self):
-        total_size, box_type, box_data = self.read_box_info()
+        _, box_type, box_data = self.read_box_info()
         assert box_type == b'abst'
         return FlvReader(box_data).read_abst()

@@ -309,7 +309,7 @@ class F4mFD(FragmentFD):
     def real_download(self, filename, info_dict):
         man_url = info_dict['url']
         requested_bitrate = info_dict.get('tbr')
-        self.to_screen('[%s] Downloading f4m manifest' % self.FD_NAME)
+        self.to_screen(f'[{self.FD_NAME}] Downloading f4m manifest')

         urlh = self.ydl.urlopen(self._prepare_url(info_dict, man_url))
         man_url = urlh.url
@@ -324,10 +324,10 @@ class F4mFD(FragmentFD):
         if requested_bitrate is None or len(formats) == 1:
             # get the best format
             formats = sorted(formats, key=lambda f: f[0])
-            rate, media = formats[-1]
+            _, media = formats[-1]
         else:
-            rate, media = list(filter(
-                lambda f: int(f[0]) == requested_bitrate, formats))[0]
+            _, media = next(filter(
+                lambda f: int(f[0]) == requested_bitrate, formats))

         # Prefer baseURL for relative URLs as per 11.2 of F4M 3.0 spec.
         man_base_url = get_base_url(doc) or man_url
@@ -9,11 +9,11 @@ import time
 from .common import FileDownloader
 from .http import HttpFD
 from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
-from ..compat import compat_os_name
 from ..networking import Request
 from ..networking.exceptions import HTTPError, IncompleteRead
-from ..utils import DownloadError, RetryManager, encodeFilename, traverse_obj
+from ..utils import DownloadError, RetryManager, traverse_obj
 from ..utils.networking import HTTPHeaderDict
+from ..utils.progress import ProgressCalculator


 class HttpQuietDownloader(HttpFD):
@@ -151,7 +151,7 @@ class FragmentFD(FileDownloader):
         if self.__do_ytdl_file(ctx):
             self._write_ytdl_file(ctx)
         if not self.params.get('keep_fragments', False):
-            self.try_remove(encodeFilename(ctx['fragment_filename_sanitized']))
+            self.try_remove(ctx['fragment_filename_sanitized'])
         del ctx['fragment_filename_sanitized']

     def _prepare_frag_download(self, ctx):
@@ -187,7 +187,7 @@ class FragmentFD(FileDownloader):
         })

         if self.__do_ytdl_file(ctx):
-            ytdl_file_exists = os.path.isfile(encodeFilename(self.ytdl_filename(ctx['filename'])))
+            ytdl_file_exists = os.path.isfile(self.ytdl_filename(ctx['filename']))
             continuedl = self.params.get('continuedl', True)
             if continuedl and ytdl_file_exists:
                 self._read_ytdl_file(ctx)
@@ -198,7 +198,7 @@ class FragmentFD(FileDownloader):
                         '.ytdl file is corrupt' if is_corrupt else
                         'Inconsistent state of incomplete fragment download')
                     self.report_warning(
-                        '%s. Restarting from the beginning ...' % message)
+                        f'{message}. Restarting from the beginning ...')
                     ctx['fragment_index'] = resume_len = 0
                     if 'ytdl_corrupt' in ctx:
                         del ctx['ytdl_corrupt']
@@ -226,8 +226,7 @@ class FragmentFD(FileDownloader):
         resume_len = ctx['complete_frags_downloaded_bytes']
         total_frags = ctx['total_frags']
         ctx_id = ctx.get('ctx_id')
-        # This dict stores the download progress, it's updated by the progress
-        # hook
+        # Stores the download progress, updated by the progress hook
         state = {
             'status': 'downloading',
             'downloaded_bytes': resume_len,
@@ -237,14 +236,8 @@ class FragmentFD(FileDownloader):
             'tmpfilename': ctx['tmpfilename'],
         }

-        start = time.time()
-        ctx.update({
-            'started': start,
-            'fragment_started': start,
-            # Amount of fragment's bytes downloaded by the time of the previous
-            # frag progress hook invocation
-            'prev_frag_downloaded_bytes': 0,
-        })
+        ctx['started'] = time.time()
+        progress = ProgressCalculator(resume_len)

         def frag_progress_hook(s):
             if s['status'] not in ('downloading', 'finished'):
@@ -259,38 +252,35 @@ class FragmentFD(FileDownloader):
             state['max_progress'] = ctx.get('max_progress')
             state['progress_idx'] = ctx.get('progress_idx')

-            time_now = time.time()
-            state['elapsed'] = time_now - start
+            state['elapsed'] = progress.elapsed
             frag_total_bytes = s.get('total_bytes') or 0
             s['fragment_info_dict'] = s.pop('info_dict', {})

+            # XXX: Fragment resume is not accounted for here
             if not ctx['live']:
                 estimated_size = (
                     (ctx['complete_frags_downloaded_bytes'] + frag_total_bytes)
                     / (state['fragment_index'] + 1) * total_frags)
-                state['total_bytes_estimate'] = estimated_size
+                progress.total = estimated_size
+                progress.update(s.get('downloaded_bytes'))
+                state['total_bytes_estimate'] = progress.total
+            else:
+                progress.update(s.get('downloaded_bytes'))

             if s['status'] == 'finished':
                 state['fragment_index'] += 1
                 ctx['fragment_index'] = state['fragment_index']
-                state['downloaded_bytes'] += frag_total_bytes - ctx['prev_frag_downloaded_bytes']
-                ctx['complete_frags_downloaded_bytes'] = state['downloaded_bytes']
-                ctx['speed'] = state['speed'] = self.calc_speed(
-                    ctx['fragment_started'], time_now, frag_total_bytes)
-                ctx['fragment_started'] = time.time()
-                ctx['prev_frag_downloaded_bytes'] = 0
-            else:
-                frag_downloaded_bytes = s['downloaded_bytes']
-                state['downloaded_bytes'] += frag_downloaded_bytes - ctx['prev_frag_downloaded_bytes']
-                ctx['speed'] = state['speed'] = self.calc_speed(
-                    ctx['fragment_started'], time_now, frag_downloaded_bytes - ctx.get('frag_resume_len', 0))
-                if not ctx['live']:
-                    state['eta'] = self.calc_eta(state['speed'], estimated_size - state['downloaded_bytes'])
-                ctx['prev_frag_downloaded_bytes'] = frag_downloaded_bytes
+                progress.thread_reset()
+
+            state['downloaded_bytes'] = ctx['complete_frags_downloaded_bytes'] = progress.downloaded
+            state['speed'] = ctx['speed'] = progress.speed.smooth
+            state['eta'] = progress.eta.smooth
+
             self._hook_progress(state, info_dict)

         ctx['dl'].add_progress_hook(frag_progress_hook)

-        return start
+        return ctx['started']

     def _finish_frag_download(self, ctx, info_dict):
         ctx['dest_stream'].close()
@@ -312,7 +302,7 @@ class FragmentFD(FileDownloader):
         elif to_file:
             self.try_rename(ctx['tmpfilename'], ctx['filename'])
             filetime = ctx.get('fragment_filetime')
-            if self.params.get('updatetime', True) and filetime:
+            if self.params.get('updatetime') and filetime:
                 with contextlib.suppress(Exception):
                     os.utime(ctx['filename'], (time.time(), filetime))

@@ -375,10 +365,10 @@ class FragmentFD(FileDownloader):
         return decrypt_fragment

     def download_and_append_fragments_multiple(self, *args, **kwargs):
-        '''
+        """
         @params (ctx1, fragments1, info_dict1), (ctx2, fragments2, info_dict2), ...
         all args must be either tuple or list
-        '''
+        """
         interrupt_trigger = [True]
         max_progress = len(args)
         if max_progress == 1:
@@ -399,7 +389,7 @@ class FragmentFD(FileDownloader):
             def __exit__(self, exc_type, exc_val, exc_tb):
                 pass

-        if compat_os_name == 'nt':
+        if os.name == 'nt':
            def future_result(future):
                while True:
                    try:
@@ -433,7 +423,7 @@ class FragmentFD(FileDownloader):
             finally:
                 tpe.shutdown(wait=True)
             if not interrupt_trigger[0] and not is_live:
-                raise KeyboardInterrupt()
+                raise KeyboardInterrupt
             # we expect the user wants to stop and DO WANT the preceding postprocessors to run;
             # so returning a intermediate result here instead of KeyboardInterrupt on live
             return result
@@ -500,7 +490,6 @@ class FragmentFD(FileDownloader):
             download_fragment(fragment, ctx_copy)
             return fragment, fragment['frag_index'], ctx_copy.get('fragment_filename_sanitized')

-        self.report_warning('The download speed shown is only of one thread. This is a known issue')
        with tpe or concurrent.futures.ThreadPoolExecutor(max_workers) as pool:
            try:
                for fragment, frag_index, frag_filename in pool.map(_download_fragment, fragments):
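The fragment-progress rewrite above replaces the hand-rolled speed/ETA bookkeeping with `ProgressCalculator`. Going only by the attributes this diff actually uses (`update()`, `total`, `downloaded`, `elapsed`, `speed.smooth`, `eta.smooth`, `thread_reset()`), usage looks roughly like the sketch below; it assumes a recent yt-dlp checkout is importable and is not a description of the full API.

```python
from yt_dlp.utils.progress import ProgressCalculator

progress = ProgressCalculator(0)   # seeded with the resume length in bytes
progress.total = 10_000_000        # estimated total size, refined as fragments finish

for downloaded_so_far in (250_000, 500_000, 750_000):
    # feed the bytes downloaded so far by the current fragment/thread
    progress.update(downloaded_so_far)
    print(progress.downloaded, progress.speed.smooth, progress.eta.smooth, progress.elapsed)

# called when a fragment finishes, before the next one starts
progress.thread_reset()
```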
@@ -16,6 +16,7 @@ from ..utils import (
     update_url_query,
     urljoin,
 )
+from ..utils._utils import _request_dump_filename


 class HlsFD(FragmentFD):
@@ -72,21 +73,40 @@ class HlsFD(FragmentFD):

     def real_download(self, filename, info_dict):
         man_url = info_dict['url']
-        self.to_screen('[%s] Downloading m3u8 manifest' % self.FD_NAME)
+
+        s = info_dict.get('hls_media_playlist_data')
+        if s:
+            self.to_screen(f'[{self.FD_NAME}] Using m3u8 manifest from extracted info')
+        else:
+            self.to_screen(f'[{self.FD_NAME}] Downloading m3u8 manifest')
             urlh = self.ydl.urlopen(self._prepare_url(info_dict, man_url))
             man_url = urlh.url
-        s = urlh.read().decode('utf-8', 'ignore')
+            s_bytes = urlh.read()
+            if self.params.get('write_pages'):
+                dump_filename = _request_dump_filename(
+                    man_url, info_dict['id'], None,
+                    trim_length=self.params.get('trim_file_name'))
+                self.to_screen(f'[{self.FD_NAME}] Saving request to {dump_filename}')
+                with open(dump_filename, 'wb') as outf:
+                    outf.write(s_bytes)
+            s = s_bytes.decode('utf-8', 'ignore')

         can_download, message = self.can_download(s, info_dict, self.params.get('allow_unplayable_formats')), None
         if can_download:
             has_ffmpeg = FFmpegFD.available()
-            no_crypto = not Cryptodome.AES and '#EXT-X-KEY:METHOD=AES-128' in s
-            if no_crypto and has_ffmpeg:
-                can_download, message = False, 'The stream has AES-128 encryption and pycryptodomex is not available'
-            elif no_crypto:
-                message = ('The stream has AES-128 encryption and neither ffmpeg nor pycryptodomex are available; '
-                           'Decryption will be performed natively, but will be extremely slow')
+            if not Cryptodome.AES and '#EXT-X-KEY:METHOD=AES-128' in s:
+                # Even if pycryptodomex isn't available, force HlsFD for m3u8s that won't work with ffmpeg
+                ffmpeg_can_dl = not traverse_obj(info_dict, ((
+                    'extra_param_to_segment_url', 'extra_param_to_key_url',
+                    'hls_media_playlist_data', ('hls_aes', ('uri', 'key', 'iv')),
+                ), any))
+                message = 'The stream has AES-128 encryption and {} available'.format(
+                    'neither ffmpeg nor pycryptodomex are' if ffmpeg_can_dl and not has_ffmpeg else
+                    'pycryptodomex is not')
+                if has_ffmpeg and ffmpeg_can_dl:
+                    can_download = False
+                else:
+                    message += '; decryption will be performed natively, but will be extremely slow'
         elif info_dict.get('extractor_key') == 'Generic' and re.search(r'(?m)#EXT-X-MEDIA-SEQUENCE:(?!0$)', s):
             install_ffmpeg = '' if has_ffmpeg else 'install ffmpeg and '
             message = ('Live HLS streams are not supported by the native downloader. If this is a livestream, '
@@ -119,12 +139,12 @@ class HlsFD(FragmentFD):
             self.to_screen(f'[{self.FD_NAME}] Fragment downloads will be delegated to {real_downloader.get_basename()}')

         def is_ad_fragment_start(s):
-            return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s
-                    or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',ad'))
+            return ((s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s)
+                    or (s.startswith('#UPLYNK-SEGMENT') and s.endswith(',ad')))

         def is_ad_fragment_end(s):
-            return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=master' in s
-                    or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',segment'))
+            return ((s.startswith('#ANVATO-SEGMENT-INFO') and 'type=master' in s)
+                    or (s.startswith('#UPLYNK-SEGMENT') and s.endswith(',segment')))

         fragments = []

@@ -160,10 +180,12 @@ class HlsFD(FragmentFD):
         extra_state = ctx.setdefault('extra_state', {})

         format_index = info_dict.get('format_index')
-        extra_query = None
-        extra_param_to_segment_url = info_dict.get('extra_param_to_segment_url')
-        if extra_param_to_segment_url:
-            extra_query = urllib.parse.parse_qs(extra_param_to_segment_url)
+        extra_segment_query = None
+        if extra_param_to_segment_url := info_dict.get('extra_param_to_segment_url'):
+            extra_segment_query = urllib.parse.parse_qs(extra_param_to_segment_url)
+        extra_key_query = None
+        if extra_param_to_key_url := info_dict.get('extra_param_to_key_url'):
+            extra_key_query = urllib.parse.parse_qs(extra_param_to_key_url)
         i = 0
         media_sequence = 0
         decrypt_info = {'METHOD': 'NONE'}
@@ -175,6 +197,7 @@ class HlsFD(FragmentFD):
         if external_aes_iv:
             external_aes_iv = binascii.unhexlify(remove_start(external_aes_iv, '0x').zfill(32))
         byte_range = {}
+        byte_range_offset = 0
         discontinuity_count = 0
         frag_index = 0
         ad_frag_next = False
@@ -182,7 +205,7 @@ class HlsFD(FragmentFD):
             line = line.strip()
             if line:
                 if not line.startswith('#'):
-                    if format_index and discontinuity_count != format_index:
+                    if format_index is not None and discontinuity_count != format_index:
                         continue
                     if ad_frag_next:
                         continue
@@ -190,8 +213,8 @@ class HlsFD(FragmentFD):
                     if frag_index <= ctx['fragment_index']:
                         continue
                    frag_url = urljoin(man_url, line)
-                    if extra_query:
-                        frag_url = update_url_query(frag_url, extra_query)
+                    if extra_segment_query:
+                        frag_url = update_url_query(frag_url, extra_segment_query)

                     fragments.append({
                         'frag_index': frag_index,
@@ -202,8 +225,13 @@ class HlsFD(FragmentFD):
                     })
                     media_sequence += 1

+                    # If the byte_range is truthy, reset it after appending a fragment that uses it
+                    if byte_range:
+                        byte_range_offset = byte_range['end']
+                        byte_range = {}
+
                 elif line.startswith('#EXT-X-MAP'):
-                    if format_index and discontinuity_count != format_index:
+                    if format_index is not None and discontinuity_count != format_index:
                         continue
                     if frag_index > 0:
                         self.report_error(
@@ -212,13 +240,15 @@ class HlsFD(FragmentFD):
                     frag_index += 1
                     map_info = parse_m3u8_attributes(line[11:])
                     frag_url = urljoin(man_url, map_info.get('URI'))
-                    if extra_query:
-                        frag_url = update_url_query(frag_url, extra_query)
+                    if extra_segment_query:
+                        frag_url = update_url_query(frag_url, extra_segment_query)

+                    map_byte_range = {}
+
                     if map_info.get('BYTERANGE'):
                         splitted_byte_range = map_info.get('BYTERANGE').split('@')
-                        sub_range_start = int(splitted_byte_range[1]) if len(splitted_byte_range) == 2 else byte_range['end']
-                        byte_range = {
+                        sub_range_start = int(splitted_byte_range[1]) if len(splitted_byte_range) == 2 else 0
+                        map_byte_range = {
                             'start': sub_range_start,
                             'end': sub_range_start + int(splitted_byte_range[0]),
                         }
@@ -227,8 +257,8 @@ class HlsFD(FragmentFD):
                         'frag_index': frag_index,
                         'url': frag_url,
                         'decrypt_info': decrypt_info,
-                        'byte_range': byte_range,
-                        'media_sequence': media_sequence
+                        'byte_range': map_byte_range,
+                        'media_sequence': media_sequence,
                     })
                     media_sequence += 1

@@ -244,8 +274,10 @@ class HlsFD(FragmentFD):
                             decrypt_info['KEY'] = external_aes_key
                         else:
                             decrypt_info['URI'] = urljoin(man_url, decrypt_info['URI'])
-                            if extra_query:
-                                decrypt_info['URI'] = update_url_query(decrypt_info['URI'], extra_query)
+                            if extra_key_query or extra_segment_query:
+                                # Fall back to extra_segment_query to key for backwards compat
+                                decrypt_info['URI'] = update_url_query(
+                                    decrypt_info['URI'], extra_key_query or extra_segment_query)
                             if decrypt_url != decrypt_info['URI']:
                                 decrypt_info['KEY'] = None

@@ -253,7 +285,7 @@ class HlsFD(FragmentFD):
                     media_sequence = int(line[22:])
                 elif line.startswith('#EXT-X-BYTERANGE'):
                     splitted_byte_range = line[17:].split('@')
-                    sub_range_start = int(splitted_byte_range[1]) if len(splitted_byte_range) == 2 else byte_range['end']
+                    sub_range_start = int(splitted_byte_range[1]) if len(splitted_byte_range) == 2 else byte_range_offset
                     byte_range = {
                         'start': sub_range_start,
                         'end': sub_range_start + int(splitted_byte_range[0]),
@@ -350,9 +382,8 @@ class HlsFD(FragmentFD):
                     # XXX: this should probably be silent as well
                     # or verify that all segments contain the same data
                     self.report_warning(bug_reports_message(
-                        'Discarding a %s block found in the middle of the stream; '
-                        'if the subtitles display incorrectly,'
-                        % (type(block).__name__)))
+                        f'Discarding a {type(block).__name__} block found in the middle of the stream; '
+                        'if the subtitles display incorrectly,'))
                     continue
                 block.write_into(output)

@@ -369,6 +400,9 @@ class HlsFD(FragmentFD):

                 return output.getvalue().encode()

+            if len(fragments) == 1:
+                self.download_and_append_fragments(ctx, fragments, info_dict)
+            else:
                 self.download_and_append_fragments(
                     ctx, fragments, info_dict, pack_func=pack_fragment, finish_func=fin_fragments)
         else:
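On the `extra_param_to_key_url` handling above: the extra parameters are parsed once with `urllib.parse.parse_qs` and then merged into each segment (or key) URL, with the key URL falling back to the segment query for backwards compatibility. The same merge can be expressed with the standard library alone; yt-dlp uses its own `update_url_query` helper, so the one below is only a stdlib stand-in with made-up parameter values.

```python
import urllib.parse


def update_url_query(url, query):
    """Merge extra query parameters into a URL (stdlib-only stand-in)."""
    parsed = urllib.parse.urlparse(url)
    merged = urllib.parse.parse_qs(parsed.query)
    merged.update(query)
    return parsed._replace(query=urllib.parse.urlencode(merged, doseq=True)).geturl()


extra_segment_query = urllib.parse.parse_qs('token=abc')
extra_key_query = urllib.parse.parse_qs('auth=xyz')

segment_url = update_url_query('https://example.com/seg1.ts', extra_segment_query)
# Key URLs prefer their own extra params, falling back to the segment ones
key_url = update_url_query('https://example.com/key.bin', extra_key_query or extra_segment_query)
print(segment_url, key_url)
```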
|
|||||||
@@ -13,13 +13,9 @@ from ..utils import (
|
|||||||
ContentTooShortError,
|
ContentTooShortError,
|
||||||
RetryManager,
|
RetryManager,
|
||||||
ThrottledDownload,
|
ThrottledDownload,
|
||||||
XAttrMetadataError,
|
|
||||||
XAttrUnavailableError,
|
|
||||||
encodeFilename,
|
|
||||||
int_or_none,
|
int_or_none,
|
||||||
parse_http_range,
|
parse_http_range,
|
||||||
try_call,
|
try_call,
|
||||||
write_xattr,
|
|
||||||
)
|
)
|
||||||
from ..utils.networking import HTTPHeaderDict
|
from ..utils.networking import HTTPHeaderDict
|
||||||
|
|
||||||
@@ -28,6 +24,10 @@ class HttpFD(FileDownloader):
|
|||||||
def real_download(self, filename, info_dict):
|
def real_download(self, filename, info_dict):
|
||||||
url = info_dict['url']
|
url = info_dict['url']
|
||||||
request_data = info_dict.get('request_data', None)
|
request_data = info_dict.get('request_data', None)
|
||||||
|
request_extensions = {}
|
||||||
|
impersonate_target = self._get_impersonate_target(info_dict)
|
||||||
|
if impersonate_target is not None:
|
||||||
|
request_extensions['impersonate'] = impersonate_target
|
||||||
|
|
||||||
class DownloadContext(dict):
|
class DownloadContext(dict):
|
||||||
__getattr__ = dict.get
|
__getattr__ = dict.get
|
||||||
@@ -58,9 +58,8 @@ class HttpFD(FileDownloader):
|
|||||||
|
|
||||||
if self.params.get('continuedl', True):
|
if self.params.get('continuedl', True):
|
||||||
# Establish possible resume length
|
# Establish possible resume length
|
||||||
if os.path.isfile(encodeFilename(ctx.tmpfilename)):
|
if os.path.isfile(ctx.tmpfilename):
|
||||||
ctx.resume_len = os.path.getsize(
|
ctx.resume_len = os.path.getsize(ctx.tmpfilename)
|
||||||
encodeFilename(ctx.tmpfilename))
|
|
||||||
|
|
||||||
ctx.is_resume = ctx.resume_len > 0
|
ctx.is_resume = ctx.resume_len > 0
|
||||||
|
|
||||||
@@ -111,7 +110,7 @@ class HttpFD(FileDownloader):
            if try_call(lambda: range_end >= ctx.content_len):
                range_end = ctx.content_len - 1

-            request = Request(url, request_data, headers)
+            request = Request(url, request_data, headers, extensions=request_extensions)
            has_range = range_start is not None
            if has_range:
                request.headers['Range'] = f'bytes={int(range_start)}-{int_or_none(range_end) or ""}'
@@ -176,7 +175,7 @@ class HttpFD(FileDownloader):
                        'downloaded_bytes': ctx.resume_len,
                        'total_bytes': ctx.resume_len,
                    }, info_dict)
-                    raise SucceedDownload()
+                    raise SucceedDownload
                else:
                    # The length does not match, we start the download over
                    self.report_unable_to_resume()
@@ -194,7 +193,7 @@ class HttpFD(FileDownloader):

        def close_stream():
            if ctx.stream is not None:
-                if not ctx.tmpfilename == '-':
+                if ctx.tmpfilename != '-':
                    ctx.stream.close()
                ctx.stream = None

@@ -237,8 +236,13 @@ class HttpFD(FileDownloader):

        def retry(e):
            close_stream()
-            ctx.resume_len = (byte_counter if ctx.tmpfilename == '-'
-                              else os.path.getsize(encodeFilename(ctx.tmpfilename)))
+            if ctx.tmpfilename == '-':
+                ctx.resume_len = byte_counter
+            else:
+                try:
+                    ctx.resume_len = os.path.getsize(ctx.tmpfilename)
+                except FileNotFoundError:
+                    ctx.resume_len = 0
            raise RetryDownload(e)

        while True:
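The rewritten `retry()` recomputes how many bytes are already on disk before the next attempt, and treats a temp file that has vanished between attempts as a fresh start rather than crashing. A self-contained sketch of that recovery rule (the file names are only illustrative):

```python
import os


def resume_length(tmpfilename, byte_counter):
    """Return how many bytes the next attempt may skip."""
    if tmpfilename == '-':          # streaming to stdout: nothing on disk to re-read
        return byte_counter
    try:
        return os.path.getsize(tmpfilename)
    except FileNotFoundError:       # temp file was removed between attempts
        return 0


print(resume_length('-', 1024), resume_length('/nonexistent/part-file', 1024))
```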
@@ -263,20 +267,14 @@ class HttpFD(FileDownloader):
                        ctx.filename = self.undo_temp_name(ctx.tmpfilename)
                        self.report_destination(ctx.filename)
                    except OSError as err:
-                        self.report_error('unable to open for writing: %s' % str(err))
+                        self.report_error(f'unable to open for writing: {err}')
                        return False

-                    if self.params.get('xattr_set_filesize', False) and data_len is not None:
-                        try:
-                            write_xattr(ctx.tmpfilename, 'user.ytdl.filesize', str(data_len).encode())
-                        except (XAttrUnavailableError, XAttrMetadataError) as err:
-                            self.report_error('unable to set filesize xattr: %s' % str(err))
-
                try:
                    ctx.stream.write(data_block)
                except OSError as err:
                    self.to_stderr('\n')
-                    self.report_error('unable to write data: %s' % str(err))
+                    self.report_error(f'unable to write data: {err}')
                    return False

                # Apply rate limit
@@ -322,7 +320,7 @@ class HttpFD(FileDownloader):
                    elif now - ctx.throttle_start > 3:
                        if ctx.stream is not None and ctx.tmpfilename != '-':
                            ctx.stream.close()
-                        raise ThrottledDownload()
+                        raise ThrottledDownload
                elif speed:
                    ctx.throttle_start = None

@@ -333,7 +331,7 @@ class HttpFD(FileDownloader):

            if not is_test and ctx.chunk_size and ctx.content_len is not None and byte_counter < ctx.content_len:
                ctx.resume_len = byte_counter
-                raise NextFragment()
+                raise NextFragment

            if ctx.tmpfilename != '-':
                ctx.stream.close()
@@ -345,7 +343,7 @@ class HttpFD(FileDownloader):
            self.try_rename(ctx.tmpfilename, ctx.filename)

            # Update file modification time
-            if self.params.get('updatetime', True):
+            if self.params.get('updatetime'):
                info_dict['filetime'] = self.try_utime(ctx.filename, ctx.data.headers.get('last-modified', None))

            self._hook_progress({
@@ -251,7 +251,7 @@ class IsmFD(FragmentFD):
        skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)

        frag_index = 0
-        for i, segment in enumerate(segments):
+        for segment in segments:
            frag_index += 1
            if frag_index <= ctx['fragment_index']:
                continue
@@ -10,7 +10,7 @@ from ..version import __version__ as YT_DLP_VERSION


 class MhtmlFD(FragmentFD):
-    _STYLESHEET = """\
+    _STYLESHEET = '''\
 html, body {
    margin: 0;
    padding: 0;
@@ -45,7 +45,7 @@ body > figure > img {
    max-width: 100%;
    max-height: calc(100vh - 5em);
 }
-"""
+'''
    _STYLESHEET = re.sub(r'\s+', ' ', _STYLESHEET)
    _STYLESHEET = re.sub(r'\B \B|(?<=[\w\-]) (?=[^\w\-])|(?<=[^\w\-]) (?=[\w\-])', '', _STYLESHEET)

@@ -57,24 +57,19 @@ body > figure > img {
        )).decode('us-ascii') + '?='

    def _gen_cid(self, i, fragment, frag_boundary):
-        return '%u.%s@yt-dlp.github.io.invalid' % (i, frag_boundary)
+        return f'{i}.{frag_boundary}@yt-dlp.github.io.invalid'

    def _gen_stub(self, *, fragments, frag_boundary, title):
        output = io.StringIO()

-        output.write((
+        output.write(
            '<!DOCTYPE html>'
            '<html>'
            '<head>'
-            '' '<meta name="generator" content="yt-dlp {version}">'
-            '' '<title>{title}</title>'
-            '' '<style>{styles}</style>'
-            '<body>'
-        ).format(
-            version=escapeHTML(YT_DLP_VERSION),
-            styles=self._STYLESHEET,
-            title=escapeHTML(title)
-        ))
+            f'<meta name="generator" content="yt-dlp {escapeHTML(YT_DLP_VERSION)}">'
+            f'<title>{escapeHTML(title)}</title>'
+            f'<style>{self._STYLESHEET}</style>'
+            '<body>')

        t0 = 0
        for i, frag in enumerate(fragments):
@@ -87,15 +82,12 @@ body > figure > img {
                    num=i + 1,
                    t0=srt_subtitles_timecode(t0),
                    t1=srt_subtitles_timecode(t1),
-                    duration=formatSeconds(frag['duration'], msec=True)
+                    duration=formatSeconds(frag['duration'], msec=True),
                ))
            except (KeyError, ValueError, TypeError):
                t1 = None
-                output.write((
-                    '<figcaption>Slide #{num}</figcaption>'
-                ).format(num=i + 1))
-            output.write('<img src="cid:{cid}">'.format(
-                cid=self._gen_cid(i, frag, frag_boundary)))
+                output.write(f'<figcaption>Slide #{i + 1}</figcaption>')
+            output.write(f'<img src="cid:{self._gen_cid(i, frag, frag_boundary)}">')
            output.write('</figure>')
            t0 = t1

@@ -126,31 +118,24 @@ body > figure > img {
            stub = self._gen_stub(
                fragments=fragments,
                frag_boundary=frag_boundary,
-                title=title
+                title=title,
            )

            ctx['dest_stream'].write((
                'MIME-Version: 1.0\r\n'
                'From: <nowhere@yt-dlp.github.io.invalid>\r\n'
                'To: <nowhere@yt-dlp.github.io.invalid>\r\n'
-                'Subject: {title}\r\n'
+                f'Subject: {self._escape_mime(title)}\r\n'
                'Content-type: multipart/related; '
-                '' 'boundary="{boundary}"; '
-                '' 'type="text/html"\r\n'
-                'X.yt-dlp.Origin: {origin}\r\n'
+                f'boundary="{frag_boundary}"; '
+                'type="text/html"\r\n'
+                f'X.yt-dlp.Origin: {origin}\r\n'
                '\r\n'
-                '--{boundary}\r\n'
+                f'--{frag_boundary}\r\n'
                'Content-Type: text/html; charset=utf-8\r\n'
-                'Content-Length: {length}\r\n'
+                f'Content-Length: {len(stub)}\r\n'
                '\r\n'
-                '{stub}\r\n'
-            ).format(
-                origin=origin,
-                boundary=frag_boundary,
-                length=len(stub),
-                title=self._escape_mime(title),
-                stub=stub
-            ).encode())
+                f'{stub}\r\n').encode())
            extra_state['header_written'] = True

            for i, fragment in enumerate(fragments):
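The rewritten header block interpolates the boundary, subject and stub length directly with f-strings instead of a trailing `.format()` call. A rough standalone illustration of the same `multipart/related` header shape, with made-up field names and values rather than yt-dlp's exact output:

```python
def mhtml_header(title, boundary, stub, origin='https://example.invalid'):
    # CRLF line endings are required by the MIME format.
    return (
        'MIME-Version: 1.0\r\n'
        f'Subject: {title}\r\n'
        f'Content-type: multipart/related; boundary="{boundary}"; type="text/html"\r\n'
        f'X-Origin: {origin}\r\n'
        '\r\n'
        f'--{boundary}\r\n'
        'Content-Type: text/html; charset=utf-8\r\n'
        f'Content-Length: {len(stub)}\r\n'
        '\r\n'
        f'{stub}\r\n').encode()


print(mhtml_header('demo', 'BOUNDARY123', '<html></html>')[:60])
```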
@@ -2,103 +2,49 @@ import json
 import threading
 import time

-from . import get_suitable_downloader
 from .common import FileDownloader
 from .external import FFmpegFD
 from ..networking import Request
-from ..utils import DownloadError, WebSocketsWrapper, str_or_none, try_get
-
-
-class NiconicoDmcFD(FileDownloader):
-    """ Downloading niconico douga from DMC with heartbeat """
-
-    def real_download(self, filename, info_dict):
-        from ..extractor.niconico import NiconicoIE
-
-        self.to_screen('[%s] Downloading from DMC' % self.FD_NAME)
-        ie = NiconicoIE(self.ydl)
-        info_dict, heartbeat_info_dict = ie._get_heartbeat_info(info_dict)
-
-        fd = get_suitable_downloader(info_dict, params=self.params)(self.ydl, self.params)
-
-        success = download_complete = False
-        timer = [None]
-        heartbeat_lock = threading.Lock()
-        heartbeat_url = heartbeat_info_dict['url']
-        heartbeat_data = heartbeat_info_dict['data'].encode()
-        heartbeat_interval = heartbeat_info_dict.get('interval', 30)
-
-        request = Request(heartbeat_url, heartbeat_data)
-
-        def heartbeat():
-            try:
-                self.ydl.urlopen(request).read()
-            except Exception:
-                self.to_screen('[%s] Heartbeat failed' % self.FD_NAME)
-
-            with heartbeat_lock:
-                if not download_complete:
-                    timer[0] = threading.Timer(heartbeat_interval, heartbeat)
-                    timer[0].start()
-
-        heartbeat_info_dict['ping']()
-        self.to_screen('[%s] Heartbeat with %d second interval ...' % (self.FD_NAME, heartbeat_interval))
-        try:
-            heartbeat()
-            if type(fd).__name__ == 'HlsFD':
-                info_dict.update(ie._extract_m3u8_formats(info_dict['url'], info_dict['id'])[0])
-            success = fd.real_download(filename, info_dict)
-        finally:
-            if heartbeat_lock:
-                with heartbeat_lock:
-                    timer[0].cancel()
-                    download_complete = True
-        return success
+from ..networking.websocket import WebSocketResponse
+from ..utils import DownloadError, str_or_none, truncate_string
+from ..utils.traversal import traverse_obj


 class NiconicoLiveFD(FileDownloader):
    """ Downloads niconico live without being stopped """

    def real_download(self, filename, info_dict):
-        video_id = info_dict['video_id']
-        ws_url = info_dict['url']
-        ws_extractor = info_dict['ws']
-        ws_origin_host = info_dict['origin']
-        cookies = info_dict.get('cookies')
-        live_quality = info_dict.get('live_quality', 'high')
-        live_latency = info_dict.get('live_latency', 'high')
+        video_id = info_dict['id']
+        opts = info_dict['downloader_options']
+        quality, ws_extractor, ws_url = opts['max_quality'], opts['ws'], opts['ws_url']
        dl = FFmpegFD(self.ydl, self.params or {})

        new_info_dict = info_dict.copy()
-        new_info_dict.update({
-            'protocol': 'm3u8',
-        })
+        new_info_dict['protocol'] = 'm3u8'

        def communicate_ws(reconnect):
-            if reconnect:
-                ws = WebSocketsWrapper(ws_url, {
-                    'Cookies': str_or_none(cookies) or '',
-                    'Origin': f'https://{ws_origin_host}',
-                    'Accept': '*/*',
-                    'User-Agent': self.params['http_headers']['User-Agent'],
-                })
+            # Support --load-info-json as if it is a reconnect attempt
+            if reconnect or not isinstance(ws_extractor, WebSocketResponse):
+                ws = self.ydl.urlopen(Request(
+                    ws_url, headers={'Origin': 'https://live.nicovideo.jp'}))
                if self.ydl.params.get('verbose', False):
-                    self.to_screen('[debug] Sending startWatching request')
+                    self.write_debug('Sending startWatching request')
                ws.send(json.dumps({
-                    'type': 'startWatching',
                    'data': {
-                        'stream': {
-                            'quality': live_quality,
-                            'protocol': 'hls+fmp4',
-                            'latency': live_latency,
-                            'chasePlay': False
-                        },
-                        'room': {
-                            'protocol': 'webSocket',
-                            'commentable': True
-                        },
                        'reconnect': True,
-                    }
+                        'room': {
+                            'commentable': True,
+                            'protocol': 'webSocket',
+                        },
+                        'stream': {
+                            'accessRightMethod': 'single_cookie',
+                            'chasePlay': False,
+                            'latency': 'high',
+                            'protocol': 'hls',
+                            'quality': quality,
+                        },
+                    },
+                    'type': 'startWatching',
                }))
            else:
                ws = ws_extractor
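Instead of several ad-hoc top-level keys, the live downloader now reads everything it needs from a single `downloader_options` dict prepared by the extractor, and only opens a fresh WebSocket when it was not handed a live one. A toy sketch of that handoff; the keys mirror the diff, but the values and objects here are placeholders:

```python
# Illustration only: shows the extractor -> downloader handoff shape, not real yt-dlp objects.
info_dict = {
    'id': 'lv123456789',
    'url': 'https://example.invalid/master.m3u8',
    'downloader_options': {
        'max_quality': 'abr',
        'ws': None,            # an already-open WebSocket response, when available
        'ws_url': 'wss://example.invalid/watch',
    },
}

opts = info_dict['downloader_options']
quality, ws_extractor, ws_url = opts['max_quality'], opts['ws'], opts['ws_url']
print(quality, ws_url, 'reuse websocket' if ws_extractor else 'open a new websocket')
```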
@@ -111,7 +57,6 @@ class NiconicoLiveFD(FileDownloader):
                if not data or not isinstance(data, dict):
                    continue
                if data.get('type') == 'ping':
-                    # pong back
                    ws.send(r'{"type":"pong"}')
                    ws.send(r'{"type":"keepSeat"}')
                elif data.get('type') == 'disconnect':
@@ -119,12 +64,10 @@ class NiconicoLiveFD(FileDownloader):
                    return True
                elif data.get('type') == 'error':
                    self.write_debug(data)
-                    message = try_get(data, lambda x: x['body']['code'], str) or recv
+                    message = traverse_obj(data, ('body', 'code', {str_or_none}), default=recv)
                    return DownloadError(message)
                elif self.ydl.params.get('verbose', False):
-                    if len(recv) > 100:
-                        recv = recv[:100] + '...'
-                    self.to_screen('[debug] Server said: %s' % recv)
+                    self.write_debug(f'Server response: {truncate_string(recv, 100)}')

        def ws_main():
            reconnect = False
@@ -134,7 +77,8 @@ class NiconicoLiveFD(FileDownloader):
                    if ret is True:
                        return
                except BaseException as e:
-                    self.to_screen('[%s] %s: Connection error occured, reconnecting after 10 seconds: %s' % ('niconico:live', video_id, str_or_none(e)))
+                    self.to_screen(
+                        f'[niconico:live] {video_id}: Connection error occured, reconnecting after 10 seconds: {e}')
                    time.sleep(10)
                    continue
                finally:
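The `try_get` lambda is replaced by a declarative traversal of the nested error payload with a fallback to the raw message. A minimal stand-in for that idea, written here as a tiny helper rather than yt-dlp's actual `traverse_obj`:

```python
def safe_traverse(obj, path, default=None):
    """Minimal stand-in for a traversal helper: walk nested keys, fall back on any miss."""
    for key in path:
        if not isinstance(obj, dict) or key not in obj:
            return default
        obj = obj[key]
    return obj if isinstance(obj, str) else default  # keep only string results, like a str filter


recv = '{"type":"error","body":{"code":"CONTENT_NOT_READY"}}'
data = {'type': 'error', 'body': {'code': 'CONTENT_NOT_READY'}}
print(safe_traverse(data, ('body', 'code'), default=recv))          # -> CONTENT_NOT_READY
print(safe_traverse({'body': {}}, ('body', 'code'), default=recv))  # -> falls back to the raw message
```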
@@ -8,7 +8,6 @@ from ..utils import (
    Popen,
    check_executable,
    encodeArgument,
-    encodeFilename,
    get_exe_version,
 )

@@ -179,15 +178,15 @@ class RtmpFD(FileDownloader):
            return False

        while retval in (RD_INCOMPLETE, RD_FAILED) and not test and not live:
-            prevsize = os.path.getsize(encodeFilename(tmpfilename))
-            self.to_screen('[rtmpdump] Downloaded %s bytes' % prevsize)
+            prevsize = os.path.getsize(tmpfilename)
+            self.to_screen(f'[rtmpdump] Downloaded {prevsize} bytes')
            time.sleep(5.0)  # This seems to be needed
-            args = basic_args + ['--resume']
+            args = [*basic_args, '--resume']
            if retval == RD_FAILED:
                args += ['--skip', '1']
            args = [encodeArgument(a) for a in args]
            retval = run_rtmpdump(args)
-            cursize = os.path.getsize(encodeFilename(tmpfilename))
+            cursize = os.path.getsize(tmpfilename)
            if prevsize == cursize and retval == RD_FAILED:
                break
            # Some rtmp streams seem abort after ~ 99.8%. Don't complain for those
@@ -196,8 +195,8 @@ class RtmpFD(FileDownloader):
                retval = RD_SUCCESS
                break
        if retval == RD_SUCCESS or (test and retval == RD_INCOMPLETE):
-            fsize = os.path.getsize(encodeFilename(tmpfilename))
-            self.to_screen('[rtmpdump] Downloaded %s bytes' % fsize)
+            fsize = os.path.getsize(tmpfilename)
+            self.to_screen(f'[rtmpdump] Downloaded {fsize} bytes')
            self.try_rename(tmpfilename, filename)
            self._hook_progress({
                'downloaded_bytes': fsize,
@@ -2,7 +2,7 @@ import os
 import subprocess

 from .common import FileDownloader
-from ..utils import check_executable, encodeFilename
+from ..utils import check_executable


 class RtspFD(FileDownloader):
@@ -26,7 +26,7 @@ class RtspFD(FileDownloader):

        retval = subprocess.call(args)
        if retval == 0:
-            fsize = os.path.getsize(encodeFilename(tmpfilename))
+            fsize = os.path.getsize(tmpfilename)
            self.to_screen(f'\r[{args[0]}] {fsize} bytes')
            self.try_rename(tmpfilename, filename)
            self._hook_progress({
@@ -18,7 +18,7 @@ class YoutubeLiveChatFD(FragmentFD):

    def real_download(self, filename, info_dict):
        video_id = info_dict['video_id']
-        self.to_screen('[%s] Downloading live chat' % self.FD_NAME)
+        self.to_screen(f'[{self.FD_NAME}] Downloading live chat')
        if not self.params.get('skip_download') and info_dict['protocol'] == 'youtube_live_chat':
            self.report_warning('Live chat download runs until the livestream ends. '
                                'If you wish to download the video simultaneously, run a separate yt-dlp instance')
@@ -123,8 +123,8 @@ class YoutubeLiveChatFD(FragmentFD):
                    data,
                    lambda x: x['continuationContents']['liveChatContinuation'], dict) or {}

-                func = (info_dict['protocol'] == 'youtube_live_chat' and parse_actions_live
-                        or frag_index == 1 and try_refresh_replay_beginning
+                func = ((info_dict['protocol'] == 'youtube_live_chat' and parse_actions_live)
+                        or (frag_index == 1 and try_refresh_replay_beginning)
                        or parse_actions_replay)
                return (True, *func(live_chat_continuation))
            except HTTPError as err:
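The added parentheses in the `func = (...)` expression only make the existing precedence explicit: `and` already binds tighter than `or`, so behaviour is unchanged while the intent becomes readable. A tiny check of that equivalence with stand-in values:

```python
protocol_is_live, is_first_fragment = False, True
parse_actions_live, try_refresh_replay_beginning, parse_actions_replay = 'live', 'refresh', 'replay'

implicit = (protocol_is_live and parse_actions_live
            or is_first_fragment and try_refresh_replay_beginning
            or parse_actions_replay)
explicit = ((protocol_is_live and parse_actions_live)
            or (is_first_fragment and try_refresh_replay_beginning)
            or parse_actions_replay)
assert implicit == explicit == 'refresh'
```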
@@ -1,16 +1,25 @@
 from ..compat.compat_utils import passthrough_module
+from ..globals import extractors as _extractors_context
+from ..globals import plugin_ies as _plugin_ies_context
+from ..plugins import PluginSpec, register_plugin_spec

 passthrough_module(__name__, '.extractors')
 del passthrough_module

+register_plugin_spec(PluginSpec(
+    module_name='extractor',
+    suffix='IE',
+    destination=_extractors_context,
+    plugin_destination=_plugin_ies_context,
+))


 def gen_extractor_classes():
    """ Return a list of supported extractors.
    The order does matter; the first extractor matched is the one handling the URL.
    """
-    from .extractors import _ALL_CLASSES
-
-    return _ALL_CLASSES
+    import_extractors()
+    return list(_extractors_context.value.values())


 def gen_extractors():
@@ -37,6 +46,9 @@ def list_extractors(age_limit=None):

 def get_info_extractor(ie_name):
    """Returns the info extractor class with the given ie_name"""
-    from . import extractors
-
-    return getattr(extractors, f'{ie_name}IE')
+    import_extractors()
+    return _extractors_context.value[f'{ie_name}IE']
+
+
+def import_extractors():
+    from . import extractors  # noqa: F401
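The module now resolves extractor classes through a shared registry that is filled as a side effect of importing the extractors package on first use, instead of looking up module attributes directly. A loose sketch of that lazy-registry idea; the registry and loader names below are invented for illustration and are not yt-dlp's internals:

```python
_registry = {}          # name -> class, filled on first use
_loaded = False


def _load_extractors():
    # In the real code this is `from . import extractors`, which populates the
    # registry as an import side effect; here we just register a couple of dummies.
    global _loaded
    if _loaded:
        return
    _registry.update({'FooIE': type('FooIE', (), {}), 'BarIE': type('BarIE', (), {})})
    _loaded = True


def gen_extractor_classes():
    _load_extractors()
    return list(_registry.values())


def get_info_extractor(ie_name):
    _load_extractors()
    return _registry[f'{ie_name}IE']


print(get_info_extractor('Foo').__name__, len(gen_extractor_classes()))
```

Deferring the import keeps start-up cheap for code paths that never need the full extractor list.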
@@ -4,24 +4,24 @@ import re
 import time

 from .common import InfoExtractor
-from ..compat import compat_str
 from ..utils import (
-    dict_get,
    ExtractorError,
-    js_to_json,
+    dict_get,
    int_or_none,
+    js_to_json,
    parse_iso8601,
    str_or_none,
    traverse_obj,
    try_get,
    unescapeHTML,
    update_url_query,
+    url_or_none,
 )


 class ABCIE(InfoExtractor):
    IE_NAME = 'abc.net.au'
-    _VALID_URL = r'https?://(?:www\.)?abc\.net\.au/(?:news|btn)/(?:[^/]+/){1,4}(?P<id>\d{5,})'
+    _VALID_URL = r'https?://(?:www\.)?abc\.net\.au/(?:news|btn|listen)/(?:[^/?#]+/){1,4}(?P<id>\d{5,})'

    _TESTS = [{
        'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334',
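The widened pattern accepts `/listen/` paths and stops each path segment at `?` and `#` as well as `/`. A quick check with Python's `re`; the sample URLs below are made up in the abc.net.au shape and are not taken from the test suite:

```python
import re

VALID_URL = r'https?://(?:www\.)?abc\.net\.au/(?:news|btn|listen)/(?:[^/?#]+/){1,4}(?P<id>\d{5,})'

for url in (
    'https://www.abc.net.au/news/2023-06-25/some-story-slug/102520540',
    'https://www.abc.net.au/listen/programs/example-program/12345678',
    'https://www.abc.net.au/sport/2023-06-25/some-story/102520540',   # not covered by the pattern
):
    m = re.match(VALID_URL, url)
    print(m.group('id') if m else 'no match')
```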
@@ -53,8 +53,9 @@ class ABCIE(InfoExtractor):
|
|||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '6880080',
|
'id': '6880080',
|
||||||
'ext': 'mp3',
|
'ext': 'mp3',
|
||||||
'title': 'NAB lifts interest rates, following Westpac and CBA',
|
'title': 'NAB lifts interest rates, following Westpac and CBA - ABC listen',
|
||||||
'description': 'md5:f13d8edc81e462fce4a0437c7dc04728',
|
'description': 'md5:f13d8edc81e462fce4a0437c7dc04728',
|
||||||
|
'thumbnail': r're:https://live-production\.wcms\.abc-cdn\.net\.au/2193d7437c84b25eafd6360c82b5fa21',
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://www.abc.net.au/news/2015-10-19/6866214',
|
'url': 'http://www.abc.net.au/news/2015-10-19/6866214',
|
||||||
@@ -64,17 +65,19 @@ class ABCIE(InfoExtractor):
|
|||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '10527914',
|
'id': '10527914',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'WWI Centenary',
|
'title': 'WWI Centenary - Behind The News',
|
||||||
'description': 'md5:c2379ec0ca84072e86b446e536954546',
|
'description': 'md5:fa4405939ff750fade46ff0cd4c66a52',
|
||||||
}
|
'thumbnail': r're:https://live-production\.wcms\.abc-cdn\.net\.au/bcc3433c97bf992dff32ec5a768713c9',
|
||||||
|
},
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://www.abc.net.au/news/programs/the-world/2020-06-10/black-lives-matter-protests-spawn-support-for/12342074',
|
'url': 'https://www.abc.net.au/news/programs/the-world/2020-06-10/black-lives-matter-protests-spawn-support-for/12342074',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': '12342074',
|
'id': '12342074',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Black Lives Matter protests spawn support for Papuans in Indonesia',
|
'title': 'Black Lives Matter protests spawn support for Papuans in Indonesia',
|
||||||
'description': 'md5:2961a17dc53abc558589ccd0fb8edd6f',
|
'description': 'md5:625257209f2d14ce23cb4e3785da9beb',
|
||||||
}
|
'thumbnail': r're:https://live-production\.wcms\.abc-cdn\.net\.au/7ee6f190de6d7dbb04203e514bfae9ec',
|
||||||
|
},
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://www.abc.net.au/btn/newsbreak/btn-newsbreak-20200814/12560476',
|
'url': 'https://www.abc.net.au/btn/newsbreak/btn-newsbreak-20200814/12560476',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
@@ -85,7 +88,7 @@ class ABCIE(InfoExtractor):
|
|||||||
'upload_date': '20200813',
|
'upload_date': '20200813',
|
||||||
'uploader': 'Behind the News',
|
'uploader': 'Behind the News',
|
||||||
'uploader_id': 'behindthenews',
|
'uploader_id': 'behindthenews',
|
||||||
}
|
},
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://www.abc.net.au/news/2023-06-25/wagner-boss-orders-troops-back-to-bases-to-avoid-bloodshed/102520540',
|
'url': 'https://www.abc.net.au/news/2023-06-25/wagner-boss-orders-troops-back-to-bases-to-avoid-bloodshed/102520540',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
@@ -93,8 +96,17 @@ class ABCIE(InfoExtractor):
|
|||||||
'title': 'Wagner Group retreating from Russia, leader Prigozhin to move to Belarus',
|
'title': 'Wagner Group retreating from Russia, leader Prigozhin to move to Belarus',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'description': 'Wagner troops leave Rostov-on-Don and\xa0Yevgeny Prigozhin will move to Belarus under a deal brokered by Belarusian President Alexander Lukashenko to end the mutiny.',
|
'description': 'Wagner troops leave Rostov-on-Don and\xa0Yevgeny Prigozhin will move to Belarus under a deal brokered by Belarusian President Alexander Lukashenko to end the mutiny.',
|
||||||
'thumbnail': 'https://live-production.wcms.abc-cdn.net.au/0c170f5b57f0105c432f366c0e8e267b?impolicy=wcms_crop_resize&cropH=2813&cropW=5000&xPos=0&yPos=249&width=862&height=485',
|
'thumbnail': r're:https://live-production\.wcm\.abc-cdn\.net\.au/0c170f5b57f0105c432f366c0e8e267b',
|
||||||
}
|
},
|
||||||
|
}, {
|
||||||
|
'url': 'https://www.abc.net.au/listen/programs/the-followers-madness-of-two/presents-followers-madness-of-two/105697646',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '105697646',
|
||||||
|
'title': 'INTRODUCING — The Followers: Madness of Two - ABC listen',
|
||||||
|
'ext': 'mp3',
|
||||||
|
'description': 'md5:2310cd0d440a4e01656abea15db8d1f3',
|
||||||
|
'thumbnail': r're:https://live-production\.wcms\.abc-cdn\.net\.au/90d7078214e5d66553ffb7fcf0da0cda',
|
||||||
|
},
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
@@ -125,7 +137,7 @@ class ABCIE(InfoExtractor):
|
|||||||
if mobj is None:
|
if mobj is None:
|
||||||
expired = self._html_search_regex(r'(?s)class="expired-(?:video|audio)".+?<span>(.+?)</span>', webpage, 'expired', None)
|
expired = self._html_search_regex(r'(?s)class="expired-(?:video|audio)".+?<span>(.+?)</span>', webpage, 'expired', None)
|
||||||
if expired:
|
if expired:
|
||||||
raise ExtractorError('%s said: %s' % (self.IE_NAME, expired), expected=True)
|
raise ExtractorError(f'{self.IE_NAME} said: {expired}', expected=True)
|
||||||
raise ExtractorError('Unable to extract video urls')
|
raise ExtractorError('Unable to extract video urls')
|
||||||
|
|
||||||
urls_info = self._parse_json(
|
urls_info = self._parse_json(
|
||||||
@@ -163,7 +175,7 @@ class ABCIE(InfoExtractor):
|
|||||||
'height': height,
|
'height': height,
|
||||||
'tbr': bitrate,
|
'tbr': bitrate,
|
||||||
'filesize': int_or_none(url_info.get('filesize')),
|
'filesize': int_or_none(url_info.get('filesize')),
|
||||||
'format_id': format_id
|
'format_id': format_id,
|
||||||
})
|
})
|
||||||
|
|
||||||
return {
|
return {
|
||||||
@@ -180,20 +192,100 @@ class ABCIViewIE(InfoExtractor):
|
|||||||
_VALID_URL = r'https?://iview\.abc\.net\.au/(?:[^/]+/)*video/(?P<id>[^/?#]+)'
|
_VALID_URL = r'https?://iview\.abc\.net\.au/(?:[^/]+/)*video/(?P<id>[^/?#]+)'
|
||||||
_GEO_COUNTRIES = ['AU']
|
_GEO_COUNTRIES = ['AU']
|
||||||
|
|
||||||
# ABC iview programs are normally available for 14 days only.
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
|
'url': 'https://iview.abc.net.au/show/utopia/series/1/video/CO1211V001S00',
|
||||||
|
'md5': '52a942bfd7a0b79a6bfe9b4ce6c9d0ed',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'CO1211V001S00',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Series 1 Ep 1 Wood For The Trees',
|
||||||
|
'series': 'Utopia',
|
||||||
|
'description': 'md5:0cfb2c183c1b952d1548fd65c8a95c00',
|
||||||
|
'upload_date': '20230726',
|
||||||
|
'uploader_id': 'abc1',
|
||||||
|
'series_id': 'CO1211V',
|
||||||
|
'episode_id': 'CO1211V001S00',
|
||||||
|
'season_number': 1,
|
||||||
|
'season': 'Season 1',
|
||||||
|
'episode_number': 1,
|
||||||
|
'episode': 'Wood For The Trees',
|
||||||
|
'thumbnail': 'https://cdn.iview.abc.net.au/thumbs/i/co/CO1211V001S00_5ad8353f4df09_1280.jpg',
|
||||||
|
'timestamp': 1690403700,
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'note': 'No episode name',
|
||||||
'url': 'https://iview.abc.net.au/show/gruen/series/11/video/LE1927H001S00',
|
'url': 'https://iview.abc.net.au/show/gruen/series/11/video/LE1927H001S00',
|
||||||
'md5': '67715ce3c78426b11ba167d875ac6abf',
|
'md5': '67715ce3c78426b11ba167d875ac6abf',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'LE1927H001S00',
|
'id': 'LE1927H001S00',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': "Series 11 Ep 1",
|
'title': 'Series 11 Ep 1',
|
||||||
'series': "Gruen",
|
'series': 'Gruen',
|
||||||
'description': 'md5:52cc744ad35045baf6aded2ce7287f67',
|
'description': 'md5:52cc744ad35045baf6aded2ce7287f67',
|
||||||
'upload_date': '20190925',
|
'upload_date': '20190925',
|
||||||
'uploader_id': 'abc1',
|
'uploader_id': 'abc1',
|
||||||
|
'series_id': 'LE1927H',
|
||||||
|
'episode_id': 'LE1927H001S00',
|
||||||
|
'season_number': 11,
|
||||||
|
'season': 'Season 11',
|
||||||
|
'episode_number': 1,
|
||||||
|
'episode': 'Episode 1',
|
||||||
|
'thumbnail': 'https://cdn.iview.abc.net.au/thumbs/i/le/LE1927H001S00_5d954fbd79e25_1280.jpg',
|
||||||
'timestamp': 1569445289,
|
'timestamp': 1569445289,
|
||||||
},
|
},
|
||||||
|
'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest'],
|
||||||
|
'params': {
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'note': 'No episode number',
|
||||||
|
'url': 'https://iview.abc.net.au/show/four-corners/series/2022/video/NC2203H039S00',
|
||||||
|
'md5': '77cb7d8434440e3b28fbebe331c2456a',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'NC2203H039S00',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Series 2022 Locking Up Kids',
|
||||||
|
'series': 'Four Corners',
|
||||||
|
'description': 'md5:54829ca108846d1a70e1fcce2853e720',
|
||||||
|
'upload_date': '20221114',
|
||||||
|
'uploader_id': 'abc1',
|
||||||
|
'series_id': 'NC2203H',
|
||||||
|
'episode_id': 'NC2203H039S00',
|
||||||
|
'season_number': 2022,
|
||||||
|
'season': 'Season 2022',
|
||||||
|
'episode': 'Locking Up Kids',
|
||||||
|
'thumbnail': 'https://cdn.iview.abc.net.au/thumbs/i/nc/NC2203H039S00_636d8a0944a22_1920.jpg',
|
||||||
|
'timestamp': 1668460497,
|
||||||
|
|
||||||
|
},
|
||||||
|
'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest'],
|
||||||
|
'params': {
|
||||||
|
'skip_download': True,
|
||||||
|
},
|
||||||
|
}, {
|
||||||
|
'note': 'No episode name or number',
|
||||||
|
'url': 'https://iview.abc.net.au/show/landline/series/2021/video/RF2004Q043S00',
|
||||||
|
'md5': '2e17dec06b13cc81dc119d2565289396',
|
||||||
|
'info_dict': {
|
||||||
|
'id': 'RF2004Q043S00',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Series 2021',
|
||||||
|
'series': 'Landline',
|
||||||
|
'description': 'md5:c9f30d9c0c914a7fd23842f6240be014',
|
||||||
|
'upload_date': '20211205',
|
||||||
|
'uploader_id': 'abc1',
|
||||||
|
'series_id': 'RF2004Q',
|
||||||
|
'episode_id': 'RF2004Q043S00',
|
||||||
|
'season_number': 2021,
|
||||||
|
'season': 'Season 2021',
|
||||||
|
'thumbnail': 'https://cdn.iview.abc.net.au/thumbs/i/rf/RF2004Q043S00_61a950639dbc0_1920.jpg',
|
||||||
|
'timestamp': 1638710705,
|
||||||
|
|
||||||
|
},
|
||||||
|
'expected_warnings': ['Ignoring subtitle tracks found in the HLS manifest'],
|
||||||
'params': {
|
'params': {
|
||||||
'skip_download': True,
|
'skip_download': True,
|
||||||
},
|
},
|
||||||
@@ -207,13 +299,12 @@ class ABCIViewIE(InfoExtractor):
|
|||||||
stream = next(s for s in video_params['playlist'] if s.get('type') in ('program', 'livestream'))
|
stream = next(s for s in video_params['playlist'] if s.get('type') in ('program', 'livestream'))
|
||||||
|
|
||||||
house_number = video_params.get('episodeHouseNumber') or video_id
|
house_number = video_params.get('episodeHouseNumber') or video_id
|
||||||
path = '/auth/hls/sign?ts={0}&hn={1}&d=android-tablet'.format(
|
path = f'/auth/hls/sign?ts={int(time.time())}&hn={house_number}&d=android-tablet'
|
||||||
int(time.time()), house_number)
|
|
||||||
sig = hmac.new(
|
sig = hmac.new(
|
||||||
b'android.content.res.Resources',
|
b'android.content.res.Resources',
|
||||||
path.encode('utf-8'), hashlib.sha256).hexdigest()
|
path.encode(), hashlib.sha256).hexdigest()
|
||||||
token = self._download_webpage(
|
token = self._download_webpage(
|
||||||
'http://iview.abc.net.au{0}&sig={1}'.format(path, sig), video_id)
|
f'http://iview.abc.net.au{path}&sig={sig}', video_id)
|
||||||
|
|
||||||
def tokenize_url(url, token):
|
def tokenize_url(url, token):
|
||||||
return update_url_query(url, {
|
return update_url_query(url, {
|
||||||
@@ -222,7 +313,7 @@ class ABCIViewIE(InfoExtractor):
|
|||||||
|
|
||||||
for sd in ('1080', '720', 'sd', 'sd-low'):
|
for sd in ('1080', '720', 'sd', 'sd-low'):
|
||||||
sd_url = try_get(
|
sd_url = try_get(
|
||||||
stream, lambda x: x['streams']['hls'][sd], compat_str)
|
stream, lambda x: x['streams']['hls'][sd], str)
|
||||||
if not sd_url:
|
if not sd_url:
|
||||||
continue
|
continue
|
||||||
formats = self._extract_m3u8_formats(
|
formats = self._extract_m3u8_formats(
|
||||||
@@ -230,6 +321,8 @@ class ABCIViewIE(InfoExtractor):
|
|||||||
entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)
|
entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)
|
||||||
if formats:
|
if formats:
|
||||||
break
|
break
|
||||||
|
else:
|
||||||
|
formats = []
|
||||||
|
|
||||||
subtitles = {}
|
subtitles = {}
|
||||||
src_vtt = stream.get('captions', {}).get('src-vtt')
|
src_vtt = stream.get('captions', {}).get('src-vtt')
|
||||||
@@ -255,6 +348,8 @@ class ABCIViewIE(InfoExtractor):
|
|||||||
'episode_number': int_or_none(self._search_regex(
|
'episode_number': int_or_none(self._search_regex(
|
||||||
r'\bEp\s+(\d+)\b', title, 'episode number', default=None)),
|
r'\bEp\s+(\d+)\b', title, 'episode number', default=None)),
|
||||||
'episode_id': house_number,
|
'episode_id': house_number,
|
||||||
|
'episode': self._search_regex(
|
||||||
|
r'^(?:Series\s+\d+)?\s*(?:Ep\s+\d+)?\s*(.*)$', title, 'episode', default='') or None,
|
||||||
'uploader_id': video_params.get('channel'),
|
'uploader_id': video_params.get('channel'),
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'subtitles': subtitles,
|
'subtitles': subtitles,
|
||||||
@@ -275,7 +370,7 @@ class ABCIViewShowSeriesIE(InfoExtractor):
|
|||||||
'description': 'md5:93119346c24a7c322d446d8eece430ff',
|
'description': 'md5:93119346c24a7c322d446d8eece430ff',
|
||||||
'series': 'Upper Middle Bogan',
|
'series': 'Upper Middle Bogan',
|
||||||
'season': 'Series 1',
|
'season': 'Series 1',
|
||||||
'thumbnail': r're:^https?://cdn\.iview\.abc\.net\.au/thumbs/.*\.jpg$'
|
'thumbnail': r're:^https?://cdn\.iview\.abc\.net\.au/thumbs/.*\.jpg$',
|
||||||
},
|
},
|
||||||
'playlist_count': 8,
|
'playlist_count': 8,
|
||||||
}, {
|
}, {
|
||||||
@@ -294,17 +389,39 @@ class ABCIViewShowSeriesIE(InfoExtractor):
|
|||||||
'noplaylist': True,
|
'noplaylist': True,
|
||||||
'skip_download': 'm3u8',
|
'skip_download': 'm3u8',
|
||||||
},
|
},
|
||||||
|
}, {
|
||||||
|
# 'videoEpisodes' is a dict with `items` key
|
||||||
|
'url': 'https://iview.abc.net.au/show/7-30-mark-humphries-satire',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '178458-0',
|
||||||
|
'title': 'Episodes',
|
||||||
|
'description': 'Satirist Mark Humphries brings his unique perspective on current political events for 7.30.',
|
||||||
|
'series': '7.30 Mark Humphries Satire',
|
||||||
|
'season': 'Episodes',
|
||||||
|
'thumbnail': r're:^https?://cdn\.iview\.abc\.net\.au/thumbs/.*\.jpg$',
|
||||||
|
},
|
||||||
|
'playlist_count': 15,
|
||||||
|
'skip': 'This program is not currently available in ABC iview',
|
||||||
|
}, {
|
||||||
|
'url': 'https://iview.abc.net.au/show/inbestigators',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '175343-1',
|
||||||
|
'title': 'Series 1',
|
||||||
|
'description': 'md5:b9976935a6450e5b78ce2a940a755685',
|
||||||
|
'series': 'The Inbestigators',
|
||||||
|
'season': 'Series 1',
|
||||||
|
'thumbnail': r're:^https?://cdn\.iview\.abc\.net\.au/thumbs/.+\.jpg',
|
||||||
|
},
|
||||||
|
'playlist_count': 17,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
show_id = self._match_id(url)
|
show_id = self._match_id(url)
|
||||||
webpage = self._download_webpage(url, show_id)
|
webpage = self._download_webpage(url, show_id)
|
||||||
webpage_data = self._search_regex(
|
video_data = self._search_json(
|
||||||
r'window\.__INITIAL_STATE__\s*=\s*[\'"](.+?)[\'"]\s*;',
|
r'window\.__INITIAL_STATE__\s*=\s*[\'"]', webpage, 'initial state', show_id,
|
||||||
webpage, 'initial state')
|
transform_source=lambda x: x.encode().decode('unicode_escape'),
|
||||||
video_data = self._parse_json(
|
end_pattern=r'[\'"]\s*;')['route']['pageData']['_embedded']
|
||||||
unescapeHTML(webpage_data).encode('utf-8').decode('unicode_escape'), show_id)
|
|
||||||
video_data = video_data['route']['pageData']['_embedded']
|
|
||||||
|
|
||||||
highlight = try_get(video_data, lambda x: x['highlightVideo']['shareUrl'])
|
highlight = try_get(video_data, lambda x: x['highlightVideo']['shareUrl'])
|
||||||
if not self._yes_playlist(show_id, bool(highlight), video_label='highlight video'):
|
if not self._yes_playlist(show_id, bool(highlight), video_label='highlight video'):
|
||||||
@@ -313,12 +430,14 @@ class ABCIViewShowSeriesIE(InfoExtractor):
|
|||||||
series = video_data['selectedSeries']
|
series = video_data['selectedSeries']
|
||||||
return {
|
return {
|
||||||
'_type': 'playlist',
|
'_type': 'playlist',
|
||||||
'entries': [self.url_result(episode['shareUrl'])
|
'entries': [self.url_result(episode_url, ABCIViewIE)
|
||||||
for episode in series['_embedded']['videoEpisodes']],
|
for episode_url in traverse_obj(series, (
|
||||||
|
'_embedded', 'videoEpisodes', (None, 'items'), ..., 'shareUrl', {url_or_none}))],
|
||||||
'id': series.get('id'),
|
'id': series.get('id'),
|
||||||
'title': dict_get(series, ('title', 'displaySubtitle')),
|
'title': dict_get(series, ('title', 'displaySubtitle')),
|
||||||
'description': series.get('description'),
|
'description': series.get('description'),
|
||||||
'series': dict_get(series, ('showTitle', 'displayTitle')),
|
'series': dict_get(series, ('showTitle', 'displayTitle')),
|
||||||
'season': dict_get(series, ('title', 'displaySubtitle')),
|
'season': dict_get(series, ('title', 'displaySubtitle')),
|
||||||
'thumbnail': series.get('thumbnail'),
|
'thumbnail': traverse_obj(
|
||||||
|
series, 'thumbnail', ('images', lambda _, v: v['name'] == 'seriesThumbnail', 'url'), get_all=False),
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -58,7 +58,7 @@ class AbcNewsVideoIE(AMPIE):
|
|||||||
display_id = mobj.group('display_id')
|
display_id = mobj.group('display_id')
|
||||||
video_id = mobj.group('id')
|
video_id = mobj.group('id')
|
||||||
info_dict = self._extract_feed_info(
|
info_dict = self._extract_feed_info(
|
||||||
'http://abcnews.go.com/video/itemfeed?id=%s' % video_id)
|
f'http://abcnews.go.com/video/itemfeed?id={video_id}')
|
||||||
info_dict.update({
|
info_dict.update({
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'display_id': display_id,
|
'display_id': display_id,
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..compat import compat_str
|
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
dict_get,
|
dict_get,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
@@ -57,11 +56,11 @@ class ABCOTVSIE(InfoExtractor):
|
|||||||
data = self._download_json(
|
data = self._download_json(
|
||||||
'https://api.abcotvs.com/v2/content', display_id, query={
|
'https://api.abcotvs.com/v2/content', display_id, query={
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'key': 'otv.web.%s.story' % station,
|
'key': f'otv.web.{station}.story',
|
||||||
'station': station,
|
'station': station,
|
||||||
})['data']
|
})['data']
|
||||||
video = try_get(data, lambda x: x['featuredMedia']['video'], dict) or data
|
video = try_get(data, lambda x: x['featuredMedia']['video'], dict) or data
|
||||||
video_id = compat_str(dict_get(video, ('id', 'publishedKey'), video_id))
|
video_id = str(dict_get(video, ('id', 'publishedKey'), video_id))
|
||||||
title = video.get('title') or video['linkText']
|
title = video.get('title') or video['linkText']
|
||||||
|
|
||||||
formats = []
|
formats = []
|
||||||
|
|||||||
@@ -6,53 +6,54 @@ import hmac
 import io
 import json
 import re
-import struct
 import time
 import urllib.parse
-import urllib.request
-import urllib.response
 import uuid

 from .common import InfoExtractor
 from ..aes import aes_ecb_decrypt
+from ..networking import RequestHandler, Response
+from ..networking.exceptions import TransportError
 from ..utils import (
    ExtractorError,
-    bytes_to_intlist,
+    OnDemandPagedList,
    decode_base_n,
    int_or_none,
-    intlist_to_bytes,
-    OnDemandPagedList,
    time_seconds,
    traverse_obj,
+    update_url,
    update_url_query,
 )


-def add_opener(ydl, handler):  # FIXME: Create proper API in .networking
-    """Add a handler for opening URLs, like _download_webpage"""
-    # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L426
-    # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L605
-    rh = ydl._request_director.handlers['Urllib']
-    if 'abematv-license' in rh._SUPPORTED_URL_SCHEMES:
-        return
-    opener = rh._get_instance(cookiejar=ydl.cookiejar, proxies=ydl.proxies)
-    assert isinstance(opener, urllib.request.OpenerDirector)
-    opener.add_handler(handler)
-    rh._SUPPORTED_URL_SCHEMES = (*rh._SUPPORTED_URL_SCHEMES, 'abematv-license')
+class AbemaLicenseRH(RequestHandler):
+    _SUPPORTED_URL_SCHEMES = ('abematv-license',)
+    _SUPPORTED_PROXY_SCHEMES = None
+    _SUPPORTED_FEATURES = None
+    RH_NAME = 'abematv_license'

+    _STRTABLE = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
+    _HKEY = b'3AF0298C219469522A313570E8583005A642E73EDD58E3EA2FB7339D3DF1597E'

-class AbemaLicenseHandler(urllib.request.BaseHandler):
-    handler_order = 499
-    STRTABLE = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
-    HKEY = b'3AF0298C219469522A313570E8583005A642E73EDD58E3EA2FB7339D3DF1597E'
-
-    def __init__(self, ie: 'AbemaTVIE'):
-        # the protocol that this should really handle is 'abematv-license://'
-        # abematv_license_open is just a placeholder for development purposes
-        # ref. https://github.com/python/cpython/blob/f4c03484da59049eb62a9bf7777b963e2267d187/Lib/urllib/request.py#L510
-        setattr(self, 'abematv-license_open', getattr(self, 'abematv_license_open'))
+    def __init__(self, *, ie: 'AbemaTVIE', **kwargs):
+        super().__init__(**kwargs)
        self.ie = ie

+    def _send(self, request):
+        url = request.url
+        ticket = urllib.parse.urlparse(url).netloc
+
+        try:
+            response_data = self._get_videokey_from_ticket(ticket)
+        except ExtractorError as e:
+            raise TransportError(cause=e.cause) from e
+        except (IndexError, KeyError, TypeError) as e:
+            raise TransportError(cause=repr(e)) from e
+
+        return Response(
+            io.BytesIO(response_data), url,
+            headers={'Content-Length': str(len(response_data))})
+
    def _get_videokey_from_ticket(self, ticket):
        to_show = self.ie.get_param('verbose', False)
        media_token = self.ie._get_media_token(to_show=to_show)
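Dropping `struct`, `bytes_to_intlist` and `intlist_to_bytes` works because the decrypted key material is just 16 bytes: the 128-bit integer decoded from the license response can be turned into a byte list directly with `int.to_bytes`, and an HMAC digest is already a `bytes` object. A small demonstration of the equivalence, using a made-up value and keeping `struct` only for comparison:

```python
import struct

res = 0x0123456789abcdef0123456789abcdef          # example 128-bit value, as decode_base_n would return

old_style = list(struct.pack('>QQ', res >> 64, res & 0xffffffffffffffff))
new_style = list(res.to_bytes(16, 'big'))
assert old_style == new_style                      # same 16 bytes, no struct packing needed
print(new_style[:4])
```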
@@ -62,33 +63,27 @@ class AbemaLicenseHandler(urllib.request.BaseHandler):
|
|||||||
query={'t': media_token},
|
query={'t': media_token},
|
||||||
data=json.dumps({
|
data=json.dumps({
|
||||||
'kv': 'a',
|
'kv': 'a',
|
||||||
'lt': ticket
|
'lt': ticket,
|
||||||
}).encode('utf-8'),
|
}).encode(),
|
||||||
headers={
|
headers={
|
||||||
'Content-Type': 'application/json',
|
'Content-Type': 'application/json',
|
||||||
})
|
})
|
||||||
|
|
||||||
res = decode_base_n(license_response['k'], table=self.STRTABLE)
|
res = decode_base_n(license_response['k'], table=self._STRTABLE)
|
||||||
encvideokey = bytes_to_intlist(struct.pack('>QQ', res >> 64, res & 0xffffffffffffffff))
|
encvideokey = list(res.to_bytes(16, 'big'))
|
||||||
|
|
||||||
h = hmac.new(
|
h = hmac.new(
|
||||||
binascii.unhexlify(self.HKEY),
|
binascii.unhexlify(self._HKEY),
|
||||||
(license_response['cid'] + self.ie._DEVICE_ID).encode('utf-8'),
|
(license_response['cid'] + self.ie._DEVICE_ID).encode(),
|
||||||
digestmod=hashlib.sha256)
|
digestmod=hashlib.sha256)
|
||||||
enckey = bytes_to_intlist(h.digest())
|
enckey = list(h.digest())
|
||||||
|
|
||||||
return intlist_to_bytes(aes_ecb_decrypt(encvideokey, enckey))
|
return bytes(aes_ecb_decrypt(encvideokey, enckey))
|
||||||
|
|
||||||
def abematv_license_open(self, url):
|
|
||||||
url = url.get_full_url() if isinstance(url, urllib.request.Request) else url
|
|
||||||
ticket = urllib.parse.urlparse(url).netloc
|
|
||||||
response_data = self._get_videokey_from_ticket(ticket)
|
|
||||||
return urllib.response.addinfourl(io.BytesIO(response_data), headers={
|
|
||||||
'Content-Length': str(len(response_data)),
|
|
||||||
}, url=url, code=200)
|
|
||||||
|
|
||||||
|
|
||||||
class AbemaTVBaseIE(InfoExtractor):
|
class AbemaTVBaseIE(InfoExtractor):
|
||||||
|
_NETRC_MACHINE = 'abematv'
|
||||||
|
|
||||||
_USERTOKEN = None
|
_USERTOKEN = None
|
||||||
_DEVICE_ID = None
|
_DEVICE_ID = None
|
||||||
_MEDIATOKEN = None
|
_MEDIATOKEN = None
|
||||||
@@ -97,11 +92,11 @@ class AbemaTVBaseIE(InfoExtractor):
|
|||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def _generate_aks(cls, deviceid):
|
def _generate_aks(cls, deviceid):
|
||||||
deviceid = deviceid.encode('utf-8')
|
deviceid = deviceid.encode()
|
||||||
# add 1 hour and then drop minute and secs
|
# add 1 hour and then drop minute and secs
|
||||||
ts_1hour = int((time_seconds() // 3600 + 1) * 3600)
|
ts_1hour = int((time_seconds() // 3600 + 1) * 3600)
|
||||||
time_struct = time.gmtime(ts_1hour)
|
time_struct = time.gmtime(ts_1hour)
|
||||||
ts_1hour_str = str(ts_1hour).encode('utf-8')
|
ts_1hour_str = str(ts_1hour).encode()
|
||||||
|
|
||||||
tmp = None
|
tmp = None
|
||||||
|
|
||||||
@@ -113,7 +108,7 @@ class AbemaTVBaseIE(InfoExtractor):
|
|||||||
|
|
||||||
def mix_tmp(count):
|
def mix_tmp(count):
|
||||||
nonlocal tmp
|
nonlocal tmp
|
||||||
for i in range(count):
|
for _ in range(count):
|
||||||
mix_once(tmp)
|
mix_once(tmp)
|
||||||
|
|
||||||
def mix_twist(nonce):
|
def mix_twist(nonce):
|
||||||
@@ -133,11 +128,15 @@ class AbemaTVBaseIE(InfoExtractor):
|
|||||||
if self._USERTOKEN:
|
if self._USERTOKEN:
|
||||||
return self._USERTOKEN
|
return self._USERTOKEN
|
||||||
|
|
||||||
|
self._downloader._request_director.add_handler(AbemaLicenseRH(ie=self, logger=None))
|
||||||
|
|
||||||
username, _ = self._get_login_info()
|
username, _ = self._get_login_info()
|
||||||
AbemaTVBaseIE._USERTOKEN = username and self.cache.load(self._NETRC_MACHINE, username)
|
auth_cache = username and self.cache.load(self._NETRC_MACHINE, username, min_ver='2024.01.19')
|
||||||
|
AbemaTVBaseIE._USERTOKEN = auth_cache and auth_cache.get('usertoken')
|
||||||
if AbemaTVBaseIE._USERTOKEN:
|
if AbemaTVBaseIE._USERTOKEN:
|
||||||
# try authentication with locally stored token
|
# try authentication with locally stored token
|
||||||
try:
|
try:
|
||||||
|
AbemaTVBaseIE._DEVICE_ID = auth_cache.get('device_id')
|
||||||
self._get_media_token(True)
|
self._get_media_token(True)
|
||||||
return
|
return
|
||||||
except ExtractorError as e:
|
except ExtractorError as e:
|
||||||
@@ -150,13 +149,12 @@ class AbemaTVBaseIE(InfoExtractor):
|
|||||||
data=json.dumps({
|
data=json.dumps({
|
||||||
'deviceId': self._DEVICE_ID,
|
'deviceId': self._DEVICE_ID,
|
||||||
'applicationKeySecret': aks,
|
'applicationKeySecret': aks,
|
||||||
}).encode('utf-8'),
|
}).encode(),
|
||||||
headers={
|
headers={
|
||||||
'Content-Type': 'application/json',
|
'Content-Type': 'application/json',
|
||||||
})
|
})
|
||||||
AbemaTVBaseIE._USERTOKEN = user_data['token']
|
AbemaTVBaseIE._USERTOKEN = user_data['token']
|
||||||
|
|
||||||
add_opener(self._downloader, AbemaLicenseHandler(self))
|
|
||||||
return self._USERTOKEN
|
return self._USERTOKEN
|
||||||
|
|
||||||
     def _get_media_token(self, invalidate=False, to_show=True):
@@ -171,13 +169,44 @@ class AbemaTVBaseIE(InfoExtractor):
                 'osLang': 'ja_JP',
                 'osTimezone': 'Asia/Tokyo',
                 'appId': 'tv.abema',
-                'appVersion': '3.27.1'
+                'appVersion': '3.27.1',
             }, headers={
                 'Authorization': f'bearer {self._get_device_token()}',
             })['token']
 
         return self._MEDIATOKEN
 
+    def _perform_login(self, username, password):
+        self._get_device_token()
+        if self.cache.load(self._NETRC_MACHINE, username, min_ver='2024.01.19') and self._get_media_token():
+            self.write_debug('Skipping logging in')
+            return
+
+        if '@' in username:  # don't strictly check if it's email address or not
+            ep, method = 'user/email', 'email'
+        else:
+            ep, method = 'oneTimePassword', 'userId'
+
+        login_response = self._download_json(
+            f'https://api.abema.io/v1/auth/{ep}', None, note='Logging in',
+            data=json.dumps({
+                method: username,
+                'password': password,
+            }).encode(), headers={
+                'Authorization': f'bearer {self._get_device_token()}',
+                'Origin': 'https://abema.tv',
+                'Referer': 'https://abema.tv/',
+                'Content-Type': 'application/json',
+            })
+
+        AbemaTVBaseIE._USERTOKEN = login_response['token']
+        self._get_media_token(True)
+        auth_cache = {
+            'device_id': AbemaTVBaseIE._DEVICE_ID,
+            'usertoken': AbemaTVBaseIE._USERTOKEN,
+        }
+        self.cache.store(self._NETRC_MACHINE, username, auth_cache)
+
     def _call_api(self, endpoint, video_id, query=None, note='Downloading JSON metadata'):
         return self._download_json(
             f'https://api.abema.io/{endpoint}', video_id, query=query or {},
@@ -201,14 +230,14 @@ class AbemaTVBaseIE(InfoExtractor):
 
 class AbemaTVIE(AbemaTVBaseIE):
     _VALID_URL = r'https?://abema\.tv/(?P<type>now-on-air|video/episode|channels/.+?/slots)/(?P<id>[^?/]+)'
-    _NETRC_MACHINE = 'abematv'
     _TESTS = [{
         'url': 'https://abema.tv/video/episode/194-25_s2_p1',
         'info_dict': {
             'id': '194-25_s2_p1',
             'title': '第1話 「チーズケーキ」 「モーニング再び」',
             'series': '異世界食堂2',
-            'series_number': 2,
+            'season': 'シーズン2',
+            'season_number': 2,
             'episode': '第1話 「チーズケーキ」 「モーニング再び」',
             'episode_number': 1,
         },
@@ -220,7 +249,7 @@ class AbemaTVIE(AbemaTVBaseIE):
             'title': 'ゆるキャン△ SEASON2 全話一挙【無料ビデオ72時間】',
             'series': 'ゆるキャン△ SEASON2',
             'episode': 'ゆるキャン△ SEASON2 全話一挙【無料ビデオ72時間】',
-            'series_number': 2,
+            'season_number': 2,
             'episode_number': 1,
             'description': 'md5:9c5a3172ae763278f9303922f0ea5b17',
         },
@@ -249,33 +278,6 @@ class AbemaTVIE(AbemaTVBaseIE):
     }]
     _TIMETABLE = None
 
-    def _perform_login(self, username, password):
-        self._get_device_token()
-        if self.cache.load(self._NETRC_MACHINE, username) and self._get_media_token():
-            self.write_debug('Skipping logging in')
-            return
-
-        if '@' in username:  # don't strictly check if it's email address or not
-            ep, method = 'user/email', 'email'
-        else:
-            ep, method = 'oneTimePassword', 'userId'
-
-        login_response = self._download_json(
-            f'https://api.abema.io/v1/auth/{ep}', None, note='Logging in',
-            data=json.dumps({
-                method: username,
-                'password': password
-            }).encode('utf-8'), headers={
-                'Authorization': f'bearer {self._get_device_token()}',
-                'Origin': 'https://abema.tv',
-                'Referer': 'https://abema.tv/',
-                'Content-Type': 'application/json',
-            })
-
-        AbemaTVBaseIE._USERTOKEN = login_response['token']
-        self._get_media_token(True)
-        self.cache.store(self._NETRC_MACHINE, username, AbemaTVBaseIE._USERTOKEN)
-
     def _real_extract(self, url):
         # starting download using infojson from this extractor is undefined behavior,
         # and never be fixed in the future; you must trigger downloads by directly specifying URL.
@@ -331,7 +333,7 @@ class AbemaTVIE(AbemaTVBaseIE):
 
         description = self._html_search_regex(
             (r'<p\s+class="com-video-EpisodeDetailsBlock__content"><span\s+class=".+?">(.+?)</span></p><div',
-             r'<span\s+class=".+?SlotSummary.+?">(.+?)</span></div><div',),
+             r'<span\s+class=".+?SlotSummary.+?">(.+?)</span></div><div'),
             webpage, 'description', default=None, group=1)
         if not description:
             og_desc = self._html_search_meta(
@@ -344,17 +346,18 @@ class AbemaTVIE(AbemaTVBaseIE):
                 )?
             ''', r'\1', og_desc)
 
-        # canonical URL may contain series and episode number
+        # canonical URL may contain season and episode number
         mobj = re.search(r's(\d+)_p(\d+)$', canonical_url)
         if mobj:
             seri = int_or_none(mobj.group(1), default=float('inf'))
             epis = int_or_none(mobj.group(2), default=float('inf'))
-            info['series_number'] = seri if seri < 100 else None
+            info['season_number'] = seri if seri < 100 else None
             # some anime like Detective Conan (though not available in AbemaTV)
            # has more than 1000 episodes (1026 as of 2021/11/15)
             info['episode_number'] = epis if epis < 2000 else None
 
         is_live, m3u8_url = False, None
+        availability = 'public'
         if video_type == 'now-on-air':
             is_live = True
             channel_url = 'https://api.abema.io/v1/channels'
@@ -372,13 +375,13 @@ class AbemaTVIE(AbemaTVBaseIE):
                 f'https://api.abema.io/v1/video/programs/{video_id}', video_id,
                 note='Checking playability',
                 headers=headers)
-            ondemand_types = traverse_obj(api_response, ('terms', ..., 'onDemandType'))
-            if 3 not in ondemand_types:
+            if not traverse_obj(api_response, ('label', 'free', {bool})):
                 # cannot acquire decryption key for these streams
                 self.report_warning('This is a premium-only stream')
+                availability = 'premium_only'
             info.update(traverse_obj(api_response, {
                 'series': ('series', 'title'),
-                'season': ('season', 'title'),
+                'season': ('season', 'name'),
                 'season_number': ('season', 'sequence'),
                 'episode_number': ('episode', 'number'),
             }))
@@ -395,6 +398,7 @@ class AbemaTVIE(AbemaTVBaseIE):
                 headers=headers)
             if not traverse_obj(api_response, ('slot', 'flags', 'timeshiftFree'), default=False):
                 self.report_warning('This is a premium-only stream')
+                availability = 'premium_only'
 
             m3u8_url = f'https://vod-abematv.akamaized.net/slot/{video_id}/playlist.m3u8'
         else:
@@ -412,19 +416,25 @@ class AbemaTVIE(AbemaTVBaseIE):
             'description': description,
             'formats': formats,
             'is_live': is_live,
+            'availability': availability,
         })
 
+        if thumbnail := update_url(self._og_search_thumbnail(webpage, default=''), query=None):
+            info['thumbnails'] = [{'url': thumbnail}]
+
         return info
 
 
 class AbemaTVTitleIE(AbemaTVBaseIE):
-    _VALID_URL = r'https?://abema\.tv/video/title/(?P<id>[^?/]+)'
+    _VALID_URL = r'https?://abema\.tv/video/title/(?P<id>[^?/#]+)/?(?:\?(?:[^#]+&)?s=(?P<season>[^&#]+))?'
     _PAGE_SIZE = 25
 
     _TESTS = [{
-        'url': 'https://abema.tv/video/title/90-1597',
+        'url': 'https://abema.tv/video/title/90-1887',
         'info_dict': {
-            'id': '90-1597',
+            'id': '90-1887',
             'title': 'シャッフルアイランド',
+            'description': 'md5:61b2425308f41a5282a926edda66f178',
         },
         'playlist_mincount': 2,
     }, {
@@ -432,41 +442,54 @@ class AbemaTVTitleIE(AbemaTVBaseIE):
         'info_dict': {
             'id': '193-132',
             'title': '真心が届く~僕とスターのオフィス・ラブ!?~',
+            'description': 'md5:9b59493d1f3a792bafbc7319258e7af8',
         },
         'playlist_mincount': 16,
     }, {
-        'url': 'https://abema.tv/video/title/25-102',
+        'url': 'https://abema.tv/video/title/25-1nzan-whrxe',
         'info_dict': {
-            'id': '25-102',
-            'title': 'ソードアート・オンライン アリシゼーション',
+            'id': '25-1nzan-whrxe',
+            'title': 'ソードアート・オンライン',
+            'description': 'md5:c094904052322e6978495532bdbf06e6',
         },
-        'playlist_mincount': 24,
+        'playlist_mincount': 25,
+    }, {
+        'url': 'https://abema.tv/video/title/26-2mzbynr-cph?s=26-2mzbynr-cph_s40',
+        'info_dict': {
+            'title': '〈物語〉シリーズ',
+            'id': '26-2mzbynr-cph',
+            'description': 'md5:e67873de1c88f360af1f0a4b84847a52',
        },
+        'playlist_count': 59,
     }]
 
-    def _fetch_page(self, playlist_id, series_version, page):
-        programs = self._call_api(
-            f'v1/video/series/{playlist_id}/programs', playlist_id,
-            note=f'Downloading page {page + 1}',
-            query={
+    def _fetch_page(self, playlist_id, series_version, season_id, page):
+        query = {
             'seriesVersion': series_version,
             'offset': str(page * self._PAGE_SIZE),
             'order': 'seq',
             'limit': str(self._PAGE_SIZE),
-            })
+        }
+        if season_id:
+            query['seasonId'] = season_id
+        programs = self._call_api(
+            f'v1/video/series/{playlist_id}/programs', playlist_id,
+            note=f'Downloading page {page + 1}',
+            query=query)
         yield from (
             self.url_result(f'https://abema.tv/video/episode/{x}')
             for x in traverse_obj(programs, ('programs', ..., 'id')))
 
-    def _entries(self, playlist_id, series_version):
+    def _entries(self, playlist_id, series_version, season_id):
         return OnDemandPagedList(
-            functools.partial(self._fetch_page, playlist_id, series_version),
+            functools.partial(self._fetch_page, playlist_id, series_version, season_id),
             self._PAGE_SIZE)
 
     def _real_extract(self, url):
-        playlist_id = self._match_id(url)
+        playlist_id, season_id = self._match_valid_url(url).group('id', 'season')
         series_info = self._call_api(f'v1/video/series/{playlist_id}', playlist_id)
 
         return self.playlist_result(
-            self._entries(playlist_id, series_info['version']), playlist_id=playlist_id,
+            self._entries(playlist_id, series_info['version'], season_id), playlist_id=playlist_id,
             playlist_title=series_info.get('title'),
             playlist_description=series_info.get('content'))
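Side note on the `AbemaTVTitleIE` change above: the page fetcher now takes an extra `season_id` argument, and `functools.partial` is what lets the on-demand paged list keep calling it with only a page number. A minimal, self-contained sketch of that binding pattern (the `fetch_page` stub and the sample IDs below are hypothetical, not part of the diff):

```python
import functools

PAGE_SIZE = 25

def fetch_page(playlist_id, series_version, season_id, page):
    # Hypothetical stand-in for _fetch_page: build the same query the diff builds,
    # adding 'seasonId' only when a specific season was requested via ?s=...
    query = {
        'seriesVersion': series_version,
        'offset': str(page * PAGE_SIZE),
        'order': 'seq',
        'limit': str(PAGE_SIZE),
    }
    if season_id:
        query['seasonId'] = season_id
    return query  # the real method calls the API here and yields url_result entries

# Everything except `page` is frozen up front, which is the shape
# OnDemandPagedList expects from its page function.
page_func = functools.partial(fetch_page, '26-2mzbynr-cph', 1, '26-2mzbynr-cph_s40')
print(page_func(0)['offset'], page_func(2)['offset'])  # 0 50
```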
@@ -4,7 +4,7 @@ from .common import InfoExtractor
 
 
 class AcademicEarthCourseIE(InfoExtractor):
-    _VALID_URL = r'^https?://(?:www\.)?academicearth\.org/playlists/(?P<id>[^?#/]+)'
+    _VALID_URL = r'https?://(?:www\.)?academicearth\.org/playlists/(?P<id>[^?#/]+)'
     IE_NAME = 'AcademicEarth:Course'
     _TEST = {
         'url': 'http://academicearth.org/playlists/laws-of-nature/',
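The only change in this hunk is dropping the leading `^` anchor: `_VALID_URL` patterns are applied with `re.match`, which is already anchored at the start of the string, so the caret adds nothing. A quick check with the pattern from the diff:

```python
import re

VALID_URL = r'https?://(?:www\.)?academicearth\.org/playlists/(?P<id>[^?#/]+)'

# re.match only succeeds when the pattern matches at position 0, so no '^' is needed
m = re.match(VALID_URL, 'http://academicearth.org/playlists/laws-of-nature/')
print(m.group('id'))  # laws-of-nature
```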
@@ -43,14 +43,14 @@ class ACastIE(ACastBaseIE):
     _VALID_URL = r'''(?x:
                     https?://
                         (?:
-                            (?:(?:embed|www)\.)?acast\.com/|
+                            (?:(?:embed|www|shows)\.)?acast\.com/|
                             play\.acast\.com/s/
                         )
-                        (?P<channel>[^/]+)/(?P<id>[^/#?"]+)
+                        (?P<channel>[^/?#]+)/(?:episodes/)?(?P<id>[^/#?"]+)
                     )'''
     _EMBED_REGEX = [rf'(?x)<iframe[^>]+\bsrc=[\'"](?P<url>{_VALID_URL})']
     _TESTS = [{
-        'url': 'https://www.acast.com/sparpodcast/2.raggarmordet-rosterurdetforflutna',
+        'url': 'https://shows.acast.com/sparpodcast/episodes/2.raggarmordet-rosterurdetforflutna',
         'info_dict': {
             'id': '2a92b283-1a75-4ad8-8396-499c641de0d9',
             'ext': 'mp3',
@@ -59,7 +59,7 @@ class ACastIE(ACastBaseIE):
             'timestamp': 1477346700,
             'upload_date': '20161024',
             'duration': 2766,
-            'creator': 'Third Ear Studio',
+            'creators': ['Third Ear Studio'],
             'series': 'Spår',
             'episode': '2. Raggarmordet - Röster ur det förflutna',
             'thumbnail': 'https://assets.pippa.io/shows/616ebe1886d7b1398620b943/616ebe33c7e6e70013cae7da.jpg',
@@ -67,13 +67,16 @@ class ACastIE(ACastBaseIE):
             'display_id': '2.raggarmordet-rosterurdetforflutna',
             'season_number': 4,
             'season': 'Season 4',
-        }
+        },
     }, {
         'url': 'http://embed.acast.com/adambuxton/ep.12-adam-joeschristmaspodcast2015',
         'only_matching': True,
     }, {
         'url': 'https://play.acast.com/s/rattegangspodden/s04e09styckmordetihelenelund-del2-2',
         'only_matching': True,
+    }, {
+        'url': 'https://www.acast.com/sparpodcast/2.raggarmordet-rosterurdetforflutna',
+        'only_matching': True,
     }, {
         'url': 'https://play.acast.com/s/sparpodcast/2a92b283-1a75-4ad8-8396-499c641de0d9',
         'only_matching': True,
@@ -93,13 +96,13 @@ class ACastIE(ACastBaseIE):
             'series': 'Democracy Sausage with Mark Kenny',
             'timestamp': 1684826362,
             'description': 'md5:feabe1fc5004c78ee59c84a46bf4ba16',
-        }
+        },
     }]
 
     def _real_extract(self, url):
         channel, display_id = self._match_valid_url(url).groups()
         episode = self._call_api(
-            '%s/episodes/%s' % (channel, display_id),
+            f'{channel}/episodes/{display_id}',
             display_id, {'showInfo': 'true'})
         return self._extract_episode(
             episode, self._extract_show_info(episode.get('show') or {}))
@@ -110,7 +113,7 @@ class ACastChannelIE(ACastBaseIE):
     _VALID_URL = r'''(?x)
                     https?://
                         (?:
-                            (?:www\.)?acast\.com/|
+                            (?:(?:www|shows)\.)?acast\.com/|
                             play\.acast\.com/s/
                         )
                         (?P<id>[^/#?]+)
@@ -120,17 +123,20 @@ class ACastChannelIE(ACastBaseIE):
         'info_dict': {
             'id': '4efc5294-5385-4847-98bd-519799ce5786',
             'title': 'Today in Focus',
-            'description': 'md5:c09ce28c91002ce4ffce71d6504abaae',
+            'description': 'md5:feca253de9947634605080cd9eeea2bf',
         },
         'playlist_mincount': 200,
     }, {
         'url': 'http://play.acast.com/s/ft-banking-weekly',
         'only_matching': True,
+    }, {
+        'url': 'https://shows.acast.com/sparpodcast',
+        'only_matching': True,
     }]
 
     @classmethod
     def suitable(cls, url):
-        return False if ACastIE.suitable(url) else super(ACastChannelIE, cls).suitable(url)
+        return False if ACastIE.suitable(url) else super().suitable(url)
 
     def _real_extract(self, url):
         show_slug = self._match_id(url)
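The reworked ACast `_VALID_URL` above adds the `shows.acast.com` host and an optional `episodes/` path segment while keeping the `channel`/`id` capture groups. A standalone sanity check of the pattern taken from the diff (the sample URL is the one used in the updated test):

```python
import re

ACAST_URL = r'''(?x:
    https?://
        (?:
            (?:(?:embed|www|shows)\.)?acast\.com/|
            play\.acast\.com/s/
        )
        (?P<channel>[^/?#]+)/(?:episodes/)?(?P<id>[^/#?"]+)
    )'''

m = re.match(ACAST_URL, 'https://shows.acast.com/sparpodcast/episodes/2.raggarmordet-rosterurdetforflutna')
print(m.group('channel'))  # sparpodcast
print(m.group('id'))       # 2.raggarmordet-rosterurdetforflutna
```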
@@ -3,9 +3,10 @@ from ..utils import (
     float_or_none,
     format_field,
     int_or_none,
-    traverse_obj,
     parse_codecs,
     parse_qs,
+    str_or_none,
+    traverse_obj,
 )
 
 
@@ -24,7 +25,7 @@ class AcFunVideoBaseIE(InfoExtractor):
                 'width': int_or_none(video.get('width')),
                 'height': int_or_none(video.get('height')),
                 'tbr': float_or_none(video.get('avgBitrate')),
-                **parse_codecs(video.get('codecs', ''))
+                **parse_codecs(video.get('codecs', '')),
             })
 
         return {
@@ -76,7 +77,7 @@ class AcFunVideoIE(AcFunVideoBaseIE):
             'comment_count': int,
             'thumbnail': r're:^https?://.*\.(jpg|jpeg)',
             'description': 'md5:67583aaf3a0f933bd606bc8a2d3ebb17',
-        }
+        },
     }]
 
     def _real_extract(self, url):
@@ -129,7 +130,7 @@ class AcFunBangumiIE(AcFunVideoBaseIE):
             'title': '红孩儿之趴趴蛙寻石记 第5话 ',
             'duration': 760.0,
             'season': '红孩儿之趴趴蛙寻石记',
-            'season_id': 5023171,
+            'season_id': '5023171',
             'season_number': 1,  # series has only 1 season
             'episode': 'Episode 5',
             'episode_number': 5,
@@ -146,7 +147,7 @@ class AcFunBangumiIE(AcFunVideoBaseIE):
             'title': '叽歪老表(第二季) 第5话 坚不可摧',
             'season': '叽歪老表(第二季)',
             'season_number': 2,
-            'season_id': 6065485,
+            'season_id': '6065485',
             'episode': '坚不可摧',
             'episode_number': 5,
             'upload_date': '20220324',
@@ -191,7 +192,7 @@ class AcFunBangumiIE(AcFunVideoBaseIE):
             'title': json_bangumi_data.get('showTitle'),
             'thumbnail': json_bangumi_data.get('image'),
             'season': json_bangumi_data.get('bangumiTitle'),
-            'season_id': season_id,
+            'season_id': str_or_none(season_id),
             'season_number': season_number,
             'episode': json_bangumi_data.get('title'),
             'episode_number': episode_number,
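Most of this AcFun hunk just coerces `season_id` to a string, matching the convention that ID-like metadata fields are strings; `str_or_none` does the conversion while letting `None` pass through untouched. A tiny sketch (assumes yt-dlp is installed):

```python
from yt_dlp.utils import str_or_none

print(str_or_none(6065485))  # '6065485'
print(str_or_none(None))     # None
```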
@@ -3,33 +3,53 @@ import binascii
 import json
 import os
 import random
+import time
 
 from .common import InfoExtractor
 from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
-from ..compat import compat_b64decode
 from ..networking.exceptions import HTTPError
 from ..utils import (
-    ass_subtitles_timecode,
-    bytes_to_intlist,
-    bytes_to_long,
     ExtractorError,
+    ass_subtitles_timecode,
+    bytes_to_long,
     float_or_none,
     int_or_none,
-    intlist_to_bytes,
+    join_nonempty,
     long_to_bytes,
+    parse_iso8601,
     pkcs1pad,
+    str_or_none,
     strip_or_none,
     try_get,
     unified_strdate,
     urlencode_postdata,
 )
+from ..utils.traversal import traverse_obj
 
 
-class ADNIE(InfoExtractor):
+class ADNBaseIE(InfoExtractor):
     IE_DESC = 'Animation Digital Network'
-    _VALID_URL = r'https?://(?:www\.)?(?:animation|anime)digitalnetwork\.fr/video/[^/]+/(?P<id>\d+)'
+    _NETRC_MACHINE = 'animationdigitalnetwork'
+    _BASE = 'animationdigitalnetwork.fr'
+    _API_BASE_URL = f'https://gw.api.{_BASE}/'
+    _PLAYER_BASE_URL = f'{_API_BASE_URL}player/'
+    _HEADERS = {}
+    _LOGIN_ERR_MESSAGE = 'Unable to log in'
+    _RSA_KEY = (0x9B42B08905199A5CCE2026274399CA560ECB209EE9878A708B1C0812E1BB8CB5D1FB7441861147C1A1F2F3A0476DD63A9CAC20D3E983613346850AA6CB38F16DC7D720FD7D86FC6E5B3D5BBC72E14CD0BF9E869F2CEA2CCAD648F1DCE38F1FF916CEFB2D339B64AA0264372344BC775E265E8A852F88144AB0BD9AA06C1A4ABB, 65537)
+    _POS_ALIGN_MAP = {
+        'start': 1,
+        'end': 3,
+    }
+    _LINE_ALIGN_MAP = {
+        'middle': 8,
+        'end': 4,
+    }
+
+
+class ADNIE(ADNBaseIE):
+    _VALID_URL = r'https?://(?:www\.)?animationdigitalnetwork\.com/(?:(?P<lang>de)/)?video/[^/?#]+/(?P<id>\d+)'
     _TESTS = [{
-        'url': 'https://animationdigitalnetwork.fr/video/fruits-basket/9841-episode-1-a-ce-soir',
+        'url': 'https://animationdigitalnetwork.com/video/558-fruits-basket/9841-episode-1-a-ce-soir',
         'md5': '1c9ef066ceb302c86f80c2b371615261',
         'info_dict': {
             'id': '9841',
@@ -44,29 +64,32 @@ class ADNIE(InfoExtractor):
             'season_number': 1,
             'episode': 'À ce soir !',
             'episode_number': 1,
+            'thumbnail': str,
+            'season': 'Season 1',
         },
-        'skip': 'Only available in region (FR, ...)',
+        'skip': 'Only available in French and German speaking Europe',
     }, {
-        'url': 'http://animedigitalnetwork.fr/video/blue-exorcist-kyoto-saga/7778-episode-1-debut-des-hostilites',
-        'only_matching': True,
+        'url': 'https://animationdigitalnetwork.com/de/video/973-the-eminence-in-shadow/23550-folge-1',
+        'md5': '5c5651bf5791fa6fcd7906012b9d94e8',
+        'info_dict': {
+            'id': '23550',
+            'ext': 'mp4',
+            'episode_number': 1,
+            'duration': 1417,
+            'release_date': '20231004',
+            'series': 'The Eminence in Shadow',
+            'season_number': 2,
+            'episode': str,
+            'title': str,
+            'thumbnail': str,
+            'season': 'Season 2',
+            'comment_count': int,
+            'average_rating': float,
+            'description': str,
+        },
+        # 'skip': 'Only available in French and German speaking Europe',
     }]
 
-    _NETRC_MACHINE = 'animationdigitalnetwork'
-    _BASE = 'animationdigitalnetwork.fr'
-    _API_BASE_URL = 'https://gw.api.' + _BASE + '/'
-    _PLAYER_BASE_URL = _API_BASE_URL + 'player/'
-    _HEADERS = {}
-    _LOGIN_ERR_MESSAGE = 'Unable to log in'
-    _RSA_KEY = (0x9B42B08905199A5CCE2026274399CA560ECB209EE9878A708B1C0812E1BB8CB5D1FB7441861147C1A1F2F3A0476DD63A9CAC20D3E983613346850AA6CB38F16DC7D720FD7D86FC6E5B3D5BBC72E14CD0BF9E869F2CEA2CCAD648F1DCE38F1FF916CEFB2D339B64AA0264372344BC775E265E8A852F88144AB0BD9AA06C1A4ABB, 65537)
-    _POS_ALIGN_MAP = {
-        'start': 1,
-        'end': 3,
-    }
-    _LINE_ALIGN_MAP = {
-        'middle': 8,
-        'end': 4,
-    }
-
     def _get_subtitles(self, sub_url, video_id):
         if not sub_url:
             return None
@@ -83,9 +106,9 @@ class ADNIE(InfoExtractor):
 
         # http://animationdigitalnetwork.fr/components/com_vodvideo/videojs/adn-vjs.min.js
         dec_subtitles = unpad_pkcs7(aes_cbc_decrypt_bytes(
-            compat_b64decode(enc_subtitles[24:]),
+            base64.b64decode(enc_subtitles[24:]),
             binascii.unhexlify(self._K + '7fac1178830cfe0c'),
-            compat_b64decode(enc_subtitles[:24])))
+            base64.b64decode(enc_subtitles[:24])))
         subtitles_json = self._parse_json(dec_subtitles.decode(), None, fatal=False)
         if not subtitles_json:
             return None
@@ -108,7 +131,7 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''
             if start is None or end is None or text is None:
                 continue
             alignment = self._POS_ALIGN_MAP.get(position_align, 2) + self._LINE_ALIGN_MAP.get(line_align, 0)
-            ssa += os.linesep + 'Dialogue: Marked=0,%s,%s,Default,,0,0,0,,%s%s' % (
+            ssa += os.linesep + 'Dialogue: Marked=0,{},{},Default,,0,0,0,,{}{}'.format(
                 ass_subtitles_timecode(start),
                 ass_subtitles_timecode(end),
                 '{\\a%d}' % alignment if alignment != 2 else '',
@@ -116,6 +139,8 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''
 
         if sub_lang == 'vostf':
             sub_lang = 'fr'
+        elif sub_lang == 'vostde':
+            sub_lang = 'de'
         subtitles.setdefault(sub_lang, []).extend([{
             'ext': 'json',
             'data': json.dumps(sub),
@@ -137,7 +162,7 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''
                 'username': username,
             })) or {}).get('accessToken')
             if access_token:
-                self._HEADERS = {'authorization': 'Bearer ' + access_token}
+                self._HEADERS['Authorization'] = f'Bearer {access_token}'
         except ExtractorError as e:
             message = None
             if isinstance(e.cause, HTTPError) and e.cause.status == 401:
@@ -147,8 +172,9 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''
             self.report_warning(message or self._LOGIN_ERR_MESSAGE)
 
     def _real_extract(self, url):
-        video_id = self._match_id(url)
-        video_base_url = self._PLAYER_BASE_URL + 'video/%s/' % video_id
+        lang, video_id = self._match_valid_url(url).group('lang', 'id')
+        self._HEADERS['X-Target-Distribution'] = lang or 'fr'
+        video_base_url = self._PLAYER_BASE_URL + f'video/{video_id}/'
         player = self._download_json(
             video_base_url + 'configuration', video_id,
             'Downloading player config JSON metadata',
@@ -157,26 +183,29 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''
 
         user = options['user']
         if not user.get('hasAccess'):
-            self.raise_login_required()
+            start_date = traverse_obj(options, ('video', 'startDate', {str}))
+            if (parse_iso8601(start_date) or 0) > time.time():
+                raise ExtractorError(f'This video is not available yet. Release date: {start_date}', expected=True)
+            self.raise_login_required('This video requires a subscription', method='password')
 
         token = self._download_json(
             user.get('refreshTokenUrl') or (self._PLAYER_BASE_URL + 'refresh/token'),
             video_id, 'Downloading access token', headers={
-                'x-player-refresh-token': user['refreshToken']
+                'X-Player-Refresh-Token': user['refreshToken'],
             }, data=b'')['token']
 
         links_url = try_get(options, lambda x: x['video']['url']) or (video_base_url + 'link')
         self._K = ''.join(random.choices('0123456789abcdef', k=16))
-        message = bytes_to_intlist(json.dumps({
+        message = list(json.dumps({
             'k': self._K,
             't': token,
-        }))
+        }).encode())
 
         # Sometimes authentication fails for no good reason, retry with
         # a different random padding
         links_data = None
         for _ in range(3):
-            padded_message = intlist_to_bytes(pkcs1pad(message, 128))
+            padded_message = bytes(pkcs1pad(message, 128))
             n, e = self._RSA_KEY
             encrypted_message = long_to_bytes(pow(bytes_to_long(padded_message), e, n))
             authorization = base64.b64encode(encrypted_message).decode()
@@ -184,12 +213,13 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''
             try:
                 links_data = self._download_json(
                     links_url, video_id, 'Downloading links JSON metadata', headers={
-                        'X-Player-Token': authorization
+                        'X-Player-Token': authorization,
+                        **self._HEADERS,
                     }, query={
                         'freeWithAds': 'true',
                         'adaptive': 'false',
                         'withMetadata': 'true',
-                        'source': 'Web'
+                        'source': 'Web',
                     })
                 break
             except ExtractorError as e:
@@ -202,7 +232,7 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''
 
                 error = self._parse_json(e.cause.response.read(), video_id)
                 message = error.get('message')
-                if e.cause.code == 403 and error.get('code') == 'player-bad-geolocation-country':
+                if e.cause.status == 403 and error.get('code') == 'player-bad-geolocation-country':
                     self.raise_geo_restricted(msg=message)
                 raise ExtractorError(message)
             else:
@@ -221,7 +251,8 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''
         for quality, load_balancer_url in qualities.items():
             load_balancer_data = self._download_json(
                 load_balancer_url, video_id,
-                'Downloading %s %s JSON metadata' % (format_id, quality),
+                f'Downloading {format_id} {quality} JSON metadata',
+                headers=self._HEADERS,
                 fatal=False) or {}
             m3u8_url = load_balancer_data.get('location')
             if not m3u8_url:
@@ -232,11 +263,17 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''
             if format_id == 'vf':
                 for f in m3u8_formats:
                     f['language'] = 'fr'
+            elif format_id == 'vde':
+                for f in m3u8_formats:
+                    f['language'] = 'de'
             formats.extend(m3u8_formats)
 
+        if not formats:
+            self.raise_login_required('This video requires a subscription', method='password')
+
         video = (self._download_json(
-            self._API_BASE_URL + 'video/%s' % video_id, video_id,
-            'Downloading additional video metadata', fatal=False) or {}).get('video') or {}
+            self._API_BASE_URL + f'video/{video_id}', video_id,
+            'Downloading additional video metadata', fatal=False, headers=self._HEADERS) or {}).get('video') or {}
         show = video.get('show') or {}
 
         return {
@@ -255,3 +292,38 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''
             'average_rating': float_or_none(video.get('rating') or metas.get('rating')),
             'comment_count': int_or_none(video.get('commentsCount')),
         }
+
+
+class ADNSeasonIE(ADNBaseIE):
+    _VALID_URL = r'https?://(?:www\.)?animationdigitalnetwork\.com/(?:(?P<lang>de)/)?video/(?P<id>\d+)[^/?#]*/?(?:$|[#?])'
+    _TESTS = [{
+        'url': 'https://animationdigitalnetwork.com/video/911-tokyo-mew-mew-new',
+        'playlist_count': 12,
+        'info_dict': {
+            'id': '911',
+            'title': 'Tokyo Mew Mew New',
+        },
+        # 'skip': 'Only available in French end German speaking Europe',
+    }]
+
+    def _real_extract(self, url):
+        lang, video_show_slug = self._match_valid_url(url).group('lang', 'id')
+        self._HEADERS['X-Target-Distribution'] = lang or 'fr'
+        show = self._download_json(
+            f'{self._API_BASE_URL}show/{video_show_slug}/', video_show_slug,
+            'Downloading show JSON metadata', headers=self._HEADERS)['show']
+        show_id = str(show['id'])
+        episodes = self._download_json(
+            f'{self._API_BASE_URL}video/show/{show_id}', video_show_slug,
+            'Downloading episode list', headers=self._HEADERS, query={
+                'order': 'asc',
+                'limit': '-1',
+            })
+
+        def entries():
+            for episode_id in traverse_obj(episodes, ('videos', ..., 'id', {str_or_none})):
+                yield self.url_result(join_nonempty(
+                    'https://animationdigitalnetwork.com', lang, 'video',
+                    video_show_slug, episode_id, delim='/'), ADNIE, episode_id)
+
+        return self.playlist_result(entries(), show_id, show.get('title'))
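The ADN token handshake itself is unchanged above; the diff only swaps `bytes_to_intlist`/`intlist_to_bytes` for plain `list()`/`bytes()` around `pkcs1pad`. A minimal sketch of that handshake crypto, assuming yt-dlp is installed; the modulus and exponent are copied from the `_RSA_KEY` constant in the diff, and the payload values are dummies:

```python
import base64
import json

from yt_dlp.utils import bytes_to_long, long_to_bytes, pkcs1pad

N = int(
    '9B42B08905199A5CCE2026274399CA560ECB209EE9878A708B1C0812E1BB8CB5'
    'D1FB7441861147C1A1F2F3A0476DD63A9CAC20D3E983613346850AA6CB38F16D'
    'C7D720FD7D86FC6E5B3D5BBC72E14CD0BF9E869F2CEA2CCAD648F1DCE38F1FF9'
    '16CEFB2D339B64AA0264372344BC775E265E8A852F88144AB0BD9AA06C1A4ABB', 16)
E = 65537

payload = json.dumps({'k': '0' * 16, 't': 'dummy-token'}).encode()
padded = bytes(pkcs1pad(list(payload), 128))              # PKCS#1 v1.5 pad to 128 bytes
cipher = long_to_bytes(pow(bytes_to_long(padded), E, N))  # textbook RSA: c = m^e mod n
print(base64.b64encode(cipher).decode()[:32] + '...')     # value sent as X-Player-Token
```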
@@ -1,8 +1,6 @@
+import urllib.parse
+
 from .common import InfoExtractor
-from ..compat import (
-    compat_parse_qs,
-    compat_urlparse,
-)
 
 
 class AdobeConnectIE(InfoExtractor):
@@ -12,13 +10,13 @@ class AdobeConnectIE(InfoExtractor):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
         title = self._html_extract_title(webpage)
-        qs = compat_parse_qs(self._search_regex(r"swfUrl\s*=\s*'([^']+)'", webpage, 'swf url').split('?')[1])
+        qs = urllib.parse.parse_qs(self._search_regex(r"swfUrl\s*=\s*'([^']+)'", webpage, 'swf url').split('?')[1])
         is_live = qs.get('isLive', ['false'])[0] == 'true'
         formats = []
         for con_string in qs['conStrings'][0].split(','):
             formats.append({
                 'format_id': con_string.split('://')[0],
-                'app': compat_urlparse.quote('?' + con_string.split('?')[1] + 'flvplayerapp/' + qs['appInstance'][0]),
+                'app': urllib.parse.quote('?' + con_string.split('?')[1] + 'flvplayerapp/' + qs['appInstance'][0]),
                 'ext': 'flv',
                 'play_path': 'mp4:' + qs['streamName'][0],
                 'rtmp_conn': 'S:' + qs['ticket'][0],
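This last hunk simply replaces the removed `compat_parse_qs`/`compat_urlparse` shims with the standard library. `urllib.parse.parse_qs` returns a dict of value lists, which is why the extractor indexes `[0]` everywhere; for example:

```python
from urllib.parse import parse_qs

# Hypothetical query string in the shape the extractor pulls out of swfUrl
qs = parse_qs('isLive=true&streamName=demo&appInstance=example')
print(qs.get('isLive', ['false'])[0] == 'true')  # True
print('mp4:' + qs['streamName'][0])              # mp4:demo
```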