diff --git a/debs/build.sh b/debs/build.sh
deleted file mode 100644
index 48e85d7..0000000
--- a/debs/build.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-
-# Fixes ownershp
-function main() {
-    sudo find . -type f -exec chmod 644 {} +
-    sudo find . -type d -exec chmod 755 {} +
-
-    # Set postrm permissions
-    for i in `find . -name postrm`; do
-        sudo chmod 755 "${i}"
-    done
-
-    # Set pytop permissions
-    for i in `find . -name pytop`; do
-        sudo chmod 755 "${i}"
-    done
-
-    sudo chown -R root:root ./*/
-
-    builder;
-    bash ./chownAll.sh
-}
-
-#builds debs
-function builder() {
-    for i in `ls`; do
-        if [[ -d "${i}" ]]; then
-            dpkg --build "${i}"
-        else
-            echo "Not a dir."
-        fi
-    done
-}
-main;
diff --git a/debs/chown_all.sh b/debs/chown_all.sh
deleted file mode 100644
index 2c3d7c8..0000000
--- a/debs/chown_all.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-
-function main() {
-    sudo chown -R "${USER}":"${USER}" .
-}
-main;
\ No newline at end of file
diff --git a/debs/solarfm-0-0-1-x64/DEBIAN/control b/debs/solarfm-0-0-1-x64/DEBIAN/control
deleted file mode 100644
index 50ac836..0000000
--- a/debs/solarfm-0-0-1-x64/DEBIAN/control
+++ /dev/null
@@ -1,8 +0,0 @@
-Package: solarfm64
-Version: 0.0-1
-Section: python
-Priority: optional
-Architecture: amd64
-Depends: python3.8, wget, ffmpegthumbnailer, python3-setproctitle, python3-gi, steamcmd
-Maintainer: Maxim Stewart <1itdominator@gmail.com>
-Description: SolarFM is a Gtk + Python file manager.
diff --git a/debs/solarfm-0-0-1-x64/DEBIAN/postrm b/debs/solarfm-0-0-1-x64/DEBIAN/postrm
deleted file mode 100755
index 2646766..0000000
--- a/debs/solarfm-0-0-1-x64/DEBIAN/postrm
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-# Note: postrm (script executed after uninstalling the package)
-# set -e
-
-
-if [ -f /bin/solarfm ]; then
-    rm /bin/solarfm
-fi
-
-if [ -f /opt/solarfm.zip ]; then
-    rm /opt/solarfm.zip
-fi
diff --git a/debs/solarfm-0-0-1-x64/bin/solarfm b/debs/solarfm-0-0-1-x64/bin/solarfm
deleted file mode 100755
index 8cca2db..0000000
--- a/debs/solarfm-0-0-1-x64/bin/solarfm
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-
-# . CONFIG.sh
-
-# set -o xtrace       ## To debug scripts
-# set -o errexit      ## To exit on error
-# set -o errunset     ## To exit if a variable is referenced but not set
-
-
-function main() {
-    call_path=`pwd`
-    cd "${call_path}"
-    echo "Working Dir: " $(pwd)
-
-    python /opt/solarfm.zip "$@"
-}
-main "$@";
diff --git a/debs/solarfm-0-0-1-x64/opt/DELETE_ME.txt b/debs/solarfm-0-0-1-x64/opt/DELETE_ME.txt
deleted file mode 100644
index 913acd6..0000000
--- a/debs/solarfm-0-0-1-x64/opt/DELETE_ME.txt
+++ /dev/null
@@ -1 +0,0 @@
-Place the zipped up solarfm zip here amnd run the build script.
\ No newline at end of file
diff --git a/debs/solarfm-0-0-1-x64/usr/share/applications/solarfm.desktop b/debs/solarfm-0-0-1-x64/usr/share/applications/solarfm.desktop
deleted file mode 100755
index 77b2ecb..0000000
--- a/debs/solarfm-0-0-1-x64/usr/share/applications/solarfm.desktop
+++ /dev/null
@@ -1,11 +0,0 @@
-[Desktop Entry]
-Name=SolarFM
-GenericName=File Manager
-Comment=A file manager built with Python and GObject introspection.
-Exec=/bin/solarfm %F
-Icon=/usr/share/solarfm/icons/solarfm.png
-Type=Application
-StartupNotify=true
-Categories=System;FileTools;Utility;Core;GTK;FileManager;
-MimeType=inode/directory;inode/mount-point;x-scheme-handler/ssh;x-scheme-handler/smb;x-scheme-handler/nfs;x-scheme-handler/ftp;x-scheme-handler/ptp;x-scheme-handler/mtp;x-scheme-handler/webdav;x-scheme-handler/http;x-scheme-handler/https;
-Terminal=false
diff --git a/debs/solarfm-0-0-1-x64/usr/share/doc/solarfm/copyright b/debs/solarfm-0-0-1-x64/usr/share/doc/solarfm/copyright
deleted file mode 100644
index b293cfe..0000000
--- a/debs/solarfm-0-0-1-x64/usr/share/doc/solarfm/copyright
+++ /dev/null
@@ -1,22 +0,0 @@
-SolarFM is copyright 2021 Maxim Stewart.
-SolarFM is currently developed by ITDominator <1itdominator@gmail.com>.
-
-License: GPLv2
-
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
-
-See /usr/share/common-licenses/GPL-2, or
- for the terms of the latest version
-of the GNU General Public License.
diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/Main_Window.glade b/debs/solarfm-0-0-1-x64/usr/share/solarfm/Main_Window.glade
deleted file mode 100644
index e604e38..0000000
--- a/debs/solarfm-0-0-1-x64/usr/share/solarfm/Main_Window.glade
+++ /dev/null
@@ -1,968 +0,0 @@
[968 deleted lines of Glade XML (Main_Window.glade UI definition) omitted: the markup was stripped during extraction and only bare property values survive]
diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/contexct_menu.json b/debs/solarfm-0-0-1-x64/usr/share/solarfm/contexct_menu.json
deleted file mode 100644
index c17be4c..0000000
--- a/debs/solarfm-0-0-1-x64/usr/share/solarfm/contexct_menu.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
-    "Open Actions": {
-        "Open": ["STOCK_OPEN", "open"],
-        "Open With": ["STOCK_OPEN", "open_with"],
-        "Execute": ["STOCK_EXECUTE", "execute"],
-        "Execute in Terminal": ["STOCK_EXECUTE", "execute_in_terminal"]
-    },
-    "File Actions": {
-        "New": ["STOCK_ADD", "create"],
-        "Rename": ["STOCK_EDIT", "rename"],
-        "Cut": ["STOCK_CUT", "cut"],
-        "Copy": ["STOCK_COPY", "copy"],
-        "Copy Name": ["STOCK_COPY", "copy_name"],
-        "Copy Path": ["STOCK_COPY", "copy_path"],
-        "Copy Path+Name": ["STOCK_COPY", "copy_path_name"],
-        "Paste": ["STOCK_PASTE", "paste"]
-    },
-    "Plugins": {}
-}
diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/3g2.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/3g2.png
deleted file mode 100644
index cccf50a..0000000
Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/3g2.png and /dev/null differ
diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/3gp.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/3gp.png
deleted file mode 100644
index b3fb117..0000000
Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/3gp.png and /dev/null differ
diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ai.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ai.png
deleted file mode 100644
index ddb172f..0000000
Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ai.png and /dev/null differ
diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/air.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/air.png
deleted file mode 100644
index 076f08e..0000000
Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/air.png and /dev/null differ
diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/asf.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/asf.png
deleted file mode 100644
index b700cf4..0000000
Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/asf.png and /dev/null differ
diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/avi.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/avi.png
deleted file mode 100644
index f4436f7..0000000
Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/avi.png and /dev/null differ
diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/bib.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/bib.png
deleted file mode 100644
index 2789ca5..0000000
Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/bib.png and /dev/null differ
diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/cls.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/cls.png
deleted file mode 100644
index 4759ad6..0000000
Binary files
a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/cls.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/csv.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/csv.png deleted file mode 100644 index 869e354..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/csv.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/deb.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/deb.png deleted file mode 100644 index e5581ad..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/deb.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/djvu.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/djvu.png deleted file mode 100644 index f3ed05d..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/djvu.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/dmg.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/dmg.png deleted file mode 100644 index b52c92c..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/dmg.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/doc.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/doc.png deleted file mode 100644 index 8f615d1..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/doc.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/docx.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/docx.png deleted file mode 100644 index 377ecc7..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/docx.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/dwf.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/dwf.png deleted file mode 100644 index 349610c..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/dwf.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/dwg.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/dwg.png deleted file mode 100644 index 5398b08..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/dwg.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/eps.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/eps.png deleted file mode 100644 index 10f19eb..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/eps.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/epub.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/epub.png deleted file mode 100644 index 6f8a256..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/epub.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/exe.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/exe.png deleted file mode 100644 index 0910322..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/exe.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/f.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/f.png deleted file mode 100644 index 7cfb90a..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/f.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/f77.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/f77.png deleted file mode 100644 
index 752fa8c..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/f77.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/f90.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/f90.png deleted file mode 100644 index 32c9feb..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/f90.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/flac.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/flac.png deleted file mode 100644 index b529135..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/flac.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/flv.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/flv.png deleted file mode 100644 index c4e35d5..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/flv.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/gif.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/gif.png deleted file mode 100644 index 7cd9773..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/gif.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/gz.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/gz.png deleted file mode 100644 index 987d4f0..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/gz.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ico.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ico.png deleted file mode 100644 index b33287e..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ico.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/indd.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/indd.png deleted file mode 100644 index 24389f0..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/indd.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/iso.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/iso.png deleted file mode 100644 index de2a19f..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/iso.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/jpeg.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/jpeg.png deleted file mode 100644 index b1ba768..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/jpeg.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/jpg.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/jpg.png deleted file mode 100644 index b1ba768..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/jpg.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/log.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/log.png deleted file mode 100644 index c1acea1..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/log.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/m4a.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/m4a.png deleted file mode 100644 index f8f3ada..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/m4a.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/m4v.png 
b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/m4v.png deleted file mode 100644 index fef795b..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/m4v.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/midi.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/midi.png deleted file mode 100644 index 85132d5..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/midi.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/mkv.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/mkv.png deleted file mode 100644 index b0b1f92..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/mkv.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/mov.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/mov.png deleted file mode 100644 index 9799d32..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/mov.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/mp3.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/mp3.png deleted file mode 100644 index 18394f5..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/mp3.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/mp4.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/mp4.png deleted file mode 100644 index b34c7d5..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/mp4.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/mpeg.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/mpeg.png deleted file mode 100644 index eb58ef5..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/mpeg.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/mpg.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/mpg.png deleted file mode 100644 index eb58ef5..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/mpg.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/msi.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/msi.png deleted file mode 100644 index 95fe7d7..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/msi.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/odp.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/odp.png deleted file mode 100644 index 69f8663..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/odp.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ods.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ods.png deleted file mode 100644 index 8f415a9..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ods.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/odt.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/odt.png deleted file mode 100644 index 5e10765..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/odt.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/oga.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/oga.png deleted file mode 100644 index c236464..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/oga.png and /dev/null differ diff --git 
a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ogg.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ogg.png deleted file mode 100644 index 1f70cb8..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ogg.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ogv.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ogv.png deleted file mode 100644 index 027dfe9..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ogv.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/pdf.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/pdf.png deleted file mode 100644 index 867f287..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/pdf.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/png.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/png.png deleted file mode 100644 index 9433c35..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/png.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/pps.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/pps.png deleted file mode 100644 index f75d9b6..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/pps.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ppsx.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ppsx.png deleted file mode 100644 index a9fd94b..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ppsx.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ppt.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ppt.png deleted file mode 100644 index 0cb28ba..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ppt.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/pptx.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/pptx.png deleted file mode 100644 index d1d7785..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/pptx.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/psd.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/psd.png deleted file mode 100644 index 568684b..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/psd.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/pub.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/pub.png deleted file mode 100644 index ff27076..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/pub.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/py.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/py.png deleted file mode 100644 index 4fadf04..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/py.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/qt.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/qt.png deleted file mode 100644 index 839742f..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/qt.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ra.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ra.png deleted file mode 100644 index 0103372..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ra.png 
and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ram.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ram.png deleted file mode 100644 index ffdfa05..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ram.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/rar.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/rar.png deleted file mode 100644 index 96a5cd0..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/rar.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/rm.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/rm.png deleted file mode 100644 index ae680c2..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/rm.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/rpm.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/rpm.png deleted file mode 100644 index 5f3d622..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/rpm.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/rtf.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/rtf.png deleted file mode 100644 index c26ede1..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/rtf.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/rv.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/rv.png deleted file mode 100644 index d7d46b5..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/rv.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/skp.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/skp.png deleted file mode 100644 index 778f0e3..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/skp.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/spx.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/spx.png deleted file mode 100644 index 4f3d7a1..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/spx.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/sql.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/sql.png deleted file mode 100644 index bf6c1ab..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/sql.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/sty.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/sty.png deleted file mode 100644 index 5512ae8..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/sty.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/tar.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/tar.png deleted file mode 100644 index dda5cea..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/tar.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/tex.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/tex.png deleted file mode 100644 index 36bf00e..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/tex.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/tgz.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/tgz.png deleted file mode 100644 index 651d0b1..0000000 Binary files 
a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/tgz.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/tiff.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/tiff.png deleted file mode 100644 index b780ffa..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/tiff.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ttf.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ttf.png deleted file mode 100644 index 842f566..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/ttf.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/txt.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/txt.png deleted file mode 100644 index cbae3ce..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/txt.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/vob.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/vob.png deleted file mode 100644 index 70cc91d..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/vob.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/wav.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/wav.png deleted file mode 100644 index 1dfa320..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/wav.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/wmv.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/wmv.png deleted file mode 100644 index c120508..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/wmv.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/xls.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/xls.png deleted file mode 100644 index cf5a2d0..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/xls.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/xlsx.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/xlsx.png deleted file mode 100644 index 454fd5d..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/xlsx.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/xml.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/xml.png deleted file mode 100644 index 609f131..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/xml.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/xpi.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/xpi.png deleted file mode 100644 index fd479a6..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/xpi.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/zip.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/zip.png deleted file mode 100644 index 8caadb2..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/fileicons/zip.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/archive.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/archive.png deleted file mode 100644 index 7943e4e..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/archive.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/audio.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/audio.png deleted file mode 100644 
index c010134..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/audio.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/bin.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/bin.png deleted file mode 100644 index d6954e3..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/bin.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/dir.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/dir.png deleted file mode 100644 index a9b5e9f..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/dir.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/doc.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/doc.png deleted file mode 100644 index f838826..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/doc.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/pdf.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/pdf.png deleted file mode 100644 index 9f40122..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/pdf.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/presentation.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/presentation.png deleted file mode 100644 index 3a339af..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/presentation.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/solarfm-64x64.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/solarfm-64x64.png deleted file mode 100644 index 1a403ae..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/solarfm-64x64.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/solarfm.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/solarfm.png deleted file mode 100644 index 1a403ae..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/solarfm.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/spreadsheet.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/spreadsheet.png deleted file mode 100644 index 710efa6..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/spreadsheet.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/text.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/text.png deleted file mode 100644 index 2546fcd..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/text.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/trash.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/trash.png deleted file mode 100644 index c6514b9..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/trash.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/video.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/video.png deleted file mode 100644 index 55afa98..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/video.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/web.png b/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/web.png deleted file mode 100644 index 17017ce..0000000 Binary files a/debs/solarfm-0-0-1-x64/usr/share/solarfm/icons/web.png and /dev/null differ diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/key-bindings.json b/debs/solarfm-0-0-1-x64/usr/share/solarfm/key-bindings.json deleted file mode 100644 
index 83e0081..0000000
--- a/debs/solarfm-0-0-1-x64/usr/share/solarfm/key-bindings.json
+++ /dev/null
@@ -1,23 +0,0 @@
-{
-    "keybindings": {
-        "help"                   : "F1",
-        "rename_files"           : ["F2",
-                                    "e"],
-        "open_terminal"          : "F4",
-        "refresh_tab"            : ["F5",
-                                    "r"],
-        "tggl_top_main_menubar"  : "0",
-        "tear_down"              : "q",
-        "go_up"                  : "Up",
-        "go_home"                : "slash",
-        "grab_focus_path_entry"  : "l",
-        "open_files"             : "o",
-        "show_hide_hidden_files" : "h",
-        "create_tab"             : "t",
-        "keyboard_close_tab"     : "w",
-        "copy_files"             : "c",
-        "cut_files"              : "x",
-        "paste_files"            : "v",
-        "create_files"           : "n"
-    }
-}
diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/settings.json b/debs/solarfm-0-0-1-x64/usr/share/solarfm/settings.json
deleted file mode 100644
index a67c142..0000000
--- a/debs/solarfm-0-0-1-x64/usr/share/solarfm/settings.json
+++ /dev/null
@@ -1,45 +0,0 @@
-{
-    "config": {
-        "base_of_home": "",
-        "hide_hidden_files": "true",
-        "thumbnailer_path": "ffmpegthumbnailer",
-        "blender_thumbnailer_path": "",
-        "go_past_home": "true",
-        "lock_folder": "false",
-        "locked_folders": "venv::::flasks",
-        "mplayer_options": "-quiet -really-quiet -xy 1600 -geometry 50%:50%",
-        "music_app": "deadbeef",
-        "media_app": "mpv",
-        "image_app": "mirage2",
-        "office_app": "libreoffice",
-        "pdf_app": "evince",
-        "code_app": "newton",
-        "text_app": "mousepad",
-        "terminal_app": "terminator",
-        "container_icon_wh": [128, 128],
-        "video_icon_wh": [128, 64],
-        "sys_icon_wh": [56, 56],
-        "file_manager_app": "solarfm",
-        "steam_cdn_url": "https://steamcdn-a.akamaihd.net/steam/apps/",
-        "remux_folder_max_disk_usage": "8589934592"
-    },
-    "filters": {
-        "meshs": [".dae", ".fbx", ".gltf", ".obj", ".stl"],
-        "code": [".cpp", ".css", ".c", ".go", ".html", ".htm", ".java", ".js", ".json", ".lua", ".md", ".py", ".rs", ".toml", ".xml", ".pom"],
-        "videos": [".mkv", ".mp4", ".webm", ".avi", ".mov", ".m4v", ".mpg", ".mpeg", ".wmv", ".flv"],
-        "office": [".doc", ".docx", ".xls", ".xlsx", ".xlt", ".xltx", ".xlm", ".ppt", ".pptx", ".pps", ".ppsx", ".odt", ".rtf"],
-        "images": [".png", ".jpg", ".jpeg", ".gif", ".ico", ".tga", ".webp"],
-        "text": [".txt", ".text", ".sh", ".cfg", ".conf", ".log"],
-        "music": [".psf", ".mp3", ".ogg", ".flac", ".m4a"],
-        "pdf": [".pdf"]
-    },
-    "theming":{
-        "success_color": "#88cc27",
-        "warning_color": "#ffa800",
-        "error_color": "#ff0000"
-    },
-    "debugging": {
-        "ch_log_lvl": 20,
-        "fh_log_lvl": 10
-    }
-}
\ No newline at end of file
diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/stylesheet.css b/debs/solarfm-0-0-1-x64/usr/share/solarfm/stylesheet.css
deleted file mode 100644
index e802040..0000000
--- a/debs/solarfm-0-0-1-x64/usr/share/solarfm/stylesheet.css
+++ /dev/null
@@ -1,165 +0,0 @@
-/* Set fm to have transparent window */
-
-/* * {*/
-/*     background: rgba(39, 43, 52, 0.24);*/
-/*     color: rgba(255, 255, 255, 1);*/
-/* }*/
-
-box,
-iconview,
-notebook,
-paned,
-stack,
-scrolledwindow,
-treeview.view,
-.content-view,
-.view {
-    background: rgba(19, 21, 25, 0.14);
-    /* background: rgba(39, 43, 52, 0.14); */
-    color: rgba(255, 255, 255, 1);
-}
-
-button:hover {
-    background-color: rgba(255, 185, 25, 0.34);
-}
-
-/* ---- top controls ---- */
-window > box > box > button,
-window > box > box > buttonbox > button {
-    background: rgba(39, 43, 52, 0.64);
-}
-
-buttonbox > button * {
-    background: rgba(116, 0, 0, 0.0);
-    color: rgba(255, 255, 255, 1.0);
-}
-
-
-buttonbox > button:checked {
-    background-color: rgba(255, 125, 25, 0.34);
-}
-
-buttonbox > button:hover {
-    background-color: rgba(255, 185, 25, 0.34);
-}
-
-/*
---- notebook headers ---- */ -notebook > header { - background: rgba(39, 43, 52, 0.46); -} - -notebook > header > tabs > tab { - color: rgba(255, 255, 255, 1); -} - -notebook > header > tabs > tab:active { - background: rgba(0, 0, 0, 0.0); -} - -notebook > header > tabs > tab:checked { - /* Neon Blue 00e8ff */ - background-color: rgba(0, 232, 255, 0.2); - /* Dark Bergundy */ - /* background-color: rgba(116, 0, 0, 0.25); */ - - color: rgba(255, 255, 255, 0.8); -} - -popover { - background: rgba(39, 43, 52, 0.86); - color: rgba(255, 255, 255, 1); -} - -/* ---- make text selection slightly transparent ---- */ -* selection { - background-color: rgba(0, 115, 115, 0.34); - /* Bergundy */ - /* background-color: rgba(116, 0, 0, 0.64); */ - color: rgba(255, 255, 255, 0.5); -} - - -/* ---- notebook tab buttons ---- */ -tab > box > button { - background: rgba(116, 0, 0, 0.64); -} - -tab > box > button:hover { - background: rgba(256, 0, 0, 0.64); -} - - - - - - - - - - - - - -#message_view { - font: 16px "Monospace"; -} - -.view:selected, -.view:selected:hover { - box-shadow: inset 0 0 0 9999px rgba(21, 158, 167, 0.34); - color: rgba(255, 255, 255, 0.5); -} - -.alert-border { - border: 2px solid rgba(116, 0, 0, 0.64); -} - -.search-border { - border: 2px solid rgba(136, 204, 39, 1); -} - -.notebook-selected-focus { - /* Neon Blue 00e8ff border */ - border: 2px solid rgba(0, 232, 255, 0.34); - /* Dark Bergundy */ - /* border: 2px solid rgba(116, 0, 0, 0.64); */ -} - -.notebook-unselected-focus { - /* Neon Blue 00e8ff border */ - /* border: 2px solid rgba(0, 232, 255, 0.25); */ - /* Dark Bergundy */ - /* border: 2px solid rgba(116, 0, 0, 0.64); */ - /* Snow White */ - border: 2px solid rgba(255, 255, 255, 0.24); -} - - - - - -/* * { - background: rgba(0, 0, 0, 0.14); - color: rgba(255, 255, 255, 1); -} */ - -/* * selection { - background-color: rgba(116, 0, 0, 0.65); - color: rgba(255, 255, 255, 0.5); -} */ - -/* Rubberband coloring */ -/* .rubberband, -rubberband, -flowbox rubberband, -treeview.view rubberband, -.content-view rubberband, -.content-view .rubberband, -XfdesktopIconView.view .rubberband { - border: 1px solid #6c6c6c; - background-color: rgba(21, 158, 167, 0.57); -} - -XfdesktopIconView.view:active { - background-color: rgba(172, 102, 21, 1); -} */ \ No newline at end of file diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/ui_widgets/about_ui.glade b/debs/solarfm-0-0-1-x64/usr/share/solarfm/ui_widgets/about_ui.glade deleted file mode 100644 index f559f6c..0000000 --- a/debs/solarfm-0-0-1-x64/usr/share/solarfm/ui_widgets/about_ui.glade +++ /dev/null @@ -1,390 +0,0 @@ - - - - - - 320 - 480 - False - 5 - center-on-parent - ../icons/solarfm.png - dialog - True - True - False - center - SolarFM - 0.0.1 - Copyright (C) 2021 GPL2 - by ITDominator - https://code.itdominator.com/itdominator/SolarFM - SolarFM - Copyright (C) 2021 ITDominator GPL2 - - - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. 
This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. 
You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. 
You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. 
If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - <one line to give the program's name and a brief idea of what it does.> - Copyright (C) <year> <name of author> - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. 
Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - <signature of Ty Coon>, 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. - - Lead Developer: - ITDominator <1itdominator@gmail.com> - - -SolarFM is developed on Atom, git, and using Python 3+ with Gtk GObject introspection. - translator-credits - ../icons/solarfm-64x64.png - True - custom - - - False - - - False - - - False - False - 0 - - - - - - diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/ui_widgets/appchooser_ui.glade b/debs/solarfm-0-0-1-x64/usr/share/solarfm/ui_widgets/appchooser_ui.glade deleted file mode 100644 index 6959306..0000000 --- a/debs/solarfm-0-0-1-x64/usr/share/solarfm/ui_widgets/appchooser_ui.glade +++ /dev/null @@ -1,75 +0,0 @@ - - - - - - False - mouse - splashscreen - south - - - - False - vertical - 2 - - - False - end - - - gtk-cancel - True - True - True - True - - - True - True - 0 - - - - - Select - True - True - True - - - True - True - 1 - - - - - False - False - 0 - - - - - True - False - False - True - - - - False - True - 1 - - - - - - button31 - appchooser_select_btn - - - diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/ui_widgets/bottom_status_info_ui.glade b/debs/solarfm-0-0-1-x64/usr/share/solarfm/ui_widgets/bottom_status_info_ui.glade deleted file mode 100644 index b2dd13d..0000000 --- a/debs/solarfm-0-0-1-x64/usr/share/solarfm/ui_widgets/bottom_status_info_ui.glade +++ /dev/null @@ -1,50 +0,0 @@ - - - - - - True - False - 10 - 10 - 10 - 10 - 6 - 6 - 15 - top - - - True - False - - - False - True - 0 - - - - - True - False - - - False - True - 1 - - - - - True - False - - - False - True - 2 - - - - diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/ui_widgets/file_exists_ui.glade b/debs/solarfm-0-0-1-x64/usr/share/solarfm/ui_widgets/file_exists_ui.glade deleted file mode 100644 index 70d7291..0000000 --- a/debs/solarfm-0-0-1-x64/usr/share/solarfm/ui_widgets/file_exists_ui.glade +++ /dev/null @@ -1,337 +0,0 @@ - - - - - - 120 - False - False - True - center-always - True - dialog - True - True - True - False - False - center - - - False - 5 - 5 - 5 - 5 - vertical - 2 - - - False - end - - - Overwrite - True - True - True - - - True - True - 0 - - - - - Overwrite (All) - True - True - True - - - True - True - 1 - - - - - Skip - True - True - True - - - True - True - 2 - - - - - Skip (All) - True - True - True - - - True - True - 3 - - - - - False - False - 0 - - - - - True - False - vertical - - - True - False - Filename already exists. Please rename or select an action. 
- 0.10000000149011612 - - - - - - False - True - 0 - - - - - True - False - 15 - 10 - - - True - False - Moving From: - - - False - True - 0 - - - - - True - False - - - True - True - 1 - - - - - False - True - 1 - - - - - True - False - 0 - - - True - True - 2 - - - - - True - False - 20 - 10 - - - True - False - Moving To: - - - False - True - 0 - - - - - True - False - - - True - True - 1 - - - - - False - True - 3 - - - - - True - False - 0 - - - True - True - 4 - - - - - True - False - 20 - - - True - False - Filename: - - - False - True - 0 - - - - - True - False - - - True - True - 1 - - - - - False - True - 5 - - - - - True - True - - - - False - True - 6 - - - - - True - False - 20 - top - start - - - - - - Rename - True - False - True - True - - - - True - True - 1 - True - - - - - Auto Rename - True - True - True - - - - True - True - 2 - True - - - - - Auto Rename All - True - True - True - - - - True - True - 3 - True - - - - - False - True - 7 - - - - - True - True - 1 - - - - - - button5 - button6 - button7 - button8 - - - - diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/ui_widgets/new_file_ui.glade b/debs/solarfm-0-0-1-x64/usr/share/solarfm/ui_widgets/new_file_ui.glade deleted file mode 100644 index 8b11ea8..0000000 --- a/debs/solarfm-0-0-1-x64/usr/share/solarfm/ui_widgets/new_file_ui.glade +++ /dev/null @@ -1,186 +0,0 @@ - - - - - - True - False - gtk-new - - - False - False - True - center-always - True - dialog - True - True - False - False - center - - - False - 5 - 5 - 5 - 5 - vertical - 2 - - - False - end - - - gtk-cancel - True - True - True - True - True - - - True - True - 0 - - - - - Create - create - True - True - True - Create File/Folder... - create_img - True - - - False - True - 1 - - - - - False - False - 0 - - - - - True - False - vertical - - - 500 - 26 - True - True - True - New File/Dir Name... - True - gtk-edit - False - False - False - False - New File/Dir Name... 
- - - - False - True - 0 - - - - - True - False - 20 - vertical - True - - - True - False - - - True - False - 15 - Folder - - - - - - True - True - 0 - - - - - True - False - 15 - File - - - - - - True - True - 1 - - - - - False - False - 0 - - - - - True - True - File/Folder - True - - - False - False - 1 - - - - - False - True - 1 - - - - - False - True - 1 - - - - - - button9 - button10 - - - diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/ui_widgets/rename_ui.glade b/debs/solarfm-0-0-1-x64/usr/share/solarfm/ui_widgets/rename_ui.glade deleted file mode 100644 index d06e7a9..0000000 --- a/debs/solarfm-0-0-1-x64/usr/share/solarfm/ui_widgets/rename_ui.glade +++ /dev/null @@ -1,241 +0,0 @@ - - - - - - True - False - gtk-edit - 3 - - - True - False - gtk-media-forward - - - False - False - True - center-always - True - dialog - True - True - False - False - center - - - False - 5 - 5 - 5 - 5 - vertical - 2 - - - False - end - - - gtk-cancel - cancel_renames - True - True - True - True - - - True - True - 0 - - - - - Skip - skip_renames - True - True - True - skip_img - True - - - True - True - 1 - - - - - False - False - 0 - - - - - True - False - vertical - - - True - False - - - True - False - Rename: - - - False - True - 0 - - - - - True - False - - - True - True - 1 - - - - - False - True - 0 - - - - - 500 - 26 - True - True - True - Rename To: - True - gtk-edit - False - False - False - False - To: - - - - False - True - 1 - - - - - True - False - True - expand - - - Title Case - True - True - True - - - - True - True - 0 - - - - - Upper Case - True - True - True - - - - True - True - 1 - - - - - Lower Case - True - True - True - - - - True - True - 2 - - - - - Invert Case - True - True - True - - - - True - True - 3 - - - - - False - True - 2 - - - - - Rename - rename - True - True - True - rename_img - True - - - - False - True - 3 - - - - - True - True - 1 - - - - - - button2 - button1 - - - diff --git a/debs/solarfm-0-0-1-x64/usr/share/solarfm/ui_widgets/save_load_ui.glade b/debs/solarfm-0-0-1-x64/usr/share/solarfm/ui_widgets/save_load_ui.glade deleted file mode 100644 index ba42cba..0000000 --- a/debs/solarfm-0-0-1-x64/usr/share/solarfm/ui_widgets/save_load_ui.glade +++ /dev/null @@ -1,65 +0,0 @@ - - - - - - False - dialog - True - - - False - vertical - 2 - - - False - end - - - gtk-cancel - True - True - True - True - True - - - True - True - 0 - - - - - gtk-ok - True - True - True - True - True - - - True - True - 1 - - - - - False - False - 0 - - - - - - - - - button11 - button12 - - - diff --git a/plugins/thumbnailer/icons/icon.py b/plugins/thumbnailer/icons/icon.py index 79903ca..5d742e0 100644 --- a/plugins/thumbnailer/icons/icon.py +++ b/plugins/thumbnailer/icons/icon.py @@ -29,13 +29,17 @@ class IconException(Exception): class Icon(DesktopIconMixin, VideoIconMixin, MeshsIconMixin): + cache = {} + def create_icon(self, dir, file): full_path = f"{dir}/{file}" return self.get_icon_image(dir, file, full_path) def get_icon_image(self, dir, file, full_path): try: - thumbnl = None + thumbnl = self.cache.get(full_path) + if thumbnl: + return thumbnl if file.lower().endswith(self.fmeshs): # 3D Mesh icon ... @@ -55,7 +59,7 @@ class Icon(DesktopIconMixin, VideoIconMixin, MeshsIconMixin): if not thumbnl: raise IconException("No known icons found.") - + self.cache[full_path] = thumbnl return thumbnl except IconException: ... 
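Side note on the plugins/thumbnailer/icons/icon.py hunk above: it adds a class-level `cache` dict to `Icon`, keyed by the file's full path, so a thumbnail is generated once and reused on later directory loads instead of being rebuilt every time. Below is a minimal, self-contained sketch of that memoization pattern, assuming a stand-in `_generate_thumbnail` helper in place of the plugin's mixin logic (the helper name and return value are illustrative, not the plugin's real API):

class IconException(Exception):
    # Raised when no usable icon can be produced (mirrors the plugin's exception).
    pass


class Icon:
    # Class-level dict shared by all instances, keyed by the file's full path.
    cache = {}

    def get_icon_image(self, dir, file):
        full_path = f"{dir}/{file}"
        thumbnl = self.cache.get(full_path)
        if thumbnl:
            return thumbnl  # cache hit: reuse the thumbnail built earlier

        thumbnl = self._generate_thumbnail(full_path)  # stand-in for the expensive mixin work
        if not thumbnl:
            raise IconException("No known icons found.")

        self.cache[full_path] = thumbnl  # cache miss: remember the result for later calls
        return thumbnl

    def _generate_thumbnail(self, full_path):
        # Illustrative placeholder; the real class builds video/mesh/desktop icons via its mixins.
        return f"thumbnail-for:{full_path}"

Worth noting about this design: because `cache` lives on the class and has no eviction, cached thumbnails persist for the life of the process and are not refreshed if a file's thumbnail changes on disk.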
diff --git a/plugins/youtube_download/yt_dlp/YoutubeDL.py b/plugins/youtube_download/yt_dlp/YoutubeDL.py index ef42ba6..aceaa59 100644 --- a/plugins/youtube_download/yt_dlp/YoutubeDL.py +++ b/plugins/youtube_download/yt_dlp/YoutubeDL.py @@ -42,6 +42,8 @@ from .globals import ( plugin_pps, all_plugins_loaded, plugin_dirs, + supported_js_runtimes, + supported_remote_components, ) from .minicurses import format_text from .networking import HEADRequest, Request, RequestDirector @@ -533,6 +535,18 @@ class YoutubeDL: See "EXTRACTOR ARGUMENTS" for details. Argument values must always be a list of string(s). E.g. {'youtube': {'skip': ['dash', 'hls']}} + js_runtimes: A dictionary of JavaScript runtime keys (in lower case) to enable + and a dictionary of additional configuration for the runtime. + Currently supported runtimes are 'deno', 'node', 'bun', and 'quickjs'. + If None, the default runtime of "deno" will be enabled. + The runtime configuration dictionary can have the following keys: + - path: Path to the executable (optional) + E.g. {'deno': {'path': '/path/to/deno'} + remote_components: A list of remote components that are allowed to be fetched when required. + Supported components: + - ejs:npm (external JavaScript components from npm) + - ejs:github (external JavaScript components from yt-dlp-ejs GitHub) + By default, no remote components are allowed to be fetched. mark_watched: Mark videos watched (even with --simulate). Only for YouTube The following options are deprecated and may be removed in the future: @@ -581,7 +595,7 @@ class YoutubeDL: 'width', 'height', 'asr', 'audio_channels', 'fps', 'tbr', 'abr', 'vbr', 'filesize', 'filesize_approx', 'timestamp', 'release_timestamp', 'available_at', - 'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count', + 'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count', 'save_count', 'average_rating', 'comment_count', 'age_limit', 'start_time', 'end_time', 'chapter_number', 'season_number', 'episode_number', @@ -717,6 +731,13 @@ class YoutubeDL: else: raise + # Note: this must be after plugins are loaded + self.params['js_runtimes'] = self.params.get('js_runtimes', {'deno': {}}) + self._clean_js_runtimes(self.params['js_runtimes']) + + self.params['remote_components'] = set(self.params.get('remote_components', ())) + self._clean_remote_components(self.params['remote_components']) + self.params['compat_opts'] = set(self.params.get('compat_opts', ())) self.params['http_headers'] = HTTPHeaderDict(std_headers, self.params.get('http_headers')) self._load_cookies(self.params['http_headers'].get('Cookie')) # compat @@ -829,6 +850,36 @@ class YoutubeDL: self.archive = preload_download_archive(self.params.get('download_archive')) + def _clean_js_runtimes(self, runtimes): + if not ( + isinstance(runtimes, dict) + and all(isinstance(k, str) and (v is None or isinstance(v, dict)) for k, v in runtimes.items()) + ): + raise ValueError('Invalid js_runtimes format, expected a dict of {runtime: {config}}') + + if unsupported_runtimes := runtimes.keys() - supported_js_runtimes.value.keys(): + self.report_warning( + f'Ignoring unsupported JavaScript runtime(s): {", ".join(unsupported_runtimes)}.' 
+ f' Supported runtimes: {", ".join(supported_js_runtimes.value.keys())}.') + for rt in unsupported_runtimes: + runtimes.pop(rt) + + def _clean_remote_components(self, remote_components: set): + if unsupported_remote_components := set(remote_components) - set(supported_remote_components.value): + self.report_warning( + f'Ignoring unsupported remote component(s): {", ".join(unsupported_remote_components)}.' + f' Supported remote components: {", ".join(supported_remote_components.value)}.') + for rt in unsupported_remote_components: + remote_components.remove(rt) + + @functools.cached_property + def _js_runtimes(self): + runtimes = {} + for name, config in self.params.get('js_runtimes', {}).items(): + runtime_cls = supported_js_runtimes.value.get(name) + runtimes[name] = runtime_cls(path=config.get('path')) if runtime_cls else None + return runtimes + def warn_if_short_id(self, argv): # short YouTube ID starting with dash? idxs = [ @@ -2975,6 +3026,10 @@ class YoutubeDL: format_selector = self.format_selector while True: if interactive_format_selection: + if not formats: + # Bypass interactive format selection if no formats & --ignore-no-formats-error + formats_to_download = None + break req_format = input(self._format_screen('\nEnter format selector ', self.Styles.EMPHASIS) + '(Press ENTER for default, or Ctrl+C to quit)' + self._format_screen(': ', self.Styles.EMPHASIS)) @@ -4064,6 +4119,18 @@ class YoutubeDL: join_nonempty(*get_package_info(m)) for m in available_dependencies.values() })) or 'none')) + if not self.params.get('js_runtimes'): + write_debug('JS runtimes: none (disabled)') + else: + write_debug('JS runtimes: %s' % (', '.join(sorted( + f'{name} (unknown)' if runtime is None + else join_nonempty( + runtime.info.name, + runtime.info.version + (' (unsupported)' if runtime.info.supported is False else ''), + ) + for name, runtime in self._js_runtimes.items() if runtime is None or runtime.info is not None + )) or 'none')) + write_debug(f'Proxy map: {self.proxies}') write_debug(f'Request Handlers: {", ".join(rh.RH_NAME for rh in self._request_director.handlers.values())}') diff --git a/plugins/youtube_download/yt_dlp/__init__.py b/plugins/youtube_download/yt_dlp/__init__.py index 8aee126..2f6ba47 100644 --- a/plugins/youtube_download/yt_dlp/__init__.py +++ b/plugins/youtube_download/yt_dlp/__init__.py @@ -61,8 +61,15 @@ from .utils import ( shell_quote, variadic, write_string, + ) from .utils._utils import _UnsafeExtensionError +from .utils._jsruntime import ( + BunJsRuntime as _BunJsRuntime, + DenoJsRuntime as _DenoJsRuntime, + NodeJsRuntime as _NodeJsRuntime, + QuickJsRuntime as _QuickJsRuntime, +) from .YoutubeDL import YoutubeDL @@ -773,6 +780,10 @@ def parse_options(argv=None): else opts.audioformat if (opts.extractaudio and opts.audioformat in FFmpegExtractAudioPP.SUPPORTED_EXTS) else None) + js_runtimes = { + runtime.lower(): {'path': path} for runtime, path in ( + [*arg.split(':', 1), None][:2] for arg in opts.js_runtimes)} + return ParsedOptions(parser, opts, urls, { 'usenetrc': opts.usenetrc, 'netrc_location': opts.netrc_location, @@ -940,6 +951,8 @@ def parse_options(argv=None): 'geo_bypass_country': opts.geo_bypass_country, 'geo_bypass_ip_block': opts.geo_bypass_ip_block, 'useid': opts.useid or None, + 'js_runtimes': js_runtimes, + 'remote_components': opts.remote_components, 'warn_when_outdated': opts.update_self is None, '_warnings': warnings, '_deprecation_warnings': deprecation_warnings, @@ -1081,6 +1094,16 @@ def main(argv=None): from .extractor import 
gen_extractors, list_extractors +# Register JS runtimes and remote components +from .globals import supported_js_runtimes, supported_remote_components +supported_js_runtimes.value['deno'] = _DenoJsRuntime +supported_js_runtimes.value['node'] = _NodeJsRuntime +supported_js_runtimes.value['bun'] = _BunJsRuntime +supported_js_runtimes.value['quickjs'] = _QuickJsRuntime + +supported_remote_components.value.append('ejs:github') +supported_remote_components.value.append('ejs:npm') + __all__ = [ 'YoutubeDL', 'gen_extractors', diff --git a/plugins/youtube_download/yt_dlp/__pyinstaller/hook-yt_dlp.py b/plugins/youtube_download/yt_dlp/__pyinstaller/hook-yt_dlp.py index 8e7f42f..0c4bf7d 100644 --- a/plugins/youtube_download/yt_dlp/__pyinstaller/hook-yt_dlp.py +++ b/plugins/youtube_download/yt_dlp/__pyinstaller/hook-yt_dlp.py @@ -34,3 +34,4 @@ print(f'Adding imports: {hiddenimports}') excludedimports = ['youtube_dl', 'youtube_dlc', 'test', 'ytdlp_plugins', 'devscripts', 'bundle'] datas = collect_data_files('curl_cffi', includes=['cacert.pem']) +datas += collect_data_files('yt_dlp_ejs', includes=['**/*.js']) diff --git a/plugins/youtube_download/yt_dlp/cookies.py b/plugins/youtube_download/yt_dlp/cookies.py index 4fdc0b8..23f90d6 100644 --- a/plugins/youtube_download/yt_dlp/cookies.py +++ b/plugins/youtube_download/yt_dlp/cookies.py @@ -125,7 +125,7 @@ def extract_cookies_from_browser(browser_name, profile=None, logger=YDLLogger(), def _extract_firefox_cookies(profile, container, logger): - MAX_SUPPORTED_DB_SCHEMA_VERSION = 16 + MAX_SUPPORTED_DB_SCHEMA_VERSION = 17 logger.info('Extracting cookies from firefox') if not sqlite3: @@ -166,6 +166,8 @@ def _extract_firefox_cookies(profile, container, logger): db_schema_version = cursor.execute('PRAGMA user_version;').fetchone()[0] if db_schema_version > MAX_SUPPORTED_DB_SCHEMA_VERSION: logger.warning(f'Possibly unsupported firefox cookies database version: {db_schema_version}') + else: + logger.debug(f'Firefox cookies database version: {db_schema_version}') if isinstance(container_id, int): logger.debug( f'Only loading cookies from firefox container "{container}", ID {container_id}') @@ -210,9 +212,16 @@ def _firefox_browser_dirs(): else: yield from map(os.path.expanduser, ( + # New installations of FF147+ respect the XDG base directory specification + # Ref: https://bugzilla.mozilla.org/show_bug.cgi?id=259356 + os.path.join(_config_home(), 'mozilla/firefox'), + # Existing FF version<=146 installations '~/.mozilla/firefox', - '~/snap/firefox/common/.mozilla/firefox', + # Flatpak XDG: https://docs.flatpak.org/en/latest/conventions.html#xdg-base-directories + '~/.var/app/org.mozilla.firefox/config/mozilla/firefox', '~/.var/app/org.mozilla.firefox/.mozilla/firefox', + # Snap installations do not respect the XDG base directory specification + '~/snap/firefox/common/.mozilla/firefox', )) @@ -557,7 +566,7 @@ class WindowsChromeCookieDecryptor(ChromeCookieDecryptor): def _extract_safari_cookies(profile, logger): - if sys.platform != 'darwin': + if sys.platform not in ('darwin', 'ios'): raise ValueError(f'unsupported platform: {sys.platform}') if profile: diff --git a/plugins/youtube_download/yt_dlp/dependencies/__init__.py b/plugins/youtube_download/yt_dlp/dependencies/__init__.py index 0d58da2..cf2bcfb 100644 --- a/plugins/youtube_download/yt_dlp/dependencies/__init__.py +++ b/plugins/youtube_download/yt_dlp/dependencies/__init__.py @@ -81,6 +81,12 @@ except ImportError: from . 
import Cryptodome +try: + import yt_dlp_ejs +except ImportError: + yt_dlp_ejs = None + + all_dependencies = {k: v for k, v in globals().items() if not k.startswith('_')} available_dependencies = {k: v for k, v in all_dependencies.items() if v} diff --git a/plugins/youtube_download/yt_dlp/downloader/common.py b/plugins/youtube_download/yt_dlp/downloader/common.py index 122c479..6f15607 100644 --- a/plugins/youtube_download/yt_dlp/downloader/common.py +++ b/plugins/youtube_download/yt_dlp/downloader/common.py @@ -461,7 +461,8 @@ class FileDownloader: min_sleep_interval = self.params.get('sleep_interval') or 0 max_sleep_interval = self.params.get('max_sleep_interval') or 0 - if available_at := info_dict.get('available_at'): + requested_formats = info_dict.get('requested_formats') or [info_dict] + if available_at := max(f.get('available_at') or 0 for f in requested_formats): forced_sleep_interval = available_at - int(time.time()) if forced_sleep_interval > min_sleep_interval: sleep_note = 'as required by the site' diff --git a/plugins/youtube_download/yt_dlp/downloader/external.py b/plugins/youtube_download/yt_dlp/downloader/external.py index 3b8fd27..14879b3 100644 --- a/plugins/youtube_download/yt_dlp/downloader/external.py +++ b/plugins/youtube_download/yt_dlp/downloader/external.py @@ -457,6 +457,8 @@ class FFmpegFD(ExternalFD): @classmethod def available(cls, path=None): + # TODO: Fix path for ffmpeg + # Fixme: This may be wrong when --ffmpeg-location is used return FFmpegPostProcessor().available def on_process_started(self, proc, stdin): @@ -488,20 +490,6 @@ class FFmpegFD(ExternalFD): if not self.params.get('verbose'): args += ['-hide_banner'] - args += traverse_obj(info_dict, ('downloader_options', 'ffmpeg_args', ...)) - - # These exists only for compatibility. 
Extractors should use - # info_dict['downloader_options']['ffmpeg_args'] instead - args += info_dict.get('_ffmpeg_args') or [] - seekable = info_dict.get('_seekable') - if seekable is not None: - # setting -seekable prevents ffmpeg from guessing if the server - # supports seeking(by adding the header `Range: bytes=0-`), which - # can cause problems in some cases - # https://github.com/ytdl-org/youtube-dl/issues/11800#issuecomment-275037127 - # http://trac.ffmpeg.org/ticket/6125#comment:10 - args += ['-seekable', '1' if seekable else '0'] - env = None proxy = self.params.get('proxy') if proxy: @@ -521,39 +509,10 @@ class FFmpegFD(ExternalFD): env['HTTP_PROXY'] = proxy env['http_proxy'] = proxy - protocol = info_dict.get('protocol') - - if protocol == 'rtmp': - player_url = info_dict.get('player_url') - page_url = info_dict.get('page_url') - app = info_dict.get('app') - play_path = info_dict.get('play_path') - tc_url = info_dict.get('tc_url') - flash_version = info_dict.get('flash_version') - live = info_dict.get('rtmp_live', False) - conn = info_dict.get('rtmp_conn') - if player_url is not None: - args += ['-rtmp_swfverify', player_url] - if page_url is not None: - args += ['-rtmp_pageurl', page_url] - if app is not None: - args += ['-rtmp_app', app] - if play_path is not None: - args += ['-rtmp_playpath', play_path] - if tc_url is not None: - args += ['-rtmp_tcurl', tc_url] - if flash_version is not None: - args += ['-rtmp_flashver', flash_version] - if live: - args += ['-rtmp_live', 'live'] - if isinstance(conn, list): - for entry in conn: - args += ['-rtmp_conn', entry] - elif isinstance(conn, str): - args += ['-rtmp_conn', conn] - start_time, end_time = info_dict.get('section_start') or 0, info_dict.get('section_end') + fallback_input_args = traverse_obj(info_dict, ('downloader_options', 'ffmpeg_args', ...)) + selected_formats = info_dict.get('requested_formats') or [info_dict] for i, fmt in enumerate(selected_formats): is_http = re.match(r'https?://', fmt['url']) @@ -572,6 +531,44 @@ class FFmpegFD(ExternalFD): if end_time: args += ['-t', str(end_time - start_time)] + protocol = fmt.get('protocol') + + if protocol == 'rtmp': + player_url = fmt.get('player_url') + page_url = fmt.get('page_url') + app = fmt.get('app') + play_path = fmt.get('play_path') + tc_url = fmt.get('tc_url') + flash_version = fmt.get('flash_version') + live = fmt.get('rtmp_live', False) + conn = fmt.get('rtmp_conn') + if player_url is not None: + args += ['-rtmp_swfverify', player_url] + if page_url is not None: + args += ['-rtmp_pageurl', page_url] + if app is not None: + args += ['-rtmp_app', app] + if play_path is not None: + args += ['-rtmp_playpath', play_path] + if tc_url is not None: + args += ['-rtmp_tcurl', tc_url] + if flash_version is not None: + args += ['-rtmp_flashver', flash_version] + if live: + args += ['-rtmp_live', 'live'] + if isinstance(conn, list): + for entry in conn: + args += ['-rtmp_conn', entry] + elif isinstance(conn, str): + args += ['-rtmp_conn', conn] + + elif protocol == 'http_dash_segments' and info_dict.get('is_live'): + # ffmpeg may try to read past the latest available segments for + # live DASH streams unless we pass `-re`. 
In modern ffmpeg, this + # is an alias of `-readrate 1`, but `-readrate` was not added + # until ffmpeg 5.0, so we must stick to using `-re` + args += ['-re'] + url = fmt['url'] if self.params.get('enable_file_urls') and url.startswith('file:'): # The default protocol_whitelist is 'file,crypto,data' when reading local m3u8 URLs, @@ -586,6 +583,7 @@ class FFmpegFD(ExternalFD): # https://trac.ffmpeg.org/ticket/2702 url = re.sub(r'^file://(?:localhost)?/', 'file:' if os.name == 'nt' else 'file:/', url) + args += traverse_obj(fmt, ('downloader_options', 'ffmpeg_args', ...)) or fallback_input_args args += [*self._configuration_args((f'_i{i + 1}', '_i')), '-i', url] if not (start_time or end_time) or not self.params.get('force_keyframes_at_cuts'): diff --git a/plugins/youtube_download/yt_dlp/extractor/_extractors.py b/plugins/youtube_download/yt_dlp/extractor/_extractors.py index 072169d..ea49a25 100644 --- a/plugins/youtube_download/yt_dlp/extractor/_extractors.py +++ b/plugins/youtube_download/yt_dlp/extractor/_extractors.py @@ -75,6 +75,7 @@ from .afreecatv import ( AfreecaTVLiveIE, AfreecaTVUserIE, ) +from .agalega import AGalegaIE from .agora import ( TokFMAuditionIE, TokFMPodcastIE, @@ -83,6 +84,7 @@ from .agora import ( ) from .airtv import AirTVIE from .aitube import AitubeKZVideoIE +from .alibaba import AlibabaIE from .aliexpress import AliExpressLiveIE from .aljazeera import AlJazeeraIE from .allocine import AllocineIE @@ -143,6 +145,8 @@ from .archiveorg import ( from .arcpublishing import ArcPublishingIE from .ard import ( ARDIE, + ARDAudiothekIE, + ARDAudiothekPlaylistIE, ARDBetaMediathekIE, ARDMediathekCollectionIE, ) @@ -266,6 +270,7 @@ from .bitchute import ( BitChuteChannelIE, BitChuteIE, ) +from .bitmovin import BitmovinIE from .blackboardcollaborate import ( BlackboardCollaborateIE, BlackboardCollaborateLaunchIE, @@ -426,6 +431,7 @@ from .cpac import ( ) from .cracked import CrackedIE from .craftsy import CraftsyIE +from .croatianfilm import CroatianFilmIE from .crooksandliars import CrooksAndLiarsIE from .crowdbunker import ( CrowdBunkerChannelIE, @@ -633,12 +639,16 @@ from .fc2 import ( ) from .fczenit import FczenitIE from .fifa import FifaIE +from .filmarchiv import FilmArchivIE from .filmon import ( FilmOnChannelIE, FilmOnIE, ) from .filmweb import FilmwebIE -from .firsttv import FirstTVIE +from .firsttv import ( + FirstTVIE, + FirstTVLiveIE, +) from .fivetv import FiveTVIE from .flextv import FlexTVIE from .flickr import FlickrIE @@ -685,6 +695,10 @@ from .frontendmasters import ( FrontendMastersIE, FrontendMastersLessonIE, ) +from .frontro import ( + TheChosenGroupIE, + TheChosenIE, +) from .fujitv import FujiTVFODPlus7IE from .funk import FunkIE from .funker530 import Funker530IE @@ -1074,11 +1088,6 @@ from .mangomolo import ( MangomoloLiveIE, MangomoloVideoIE, ) -from .manoto import ( - ManotoTVIE, - ManotoTVLiveIE, - ManotoTVShowIE, -) from .manyvids import ManyVidsIE from .maoritv import MaoriTVIE from .markiza import ( @@ -1088,7 +1097,10 @@ from .markiza import ( from .massengeschmacktv import MassengeschmackTVIE from .masters import MastersIE from .matchtv import MatchTVIE -from .mave import MaveIE +from .mave import ( + MaveChannelIE, + MaveIE, +) from .mbn import MBNIE from .mdr import MDRIE from .medaltv import MedalTVIE @@ -1195,6 +1207,7 @@ from .musicdex import ( MusicdexPlaylistIE, MusicdexSongIE, ) +from .mux import MuxIE from .mx3 import ( Mx3IE, Mx3NeoIE, @@ -1216,6 +1229,7 @@ from .n1 import ( N1InfoAssetIE, N1InfoIIE, ) +from .nascar import 
NascarClassicsIE from .nate import ( NateIE, NateProgramIE, @@ -1261,6 +1275,7 @@ from .nebula import ( NebulaChannelIE, NebulaClassIE, NebulaIE, + NebulaSeasonIE, NebulaSubscriptionsIE, ) from .nekohacker import NekoHackerIE @@ -1269,6 +1284,10 @@ from .nest import ( NestClipIE, NestIE, ) +from .netapp import ( + NetAppCollectionIE, + NetAppVideoIE, +) from .neteasemusic import ( NetEaseMusicAlbumIE, NetEaseMusicDjRadioIE, @@ -1291,12 +1310,6 @@ from .newgrounds import ( ) from .newspicks import NewsPicksIE from .newsy import NewsyIE -from .nextmedia import ( - AppleDailyIE, - NextMediaActionNewsIE, - NextMediaIE, - NextTVIE, -) from .nexx import ( NexxEmbedIE, NexxIE, @@ -1361,6 +1374,7 @@ from .nova import ( NovaIE, ) from .novaplay import NovaPlayIE +from .nowcanal import NowCanalIE from .nowness import ( NownessIE, NownessPlaylistIE, @@ -1464,6 +1478,7 @@ from .palcomp3 import ( PalcoMP3IE, PalcoMP3VideoIE, ) +from .pandatv import PandaTvIE from .panopto import ( PanoptoIE, PanoptoListIE, @@ -1812,10 +1827,6 @@ from .scrippsnetworks import ( ScrippsNetworksWatchIE, ) from .scrolller import ScrolllerIE -from .scte import ( - SCTEIE, - SCTECourseIE, -) from .sejmpl import SejmIE from .sen import SenIE from .senalcolombia import SenalColombiaLiveIE @@ -1997,6 +2008,11 @@ from .taptap import ( TapTapMomentIE, TapTapPostIntlIE, ) +from .tarangplus import ( + TarangPlusEpisodesIE, + TarangPlusPlaylistIE, + TarangPlusVideoIE, +) from .tass import TassIE from .tbs import TBSIE from .tbsjp import ( @@ -2514,6 +2530,7 @@ from .yappy import ( YappyIE, YappyProfileIE, ) +from .yfanefa import YfanefaIE from .yle_areena import YleAreenaIE from .youjizz import YouJizzIE from .youku import ( diff --git a/plugins/youtube_download/yt_dlp/extractor/abc.py b/plugins/youtube_download/yt_dlp/extractor/abc.py index 2e66178..cff54d3 100644 --- a/plugins/youtube_download/yt_dlp/extractor/abc.py +++ b/plugins/youtube_download/yt_dlp/extractor/abc.py @@ -321,6 +321,8 @@ class ABCIViewIE(InfoExtractor): entry_protocol='m3u8_native', m3u8_id='hls', fatal=False) if formats: break + else: + formats = [] subtitles = {} src_vtt = stream.get('captions', {}).get('src-vtt') diff --git a/plugins/youtube_download/yt_dlp/extractor/agalega.py b/plugins/youtube_download/yt_dlp/extractor/agalega.py new file mode 100644 index 0000000..c02d4ae --- /dev/null +++ b/plugins/youtube_download/yt_dlp/extractor/agalega.py @@ -0,0 +1,91 @@ +import json +import time + +from .common import InfoExtractor +from ..utils import jwt_decode_hs256, url_or_none +from ..utils.traversal import traverse_obj + + +class AGalegaBaseIE(InfoExtractor): + _access_token = None + + @staticmethod + def _jwt_is_expired(token): + return jwt_decode_hs256(token)['exp'] - time.time() < 120 + + def _refresh_access_token(self, video_id): + AGalegaBaseIE._access_token = self._download_json( + 'https://www.agalega.gal/api/fetch-api/jwt/token', video_id, + note='Downloading access token', + data=json.dumps({ + 'username': None, + 'password': None, + 'client': 'crtvg', + 'checkExistsCookies': False, + }).encode())['access'] + + def _call_api(self, endpoint, display_id, note, fatal=True, query=None): + if not AGalegaBaseIE._access_token or self._jwt_is_expired(AGalegaBaseIE._access_token): + self._refresh_access_token(endpoint) + return self._download_json( + f'https://api-agalega.interactvty.com/api/2.0/contents/{endpoint}', display_id, + note=note, fatal=fatal, query=query, + headers={'Authorization': f'jwtok {AGalegaBaseIE._access_token}'}) + + +class 
AGalegaIE(AGalegaBaseIE): + IE_NAME = 'agalega:videos' + _VALID_URL = r'https?://(?:www\.)?agalega\.gal/videos/(?:detail/)?(?P[0-9]+)' + _TESTS = [{ + 'url': 'https://www.agalega.gal/videos/288664-lr-ninguencheconta', + 'md5': '04533a66c5f863d08dd9724b11d1c223', + 'info_dict': { + 'id': '288664', + 'title': 'Roberto e Ángel Martín atenden consultas dos espectadores', + 'description': 'O cómico ademais fai un repaso dalgúns momentos da súa traxectoria profesional', + 'thumbnail': 'https://crtvg-bucket.flumotion.cloud/content_cards/2ef32c3b9f6249d9868fd8f11d389d3d.png', + 'ext': 'mp4', + }, + }, { + 'url': 'https://www.agalega.gal/videos/detail/296152-pulso-activo-7', + 'md5': '26df7fdcf859f38ad92d837279d6b56d', + 'info_dict': { + 'id': '296152', + 'title': 'Pulso activo | 18-11-2025', + 'description': 'Anxo, Noemí, Silvia e Estrella comparten as sensacións da clase de Eddy.', + 'thumbnail': 'https://crtvg-bucket.flumotion.cloud/content_cards/a6bb7da6c8994b82bf961ac6cad1707b.png', + 'ext': 'mp4', + }, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + content_data = self._call_api( + f'content/{video_id}/', video_id, note='Downloading content data', fatal=False, + query={ + 'optional_fields': 'image,is_premium,short_description,has_subtitle', + }) + resource_data = self._call_api( + f'content_resources/{video_id}/', video_id, note='Downloading resource data', + query={ + 'optional_fields': 'media_url', + }) + + formats = [] + subtitles = {} + for m3u8_url in traverse_obj(resource_data, ('results', ..., 'media_url', {url_or_none})): + fmts, subs = self._extract_m3u8_formats_and_subtitles( + m3u8_url, video_id, ext='mp4', m3u8_id='hls') + formats.extend(fmts) + self._merge_subtitles(subs, target=subtitles) + + return { + 'id': video_id, + 'formats': formats, + 'subtitles': subtitles, + **traverse_obj(content_data, { + 'title': ('name', {str}), + 'description': (('description', 'short_description'), {str}, any), + 'thumbnail': ('image', {url_or_none}), + }), + } diff --git a/plugins/youtube_download/yt_dlp/extractor/alibaba.py b/plugins/youtube_download/yt_dlp/extractor/alibaba.py new file mode 100644 index 0000000..0912535 --- /dev/null +++ b/plugins/youtube_download/yt_dlp/extractor/alibaba.py @@ -0,0 +1,42 @@ +from .common import InfoExtractor +from ..utils import int_or_none, str_or_none, url_or_none +from ..utils.traversal import traverse_obj + + +class AlibabaIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?alibaba\.com/product-detail/[\w-]+_(?P\d+)\.html' + _TESTS = [{ + 'url': 'https://www.alibaba.com/product-detail/Kids-Entertainment-Bouncer-Bouncy-Castle-Waterslide_1601271126969.html', + 'info_dict': { + 'id': '6000280444270', + 'display_id': '1601271126969', + 'ext': 'mp4', + 'title': 'Kids Entertainment Bouncer Bouncy Castle Waterslide Juex Gonflables Commercial Inflatable Tropical Water Slide', + 'duration': 30, + 'thumbnail': 'https://sc04.alicdn.com/kf/Hc5bb391974454af18c7a4f91cbe4062bg.jpg_120x120.jpg', + }, + }] + + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + product_data = self._search_json( + r'window\.detailData\s*=', webpage, 'detail data', display_id)['globalData']['product'] + + return { + **traverse_obj(product_data, ('mediaItems', lambda _, v: v['type'] == 'video' and v['videoId'], any, { + 'id': ('videoId', {int}, {str_or_none}), + 'duration': ('duration', {int_or_none}), + 'thumbnail': ('videoCoverUrl', {url_or_none}), + 'formats': ('videoUrl', lambda _, v: 
url_or_none(v['videoUrl']), { + 'url': 'videoUrl', + 'format_id': ('definition', {str_or_none}), + 'tbr': ('bitrate', {int_or_none}), + 'width': ('width', {int_or_none}), + 'height': ('height', {int_or_none}), + 'filesize': ('length', {int_or_none}), + }), + })), + 'title': traverse_obj(product_data, ('subject', {str})), + 'display_id': display_id, + } diff --git a/plugins/youtube_download/yt_dlp/extractor/archiveorg.py b/plugins/youtube_download/yt_dlp/extractor/archiveorg.py index 9a4e0b8..7bf5199 100644 --- a/plugins/youtube_download/yt_dlp/extractor/archiveorg.py +++ b/plugins/youtube_download/yt_dlp/extractor/archiveorg.py @@ -5,12 +5,9 @@ import re import urllib.parse from .common import InfoExtractor -from .youtube import YoutubeBaseInfoExtractor, YoutubeIE -from ..networking import HEADRequest -from ..networking.exceptions import HTTPError +from .youtube import YoutubeBaseInfoExtractor from ..utils import ( KNOWN_EXTENSIONS, - ExtractorError, bug_reports_message, clean_html, dict_get, @@ -21,18 +18,14 @@ from ..utils import ( join_nonempty, js_to_json, merge_dicts, - mimetype2ext, orderedSet, parse_duration, parse_qs, str_or_none, - str_to_int, traverse_obj, - try_get, unified_strdate, unified_timestamp, url_or_none, - urlhandle_detect_ext, ) @@ -286,7 +279,7 @@ class ArchiveOrgIE(InfoExtractor): 'url': 'https://archive.org/' + track['file'].lstrip('/'), } - metadata = self._download_json('http://archive.org/metadata/' + identifier, identifier) + metadata = self._download_json(f'https://archive.org/metadata/{identifier}', identifier) m = metadata['metadata'] identifier = m['identifier'] @@ -471,7 +464,7 @@ class YoutubeWebArchiveIE(InfoExtractor): 'url': 'https://web.archive.org/web/20110712231407/http://www.youtube.com/watch?v=lTx3G6h2xyA', 'info_dict': { 'id': 'lTx3G6h2xyA', - 'ext': 'flv', + 'ext': 'mp4', 'title': 'Madeon - Pop Culture (live mashup)', 'upload_date': '20110711', 'uploader': 'Madeon', @@ -578,7 +571,7 @@ class YoutubeWebArchiveIE(InfoExtractor): 'url': 'https://web.archive.org/web/20110126141719/http://www.youtube.com/watch?v=Q_yjX80U7Yc', 'info_dict': { 'id': 'Q_yjX80U7Yc', - 'ext': 'flv', + 'ext': 'webm', 'title': 'Spray Paint Art by Clay Butler: Purple Fantasy Forest', 'uploader_id': 'claybutlermusic', 'description': 'md5:4595264559e3d0a0ceb3f011f6334543', @@ -680,6 +673,55 @@ class YoutubeWebArchiveIE(InfoExtractor): 'upload_date': '20120407', 'uploader_id': 'thecomputernerd01', }, + }, { + # Contains split audio/video formats + 'url': 'ytarchive:o_T_S_TU12M', + 'info_dict': { + 'id': 'o_T_S_TU12M', + 'ext': 'mp4', + 'title': 'Prairie Pulse 1218; Lin Enger, Paul Olson', + 'description': 'md5:36e7a34cdc8508e35a920ec042e799c7', + 'uploader': 'Prairie Public', + 'channel_id': 'UC4BOzQel6tvJm7OEDd3vZlw', + 'channel_url': 'https://www.youtube.com/channel/UC4BOzQel6tvJm7OEDd3vZlw', + 'duration': 1606, + 'upload_date': '20150213', + }, + }, { + # Video unavailable through wayback-fakeurl + 'url': 'ytarchive:SQCom7wjGDs', + 'info_dict': { + 'id': 'SQCom7wjGDs', + 'ext': 'mp4', + 'title': 'Jamin Warren from PBS Game/Show decides that Portal is a feminist Game [Top Hats and No Brain]', + 'description': 'md5:c0cb876dd075483ead9afcc86798efb0', + 'uploader': 'Top Hats and Champagne', + 'uploader_id': 'sparrowtm', + 'uploader_url': 'https://www.youtube.com/user/sparrowtm', + 'channel_id': 'UCW3T5nG4iEkI7HjG-Du3HQA', + 'channel_url': 'https://www.youtube.com/channel/UCW3T5nG4iEkI7HjG-Du3HQA', + 'duration': 1500, + 'thumbnail': 
'https://web.archive.org/web/20160108040020if_/https://i.ytimg.com/vi/SQCom7wjGDs/maxresdefault.jpg', + 'upload_date': '20160107', + }, + }, { + # dmuxed formats + 'url': 'https://web.archive.org/web/20240922160632/https://www.youtube.com/watch?v=z7hzvTL3k1k', + 'info_dict': { + 'id': 'z7hzvTL3k1k', + 'ext': 'webm', + 'title': 'Praise the Lord and Pass the Ammunition (BARRXN REMIX)', + 'description': 'md5:45dbf2c71c23b0734c8dfb82dd1e94b6', + 'uploader': 'Barrxn', + 'uploader_id': 'TheRockstar6086', + 'uploader_url': 'https://www.youtube.com/user/TheRockstar6086', + 'channel_id': 'UCjJPGUTtvR9uizmawn2ThqA', + 'channel_url': 'https://www.youtube.com/channel/UCjJPGUTtvR9uizmawn2ThqA', + 'duration': 125, + 'thumbnail': r're:https?://.*\.(jpg|webp)', + 'upload_date': '20201207', + }, + 'params': {'format': 'bv'}, }, { 'url': 'https://web.archive.org/web/http://www.youtube.com/watch?v=kH-G_aIBlFw', 'only_matching': True, @@ -724,6 +766,113 @@ class YoutubeWebArchiveIE(InfoExtractor): _OLDEST_CAPTURE_DATE = 20050214000000 _NEWEST_CAPTURE_DATE = 20500101000000 + _FORMATS = { + '5': {'ext': 'flv', 'width': 400, 'height': 240, 'acodec': 'mp3', 'vcodec': 'h263'}, + '6': {'ext': 'flv', 'width': 450, 'height': 270, 'acodec': 'mp3', 'vcodec': 'h263'}, + '13': {'ext': '3gp', 'acodec': 'aac', 'vcodec': 'mp4v'}, + '17': {'ext': '3gp', 'width': 176, 'height': 144, 'acodec': 'aac', 'vcodec': 'mp4v'}, + '18': {'ext': 'mp4', 'width': 640, 'height': 360, 'acodec': 'aac', 'vcodec': 'h264'}, + '22': {'ext': 'mp4', 'width': 1280, 'height': 720, 'acodec': 'aac', 'vcodec': 'h264'}, + '34': {'ext': 'flv', 'width': 640, 'height': 360, 'acodec': 'aac', 'vcodec': 'h264'}, + '35': {'ext': 'flv', 'width': 854, 'height': 480, 'acodec': 'aac', 'vcodec': 'h264'}, + # itag 36 videos are either 320x180 (BaW_jenozKc) or 320x240 (__2ABJjxzNo), abr varies as well + '36': {'ext': '3gp', 'width': 320, 'acodec': 'aac', 'vcodec': 'mp4v'}, + '37': {'ext': 'mp4', 'width': 1920, 'height': 1080, 'acodec': 'aac', 'vcodec': 'h264'}, + '38': {'ext': 'mp4', 'width': 4096, 'height': 3072, 'acodec': 'aac', 'vcodec': 'h264'}, + '43': {'ext': 'webm', 'width': 640, 'height': 360, 'acodec': 'vorbis', 'vcodec': 'vp8'}, + '44': {'ext': 'webm', 'width': 854, 'height': 480, 'acodec': 'vorbis', 'vcodec': 'vp8'}, + '45': {'ext': 'webm', 'width': 1280, 'height': 720, 'acodec': 'vorbis', 'vcodec': 'vp8'}, + '46': {'ext': 'webm', 'width': 1920, 'height': 1080, 'acodec': 'vorbis', 'vcodec': 'vp8'}, + '59': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'vcodec': 'h264'}, + '78': {'ext': 'mp4', 'width': 854, 'height': 480, 'acodec': 'aac', 'vcodec': 'h264'}, + + + # 3D videos + '82': {'ext': 'mp4', 'height': 360, 'format_note': '3D', 'acodec': 'aac', 'vcodec': 'h264', 'preference': -20}, + '83': {'ext': 'mp4', 'height': 480, 'format_note': '3D', 'acodec': 'aac', 'vcodec': 'h264', 'preference': -20}, + '84': {'ext': 'mp4', 'height': 720, 'format_note': '3D', 'acodec': 'aac', 'vcodec': 'h264', 'preference': -20}, + '85': {'ext': 'mp4', 'height': 1080, 'format_note': '3D', 'acodec': 'aac', 'vcodec': 'h264', 'preference': -20}, + '100': {'ext': 'webm', 'height': 360, 'format_note': '3D', 'acodec': 'vorbis', 'vcodec': 'vp8', 'preference': -20}, + '101': {'ext': 'webm', 'height': 480, 'format_note': '3D', 'acodec': 'vorbis', 'vcodec': 'vp8', 'preference': -20}, + '102': {'ext': 'webm', 'height': 720, 'format_note': '3D', 'acodec': 'vorbis', 'vcodec': 'vp8', 'preference': -20}, + + # Apple HTTP Live Streaming + '91': {'ext': 'mp4', 'height': 144, 
'format_note': 'HLS', 'acodec': 'aac', 'vcodec': 'h264'}, + '92': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'vcodec': 'h264'}, + '93': {'ext': 'mp4', 'height': 360, 'format_note': 'HLS', 'acodec': 'aac', 'vcodec': 'h264'}, + '94': {'ext': 'mp4', 'height': 480, 'format_note': 'HLS', 'acodec': 'aac', 'vcodec': 'h264'}, + '95': {'ext': 'mp4', 'height': 720, 'format_note': 'HLS', 'acodec': 'aac', 'vcodec': 'h264'}, + '96': {'ext': 'mp4', 'height': 1080, 'format_note': 'HLS', 'acodec': 'aac', 'vcodec': 'h264'}, + '132': {'ext': 'mp4', 'height': 240, 'format_note': 'HLS', 'acodec': 'aac', 'vcodec': 'h264'}, + '151': {'ext': 'mp4', 'height': 72, 'format_note': 'HLS', 'acodec': 'aac', 'vcodec': 'h264'}, + + # DASH mp4 video + '133': {'ext': 'mp4', 'height': 240, 'vcodec': 'h264', 'acodec': 'none'}, + '134': {'ext': 'mp4', 'height': 360, 'vcodec': 'h264', 'acodec': 'none'}, + '135': {'ext': 'mp4', 'height': 480, 'vcodec': 'h264', 'acodec': 'none'}, + '136': {'ext': 'mp4', 'height': 720, 'vcodec': 'h264', 'acodec': 'none'}, + '137': {'ext': 'mp4', 'height': 1080, 'vcodec': 'h264', 'acodec': 'none'}, + '138': {'ext': 'mp4', 'vcodec': 'h264', 'acodec': 'none'}, # Height can vary (https://github.com/ytdl-org/youtube-dl/issues/4559) + '160': {'ext': 'mp4', 'height': 144, 'vcodec': 'h264', 'acodec': 'none'}, + '212': {'ext': 'mp4', 'height': 480, 'vcodec': 'h264', 'acodec': 'none'}, + '264': {'ext': 'mp4', 'height': 1440, 'vcodec': 'h264', 'acodec': 'none'}, + '298': {'ext': 'mp4', 'height': 720, 'vcodec': 'h264', 'fps': 60, 'acodec': 'none'}, + '299': {'ext': 'mp4', 'height': 1080, 'vcodec': 'h264', 'fps': 60, 'acodec': 'none'}, + '266': {'ext': 'mp4', 'height': 2160, 'vcodec': 'h264', 'acodec': 'none'}, + + # Dash mp4 audio + '139': {'ext': 'm4a', 'acodec': 'aac', 'vcodec': 'none'}, + '140': {'ext': 'm4a', 'acodec': 'aac', 'vcodec': 'none'}, + '141': {'ext': 'm4a', 'acodec': 'aac', 'vcodec': 'none'}, + '256': {'ext': 'm4a', 'acodec': 'aac', 'vcodec': 'none'}, + '258': {'ext': 'm4a', 'acodec': 'aac', 'vcodec': 'none'}, + '325': {'ext': 'm4a', 'acodec': 'dtse', 'vcodec': 'none'}, + '328': {'ext': 'm4a', 'acodec': 'ec-3', 'vcodec': 'none'}, + + # Dash webm + '167': {'ext': 'webm', 'height': 360, 'width': 640, 'vcodec': 'vp8'}, + '168': {'ext': 'webm', 'height': 480, 'width': 854, 'vcodec': 'vp8'}, + '169': {'ext': 'webm', 'height': 720, 'width': 1280, 'vcodec': 'vp8'}, + '170': {'ext': 'webm', 'height': 1080, 'width': 1920, 'vcodec': 'vp8'}, + '218': {'ext': 'webm', 'height': 480, 'width': 854, 'vcodec': 'vp8'}, + '219': {'ext': 'webm', 'height': 480, 'width': 854, 'vcodec': 'vp8'}, + '278': {'ext': 'webm', 'height': 144, 'vcodec': 'vp9', 'acodec': 'none'}, + '242': {'ext': 'webm', 'height': 240, 'vcodec': 'vp9', 'acodec': 'none'}, + '243': {'ext': 'webm', 'height': 360, 'vcodec': 'vp9', 'acodec': 'none'}, + '244': {'ext': 'webm', 'height': 480, 'vcodec': 'vp9', 'acodec': 'none'}, + '245': {'ext': 'webm', 'height': 480, 'vcodec': 'vp9', 'acodec': 'none'}, + '246': {'ext': 'webm', 'height': 480, 'vcodec': 'vp9', 'acodec': 'none'}, + '247': {'ext': 'webm', 'height': 720, 'vcodec': 'vp9', 'acodec': 'none'}, + '248': {'ext': 'webm', 'height': 1080, 'vcodec': 'vp9', 'acodec': 'none'}, + '271': {'ext': 'webm', 'height': 1440, 'vcodec': 'vp9', 'acodec': 'none'}, + # itag 272 videos are either 3840x2160 (e.g. 
RtoitU2A-3E) or 7680x4320 (sLprVF6d7Ug) + '272': {'ext': 'webm', 'height': 2160, 'vcodec': 'vp9', 'acodec': 'none'}, + '302': {'ext': 'webm', 'height': 720, 'vcodec': 'vp9', 'fps': 60, 'acodec': 'none'}, + '303': {'ext': 'webm', 'height': 1080, 'vcodec': 'vp9', 'fps': 60, 'acodec': 'none'}, + '308': {'ext': 'webm', 'height': 1440, 'vcodec': 'vp9', 'fps': 60, 'acodec': 'none'}, + '313': {'ext': 'webm', 'height': 2160, 'vcodec': 'vp9', 'acodec': 'none'}, + '315': {'ext': 'webm', 'height': 2160, 'vcodec': 'vp9', 'fps': 60, 'acodec': 'none'}, + + # Dash webm audio + '171': {'ext': 'webm', 'acodec': 'vorbis', 'vcodec': 'none'}, + '172': {'ext': 'webm', 'acodec': 'vorbis', 'vcodec': 'none'}, + + # Dash webm audio with opus inside + '249': {'ext': 'webm', 'acodec': 'opus', 'vcodec': 'none'}, + '250': {'ext': 'webm', 'acodec': 'opus', 'vcodec': 'none'}, + '251': {'ext': 'webm', 'acodec': 'opus', 'vcodec': 'none'}, + + # av01 video only formats sometimes served with "unknown" codecs + '394': {'ext': 'mp4', 'height': 144, 'vcodec': 'av01.0.00M.08', 'acodec': 'none'}, + '395': {'ext': 'mp4', 'height': 240, 'vcodec': 'av01.0.00M.08', 'acodec': 'none'}, + '396': {'ext': 'mp4', 'height': 360, 'vcodec': 'av01.0.01M.08', 'acodec': 'none'}, + '397': {'ext': 'mp4', 'height': 480, 'vcodec': 'av01.0.04M.08', 'acodec': 'none'}, + '398': {'ext': 'mp4', 'height': 720, 'vcodec': 'av01.0.05M.08', 'acodec': 'none'}, + '399': {'ext': 'mp4', 'height': 1080, 'vcodec': 'av01.0.08M.08', 'acodec': 'none'}, + '400': {'ext': 'mp4', 'height': 1440, 'vcodec': 'av01.0.12M.08', 'acodec': 'none'}, + '401': {'ext': 'mp4', 'height': 2160, 'vcodec': 'av01.0.12M.08', 'acodec': 'none'}, + } + def _call_cdx_api(self, item_id, url, filters: list | None = None, collapse: list | None = None, query: dict | None = None, note=None, fatal=False): # CDX docs: https://github.com/internetarchive/wayback/blob/master/wayback-cdx-server/README.md query = { @@ -929,27 +1078,30 @@ class YoutubeWebArchiveIE(InfoExtractor): capture_dates.extend([self._OLDEST_CAPTURE_DATE, self._NEWEST_CAPTURE_DATE]) return orderedSet(filter(None, capture_dates)) + def _parse_fmt(self, fmt, extra_info=None): + format_id = traverse_obj(fmt, ('url', {parse_qs}, 'itag', 0)) + return { + 'format_id': format_id, + **self._FORMATS.get(format_id, {}), + **traverse_obj(fmt, { + 'url': ('url', {lambda x: f'https://web.archive.org/web/2id_/{x}'}), + 'ext': ('ext', {str}), + 'filesize': ('url', {parse_qs}, 'clen', 0, {int_or_none}), + }), + **(extra_info or {}), + } + def _real_extract(self, url): video_id, url_date, url_date_2 = self._match_valid_url(url).group('id', 'date', 'date2') url_date = url_date or url_date_2 - urlh = None - retry_manager = self.RetryManager(fatal=False) - for retry in retry_manager: - try: - urlh = self._request_webpage( - HEADRequest(f'https://web.archive.org/web/2oe_/http://wayback-fakeurl.archive.org/yt/{video_id}'), - video_id, note='Fetching archived video file url', expected_status=True) - except ExtractorError as e: - # HTTP Error 404 is expected if the video is not saved. 
- if isinstance(e.cause, HTTPError) and e.cause.status == 404: - self.raise_no_formats( - 'The requested video is not archived, indexed, or there is an issue with web.archive.org (try again later)', expected=True) - else: - retry.error = e + video_info = self._download_json( + 'https://web.archive.org/__wb/videoinfo', video_id, + query={'vtype': 'youtube', 'vid': video_id}) - if retry_manager.error: - self.raise_no_formats(retry_manager.error, expected=True, video_id=video_id) + if not traverse_obj(video_info, 'formats'): + self.raise_no_formats( + 'The requested video is not archived or indexed', expected=True) capture_dates = self._get_capture_dates(video_id, int_or_none(url_date)) self.write_debug('Captures to try: ' + join_nonempty(*capture_dates, delim=', ')) @@ -968,25 +1120,15 @@ class YoutubeWebArchiveIE(InfoExtractor): info['thumbnails'] = self._extract_thumbnails(video_id) - if urlh: - url = urllib.parse.unquote(urlh.url) - video_file_url_qs = parse_qs(url) - # Attempt to recover any ext & format info from playback url & response headers - fmt = {'url': url, 'filesize': int_or_none(urlh.headers.get('x-archive-orig-content-length'))} - itag = try_get(video_file_url_qs, lambda x: x['itag'][0]) - if itag and itag in YoutubeIE._formats: - fmt.update(YoutubeIE._formats[itag]) - fmt.update({'format_id': itag}) - else: - mime = try_get(video_file_url_qs, lambda x: x['mime'][0]) - ext = (mimetype2ext(mime) - or urlhandle_detect_ext(urlh) - or mimetype2ext(urlh.headers.get('x-archive-guessed-content-type'))) - fmt.update({'ext': ext}) - info['formats'] = [fmt] - if not info.get('duration'): - info['duration'] = str_to_int(try_get(video_file_url_qs, lambda x: x['dur'][0])) + formats = [] + if video_info.get('dmux'): + for vf in traverse_obj(video_info, ('formats', 'video', lambda _, v: url_or_none(v['url']))): + formats.append(self._parse_fmt(vf, {'acodec': 'none'})) + for af in traverse_obj(video_info, ('formats', 'audio', lambda _, v: url_or_none(v['url']))): + formats.append(self._parse_fmt(af, {'vcodec': 'none'})) + else: + for fmt in traverse_obj(video_info, ('formats', lambda _, v: url_or_none(v['url']))): + formats.append(self._parse_fmt(fmt)) + info['formats'] = formats - if not info.get('title'): - info['title'] = video_id return info diff --git a/plugins/youtube_download/yt_dlp/extractor/ard.py b/plugins/youtube_download/yt_dlp/extractor/ard.py index 89d3299..5bcf74e 100644 --- a/plugins/youtube_download/yt_dlp/extractor/ard.py +++ b/plugins/youtube_download/yt_dlp/extractor/ard.py @@ -1,4 +1,5 @@ import functools +import json import re from .common import InfoExtractor @@ -15,11 +16,12 @@ from ..utils import ( remove_start, str_or_none, unified_strdate, + update_url, update_url_query, url_or_none, xpath_text, ) -from ..utils.traversal import traverse_obj +from ..utils.traversal import traverse_obj, value class ARDMediathekBaseIE(InfoExtractor): @@ -601,3 +603,163 @@ class ARDMediathekCollectionIE(InfoExtractor): return self.playlist_result( OnDemandPagedList(fetch_page, self._PAGE_SIZE), full_id, display_id=display_id, title=page_data.get('title'), description=page_data.get('synopsis')) + + +class ARDAudiothekBaseIE(InfoExtractor): + def _graphql_query(self, urn, query): + return self._download_json( + 'https://api.ardaudiothek.de/graphql', urn, + data=json.dumps({ + 'query': query, + 'variables': {'id': urn}, + }).encode(), headers={ + 'Content-Type': 'application/json', + })['data'] + + +class ARDAudiothekIE(ARDAudiothekBaseIE): + _VALID_URL = 
r'https:?//(?:www\.)?ardaudiothek\.de/episode/(?Purn:ard:(?:episode|section|extra):[a-f0-9]{16})' + + _TESTS = [{ + 'url': 'https://www.ardaudiothek.de/episode/urn:ard:episode:eabead1add170e93/', + 'info_dict': { + 'id': 'urn:ard:episode:eabead1add170e93', + 'ext': 'mp3', + 'upload_date': '20240717', + 'duration': 3339, + 'title': 'CAIMAN CLUB (S04E04): Cash Out', + 'thumbnail': 'https://api.ardmediathek.de/image-service/images/urn:ard:image:ed64411a07a4b405', + 'description': 'md5:0e5d127a3832ae59e8bab40a91a5dadc', + 'display_id': 'urn:ard:episode:eabead1add170e93', + 'timestamp': 1721181641, + 'series': '1LIVE Caiman Club', + 'channel': 'WDR', + 'episode': 'Episode 4', + 'episode_number': 4, + }, + }, { + 'url': 'https://www.ardaudiothek.de/episode/urn:ard:section:855c7a53dac72e0a/', + 'info_dict': { + 'id': 'urn:ard:section:855c7a53dac72e0a', + 'ext': 'mp4', + 'upload_date': '20241231', + 'duration': 3304, + 'title': 'Illegaler DDR-Detektiv: Doberschütz und die letzte Staatsjagd (1/2) - Wendezeit', + 'thumbnail': 'https://api.ardmediathek.de/image-service/images/urn:ard:image:b9b4f1e8b93da4dd', + 'description': 'md5:3552d571e1959754cff66c1da6c0fdae', + 'display_id': 'urn:ard:section:855c7a53dac72e0a', + 'timestamp': 1735629900, + 'series': 'Auf der Spur – Die ARD Ermittlerkrimis', + 'channel': 'ARD', + 'episode': 'Episode 1', + 'episode_number': 1, + }, + }, { + 'url': 'https://www.ardaudiothek.de/episode/urn:ard:extra:d2fe7303d2dcbf5d/', + 'info_dict': { + 'id': 'urn:ard:extra:d2fe7303d2dcbf5d', + 'ext': 'mp3', + 'title': 'Trailer: Fanta Vier Forever, Baby!?!', + 'description': 'md5:b64a586f2e976b8bb5ea0a79dbd8751c', + 'channel': 'SWR', + 'duration': 62, + 'thumbnail': 'https://api.ardmediathek.de/image-service/images/urn:ard:image:48d3c255969be803', + 'series': 'Fanta Vier Forever, Baby!?!', + 'timestamp': 1732108217, + 'upload_date': '20241120', + }, + }] + + _QUERY_ITEM = '''\ + query($id: ID!) 
{ + item(id: $id) { + audioList { + href + distributionType + audioBitrate + audioCodec + } + show { + title + } + image { + url1X1 + } + programSet { + publicationService { + organizationName + } + } + description + title + duration + startDate + episodeNumber + } + }''' + + def _real_extract(self, url): + urn = self._match_id(url) + item = self._graphql_query(urn, self._QUERY_ITEM)['item'] + return { + 'id': urn, + **traverse_obj(item, { + 'formats': ('audioList', lambda _, v: url_or_none(v['href']), { + 'url': 'href', + 'format_id': ('distributionType', {str}), + 'abr': ('audioBitrate', {int_or_none}), + 'acodec': ('audioCodec', {str}), + 'vcodec': {value('none')}, + }), + 'channel': ('programSet', 'publicationService', 'organizationName', {str}), + 'description': ('description', {str}), + 'duration': ('duration', {int_or_none}), + 'series': ('show', 'title', {str}), + 'episode_number': ('episodeNumber', {int_or_none}), + 'thumbnail': ('image', 'url1X1', {url_or_none}, {update_url(query=None)}), + 'timestamp': ('startDate', {parse_iso8601}), + 'title': ('title', {str}), + }), + } + + +class ARDAudiothekPlaylistIE(ARDAudiothekBaseIE): + _VALID_URL = r'https:?//(?:www\.)?ardaudiothek\.de/sendung/(?P[\w-]+)/(?Purn:ard:show:[a-f0-9]{16})' + + _TESTS = [{ + 'url': 'https://www.ardaudiothek.de/sendung/mia-insomnia/urn:ard:show:c405aa26d9a4060a/', + 'info_dict': { + 'display_id': 'mia-insomnia', + 'title': 'Mia Insomnia', + 'id': 'urn:ard:show:c405aa26d9a4060a', + 'description': 'md5:d9ceb7a6b4d26a4db3316573bb564292', + }, + 'playlist_mincount': 37, + }, { + 'url': 'https://www.ardaudiothek.de/sendung/100-berlin/urn:ard:show:4d248e0806ce37bc/', + 'only_matching': True, + }] + + _QUERY_PLAYLIST = ''' + query($id: ID!) { + show(id: $id) { + title + description + items(filter: { isPublished: { equalTo: true } }) { + nodes { + url + } + } + } + }''' + + def _real_extract(self, url): + urn, playlist = self._match_valid_url(url).group('id', 'playlist') + playlist_info = self._graphql_query(urn, self._QUERY_PLAYLIST)['show'] + entries = [] + for url in traverse_obj(playlist_info, ('items', 'nodes', ..., 'url', {url_or_none})): + entries.append(self.url_result(url, ie=ARDAudiothekIE)) + return self.playlist_result(entries, urn, display_id=playlist, **traverse_obj(playlist_info, { + 'title': ('title', {str}), + 'description': ('description', {str}), + })) diff --git a/plugins/youtube_download/yt_dlp/extractor/bandcamp.py b/plugins/youtube_download/yt_dlp/extractor/bandcamp.py index 0a8f88f..510fc5f 100644 --- a/plugins/youtube_download/yt_dlp/extractor/bandcamp.py +++ b/plugins/youtube_download/yt_dlp/extractor/bandcamp.py @@ -5,16 +5,18 @@ import time from .common import InfoExtractor from ..utils import ( - KNOWN_EXTENSIONS, ExtractorError, clean_html, extract_attributes, float_or_none, + format_field, int_or_none, + join_nonempty, parse_filesize, + parse_qs, str_or_none, + strftime_or_none, try_get, - unified_strdate, unified_timestamp, update_url_query, url_or_none, @@ -411,70 +413,67 @@ class BandcampAlbumIE(BandcampIE): # XXX: Do not subclass from concrete IE class BandcampWeeklyIE(BandcampIE): # XXX: Do not subclass from concrete IE IE_NAME = 'Bandcamp:weekly' - _VALID_URL = r'https?://(?:www\.)?bandcamp\.com/?\?(?:.*?&)?show=(?P\d+)' + _VALID_URL = r'https?://(?:www\.)?bandcamp\.com/radio/?\?(?:[^#]+&)?show=(?P\d+)' _TESTS = [{ - 'url': 'https://bandcamp.com/?show=224', + 'url': 'https://bandcamp.com/radio?show=224', 'md5': '61acc9a002bed93986b91168aa3ab433', 'info_dict': { 'id': '224', 'ext': 
'mp3', - 'title': 'BC Weekly April 4th 2017 - Magic Moments', + 'title': 'Bandcamp Weekly, 2017-04-04', 'description': 'md5:5d48150916e8e02d030623a48512c874', - 'duration': 5829.77, - 'release_date': '20170404', + 'thumbnail': 'https://f4.bcbits.com/img/9982549_0.jpg', 'series': 'Bandcamp Weekly', - 'episode': 'Magic Moments', 'episode_id': '224', + 'release_timestamp': 1491264000, + 'release_date': '20170404', + 'duration': 5829.77, }, 'params': { 'format': 'mp3-128', }, }, { - 'url': 'https://bandcamp.com/?blah/blah@&show=228', + 'url': 'https://bandcamp.com/radio/?foo=bar&show=224', 'only_matching': True, }] def _real_extract(self, url): show_id = self._match_id(url) - webpage = self._download_webpage(url, show_id) + audio_data = self._download_json( + 'https://bandcamp.com/api/bcradio_api/1/get_show', + show_id, 'Downloading radio show JSON', + data=json.dumps({'id': show_id}).encode(), + headers={'Content-Type': 'application/json'})['radioShowAudio'] - blob = self._extract_data_attr(webpage, show_id, 'blob') + stream_url = audio_data['streamUrl'] + format_id = traverse_obj(stream_url, ({parse_qs}, 'enc', -1)) + encoding, _, bitrate_str = (format_id or '').partition('-') - show = blob['bcw_data'][show_id] + webpage = self._download_webpage(url, show_id, fatal=False) + metadata = traverse_obj( + self._extract_data_attr(webpage, show_id, 'blob', fatal=False), + ('appData', 'shows', lambda _, v: str(v['showId']) == show_id, any)) or {} - formats = [] - for format_id, format_url in show['audio_stream'].items(): - if not url_or_none(format_url): - continue - for known_ext in KNOWN_EXTENSIONS: - if known_ext in format_id: - ext = known_ext - break - else: - ext = None - formats.append({ - 'format_id': format_id, - 'url': format_url, - 'ext': ext, - 'vcodec': 'none', - }) - - title = show.get('audio_title') or 'Bandcamp Weekly' - subtitle = show.get('subtitle') - if subtitle: - title += f' - {subtitle}' + series_title = audio_data.get('title') or metadata.get('title') + release_timestamp = unified_timestamp(audio_data.get('date')) or unified_timestamp(metadata.get('date')) return { 'id': show_id, - 'title': title, - 'description': show.get('desc') or show.get('short_desc'), - 'duration': float_or_none(show.get('audio_duration')), - 'is_live': False, - 'release_date': unified_strdate(show.get('published_date')), - 'series': 'Bandcamp Weekly', - 'episode': show.get('subtitle'), 'episode_id': show_id, - 'formats': formats, + 'title': join_nonempty(series_title, strftime_or_none(release_timestamp, '%Y-%m-%d'), delim=', '), + 'series': series_title, + 'thumbnail': format_field(metadata, 'imageId', 'https://f4.bcbits.com/img/%s_0.jpg', default=None), + 'description': metadata.get('desc') or metadata.get('short_desc'), + 'duration': float_or_none(audio_data.get('duration')), + 'release_timestamp': release_timestamp, + 'formats': [{ + 'url': stream_url, + 'format_id': format_id, + 'ext': encoding or 'mp3', + 'acodec': encoding or None, + 'vcodec': 'none', + 'abr': int_or_none(bitrate_str), + }], } diff --git a/plugins/youtube_download/yt_dlp/extractor/bigo.py b/plugins/youtube_download/yt_dlp/extractor/bigo.py index b1c230f..3e51173 100644 --- a/plugins/youtube_download/yt_dlp/extractor/bigo.py +++ b/plugins/youtube_download/yt_dlp/extractor/bigo.py @@ -1,5 +1,5 @@ from .common import InfoExtractor -from ..utils import ExtractorError, urlencode_postdata +from ..utils import ExtractorError, UserNotLive, urlencode_postdata class BigoIE(InfoExtractor): @@ -40,7 +40,7 @@ class BigoIE(InfoExtractor): info 
= info_raw.get('data') or {} if not info.get('alive'): - raise ExtractorError('This user is offline.', expected=True) + raise UserNotLive(video_id=user_id) formats, subs = self._extract_m3u8_formats_and_subtitles( info.get('hls_src'), user_id, 'mp4', 'm3u8') diff --git a/plugins/youtube_download/yt_dlp/extractor/bitmovin.py b/plugins/youtube_download/yt_dlp/extractor/bitmovin.py new file mode 100644 index 0000000..f999393 --- /dev/null +++ b/plugins/youtube_download/yt_dlp/extractor/bitmovin.py @@ -0,0 +1,74 @@ +import re + +from .common import InfoExtractor +from ..utils.traversal import traverse_obj + + +class BitmovinIE(InfoExtractor): + _VALID_URL = r'https?://streams\.bitmovin\.com/(?P<id>\w+)' + _EMBED_REGEX = [r'<iframe[^>]+\bsrc=["\'](?P<url>(?:https?:)?//streams\.bitmovin\.com/(?P<id>\w+)[^"\']+)'] + _TESTS = [{ + 'url': 'https://streams.bitmovin.com/cqkl1t5giv3lrce7pjbg/embed', + 'info_dict': { + 'id': 'cqkl1t5giv3lrce7pjbg', + 'ext': 'mp4', + 'title': 'Developing Osteopathic Residents as Faculty', + 'thumbnail': 'https://streams.bitmovin.com/cqkl1t5giv3lrce7pjbg/poster', + }, + 'params': {'skip_download': 'm3u8'}, + }, { + 'url': 'https://streams.bitmovin.com/cgl9rh94uvs51rqc8jhg/share', + 'info_dict': { + 'id': 'cgl9rh94uvs51rqc8jhg', + 'ext': 'mp4', + 'title': 'Big Buck Bunny (Streams Docs)', + 'thumbnail': 'https://streams.bitmovin.com/cgl9rh94uvs51rqc8jhg/poster', + }, + 'params': {'skip_download': 'm3u8'}, + }] + _WEBPAGE_TESTS = [{ + # bitmovin-stream web component + 'url': 'https://www.institutionalinvestor.com/article/2bsw1in1l9k68mp9kritc/video-war-stories-over-board-games/best-case-i-get-fired-war-stories', + 'info_dict': { + 'id': 'cuiumeil6g115lc4li3g', + 'ext': 'mp4', + 'title': '[media] War Stories over Board Games: “Best Case: I Get Fired” ', + 'thumbnail': 'https://streams.bitmovin.com/cuiumeil6g115lc4li3g/poster', + }, + 'params': {'skip_download': 'm3u8'}, + }, { + # iframe embed + 'url': 'https://www.clearblueionizer.com/en/pool-ionizers/mineral-pool-vs-saltwater-pool/', + 'info_dict': { + 'id': 'cvpvfsm1pf7itg7cfvtg', + 'ext': 'mp4', + 'title': 'Pool Ionizer vs.
Salt Chlorinator', + 'thumbnail': 'https://streams.bitmovin.com/cvpvfsm1pf7itg7cfvtg/poster', + }, + 'params': {'skip_download': 'm3u8'}, + }] + + @classmethod + def _extract_embed_urls(cls, url, webpage): + yield from super()._extract_embed_urls(url, webpage) + for stream_id in re.findall(r'<bitmovin-stream[^>]*\bstream-id=["\'](?P<id>\w+)', webpage): + yield f'https://streams.bitmovin.com/{stream_id}' + + def _real_extract(self, url): + video_id = self._match_id(url) + + player_config = self._download_json( + f'https://streams.bitmovin.com/{video_id}/config', video_id)['sources'] + + formats, subtitles = self._extract_m3u8_formats_and_subtitles( + player_config['hls'], video_id, 'mp4') + + return { + 'id': video_id, + 'formats': formats, + 'subtitles': subtitles, + **traverse_obj(player_config, { + 'title': ('title', {str}), + 'thumbnail': ('poster', {str}), + }), + } diff --git a/plugins/youtube_download/yt_dlp/extractor/bunnycdn.py b/plugins/youtube_download/yt_dlp/extractor/bunnycdn.py index d787533..d4a4a23 100644 --- a/plugins/youtube_download/yt_dlp/extractor/bunnycdn.py +++ b/plugins/youtube_download/yt_dlp/extractor/bunnycdn.py @@ -16,7 +16,7 @@ from ..utils.traversal import find_element, traverse_obj class BunnyCdnIE(InfoExtractor): - _VALID_URL = r'https?://(?:iframe\.mediadelivery\.net|video\.bunnycdn\.com)/(?:embed|play)/(?P<library_id>\d+)/(?P<id>[\da-f-]+)' + _VALID_URL = r'https?://(?:(?:iframe|player)\.mediadelivery\.net|video\.bunnycdn\.com)/(?:embed|play)/(?P<library_id>\d+)/(?P<id>[\da-f-]+)' _EMBED_REGEX = [rf'<iframe[^>]+src=[\'"](?P<url>{_VALID_URL}[^\'"]*)[\'"]'] _TESTS = [{ 'url': 'https://iframe.mediadelivery.net/embed/113933/e73edec1-e381-4c8b-ae73-717a140e0924', @@ -39,7 +39,7 @@ class BunnyCdnIE(InfoExtractor): 'timestamp': 1691145748, 'thumbnail': r're:^https?://.*\.b-cdn\.net/32e34c4b-0d72-437c-9abb-05e67657da34/thumbnail_9172dc16\.jpg', 'duration': 106.0, - 'description': 'md5:981a3e899a5c78352b21ed8b2f1efd81', + 'description': 'md5:11452bcb31f379ee3eaf1234d3264e44', 'upload_date': '20230804', 'title': 'Sanela ist Teil der #arbeitsmarktkraft', }, @@ -58,6 +58,23 @@ class BunnyCdnIE(InfoExtractor): 'thumbnail': r're:^https?://.*\.b-cdn\.net/2e8545ec-509d-4571-b855-4cf0235ccd75/thumbnail\.jpg', }, 'params': {'skip_download': True}, + }, { + # Requires any Referer + 'url': 'https://iframe.mediadelivery.net/embed/289162/6372f5a3-68df-4ef7-a115-e1110186c477', + 'info_dict': { + 'id': '6372f5a3-68df-4ef7-a115-e1110186c477', + 'ext': 'mp4', + 'title': '12-Creating Small Asset Blockouts -Timelapse.mp4', + 'description': '', + 'duration': 263.0, + 'timestamp': 1724485440, + 'upload_date': '20240824', + 'thumbnail': r're:^https?://.*\.b-cdn\.net/6372f5a3-68df-4ef7-a115-e1110186c477/thumbnail\.jpg', + }, + 'params': {'skip_download': True}, + }, { + 'url': 'https://player.mediadelivery.net/embed/519128/875880a9-bcc2-4038-9e05-e5024bba9b70', + 'only_matching': True, }] _WEBPAGE_TESTS = [{ # Stream requires Referer @@ -100,7 +117,7 @@ class BunnyCdnIE(InfoExtractor): video_id, library_id = self._match_valid_url(url).group('id', 'library_id') webpage = self._download_webpage( f'https://iframe.mediadelivery.net/embed/{library_id}/{video_id}', video_id, - headers=traverse_obj(smuggled_data, {'Referer': 'Referer'}), + headers={'Referer': smuggled_data.get('Referer') or 'https://iframe.mediadelivery.net/'}, query=traverse_obj(parse_qs(url), {'token': 'token', 'expires': 'expires'})) if html_title := self._html_extract_title(webpage, default=None) == '403': diff --git a/plugins/youtube_download/yt_dlp/extractor/cda.py
b/plugins/youtube_download/yt_dlp/extractor/cda.py index 027b37d..f8fb606 100644 --- a/plugins/youtube_download/yt_dlp/extractor/cda.py +++ b/plugins/youtube_download/yt_dlp/extractor/cda.py @@ -27,7 +27,7 @@ from ..utils.traversal import traverse_obj class CDAIE(InfoExtractor): - _VALID_URL = r'https?://(?:(?:www\.)?cda\.pl/video|ebd\.cda\.pl/[0-9]+x[0-9]+)/(?P<id>[0-9a-z]+)' + _VALID_URL = r'https?://(?:(?:(?:www|m)\.)?cda\.pl/video|ebd\.cda\.pl/[0-9]+x[0-9]+)/(?P<id>[0-9a-z]+)' _NETRC_MACHINE = 'cdapl' _BASE_URL = 'https://www.cda.pl' @@ -110,6 +110,9 @@ class CDAIE(InfoExtractor): }, { 'url': 'http://ebd.cda.pl/0x0/5749950c', 'only_matching': True, + }, { + 'url': 'https://m.cda.pl/video/617297677', + 'only_matching': True, }] def _download_age_confirm_page(self, url, video_id, *args, **kwargs): @@ -367,35 +370,35 @@ class CDAIE(InfoExtractor): class CDAFolderIE(InfoExtractor): _MAX_PAGE_SIZE = 36 - _VALID_URL = r'https?://(?:www\.)?cda\.pl/(?P<channel>[\w-]+)/folder/(?P<id>\d+)' - _TESTS = [ - { - 'url': 'https://www.cda.pl/domino264/folder/31188385', - 'info_dict': { - 'id': '31188385', - 'title': 'SERIA DRUGA', - }, - 'playlist_mincount': 13, + _VALID_URL = r'https?://(?:(?:www|m)\.)?cda\.pl/(?P<channel>[\w-]+)/folder/(?P<id>\d+)' + _TESTS = [{ + 'url': 'https://www.cda.pl/domino264/folder/31188385', + 'info_dict': { + 'id': '31188385', + 'title': 'SERIA DRUGA', }, - { - 'url': 'https://www.cda.pl/smiechawaTV/folder/2664592/vfilm', - 'info_dict': { - 'id': '2664592', - 'title': 'VideoDowcipy - wszystkie odcinki', - }, - 'playlist_mincount': 71, + 'playlist_mincount': 13, + }, { + 'url': 'https://www.cda.pl/smiechawaTV/folder/2664592/vfilm', + 'info_dict': { + 'id': '2664592', + 'title': 'VideoDowcipy - wszystkie odcinki', }, - { - 'url': 'https://www.cda.pl/DeliciousBeauty/folder/19129979/vfilm', - 'info_dict': { - 'id': '19129979', - 'title': 'TESTY KOSMETYKÓW', - }, - 'playlist_mincount': 139, - }, { - 'url': 'https://www.cda.pl/FILMY-SERIALE-ANIME-KRESKOWKI-BAJKI/folder/18493422', - 'only_matching': True, - }] + 'playlist_mincount': 71, + }, { + 'url': 'https://www.cda.pl/DeliciousBeauty/folder/19129979/vfilm', + 'info_dict': { + 'id': '19129979', + 'title': 'TESTY KOSMETYKÓW', + }, + 'playlist_mincount': 139, + }, { + 'url': 'https://www.cda.pl/FILMY-SERIALE-ANIME-KRESKOWKI-BAJKI/folder/18493422', + 'only_matching': True, + }, { + 'url': 'https://m.cda.pl/smiechawaTV/folder/2664592/vfilm', + 'only_matching': True, + }] def _real_extract(self, url): folder_id, channel = self._match_valid_url(url).group('id', 'channel') diff --git a/plugins/youtube_download/yt_dlp/extractor/common.py b/plugins/youtube_download/yt_dlp/extractor/common.py index d6e2596..70f1433 100644 --- a/plugins/youtube_download/yt_dlp/extractor/common.py +++ b/plugins/youtube_download/yt_dlp/extractor/common.py @@ -348,6 +348,7 @@ class InfoExtractor: duration: Length of the video in seconds, as an integer or float. view_count: How many users have watched the video on the platform. concurrent_view_count: How many users are currently watching the video on the platform.
+ save_count: Number of times the video has been saved or bookmarked like_count: Number of positive ratings of the video dislike_count: Number of negative ratings of the video repost_count: Number of reposts of the video diff --git a/plugins/youtube_download/yt_dlp/extractor/croatianfilm.py b/plugins/youtube_download/yt_dlp/extractor/croatianfilm.py new file mode 100644 index 0000000..de68829 --- /dev/null +++ b/plugins/youtube_download/yt_dlp/extractor/croatianfilm.py @@ -0,0 +1,79 @@ +from .common import InfoExtractor +from .vimeo import VimeoIE +from ..utils import ( + ExtractorError, + join_nonempty, +) +from ..utils.traversal import traverse_obj + + +class CroatianFilmIE(InfoExtractor): + IE_NAME = 'croatian.film' + _VALID_URL = r'https?://(?:www\.)?croatian\.film/[a-z]{2}/[^/?#]+/(?P<id>\d+)' + _GEO_COUNTRIES = ['HR'] + + _TESTS = [{ + 'url': 'https://www.croatian.film/hr/films/72472', + 'info_dict': { + 'id': '1078340774', + 'ext': 'mp4', + 'title': '“ŠKAFETIN”, r. Paško Vukasović', + 'uploader': 'croatian.film', + 'uploader_id': 'user94192658', + 'uploader_url': 'https://vimeo.com/user94192658', + 'duration': 1357, + 'thumbnail': 'https://i.vimeocdn.com/video/2008556407-40eb1315ec11be5fcb8dda4d7059675b0881e182b9fc730892e267db72cb57f5-d', + }, + 'params': {'skip_download': 'm3u8'}, + 'expected_warnings': ['Failed to parse XML: not well-formed'], + }, { + # geo-restricted but works with xff + 'url': 'https://www.croatian.film/en/films/77144', + 'info_dict': { + 'id': '1144997795', + 'ext': 'mp4', + 'title': '“ROKO” r. Ivana Marinić Kragić', + 'uploader': 'croatian.film', + 'uploader_id': 'user94192658', + 'uploader_url': 'https://vimeo.com/user94192658', + 'duration': 1023, + 'thumbnail': 'https://i.vimeocdn.com/video/2093793231-11c2928698ff8347489e679b4d563a576e7acd0681ce95b383a9a25f6adb5e8f-d', + }, + 'params': {'skip_download': 'm3u8'}, + 'expected_warnings': ['Failed to parse XML: not well-formed'], + }, { + 'url': 'https://www.croatian.film/en/films/75904/watch', + 'info_dict': { + 'id': '1134883757', + 'ext': 'mp4', + 'title': '"CARPE DIEM" r.
Nina Damjanović', + 'uploader': 'croatian.film', + 'uploader_id': 'user94192658', + 'uploader_url': 'https://vimeo.com/user94192658', + 'duration': 1123, + 'thumbnail': 'https://i.vimeocdn.com/video/2080022187-bb691c470c28c4d979258cf235e594bf9a11c14b837a0784326c25c95edd83f9-d', + }, + 'params': {'skip_download': 'm3u8'}, + 'expected_warnings': ['Failed to parse XML: not well-formed'], + }] + + def _real_extract(self, url): + display_id = self._match_id(url) + api_data = self._download_json( + f'https://api.croatian.film/api/videos/{display_id}', + display_id) + + if errors := traverse_obj(api_data, ('errors', lambda _, v: v['code'])): + codes = traverse_obj(errors, (..., 'code', {str})) + if 'INVALID_COUNTRY' in codes: + self.raise_geo_restricted(countries=self._GEO_COUNTRIES) + raise ExtractorError(join_nonempty( + *(traverse_obj(errors, (..., 'details', {str})) or codes), + delim='; ')) + + vimeo_id = self._search_regex( + r'/videos/(\d+)', api_data['video']['vimeoURL'], 'vimeo ID') + + return self.url_result( + VimeoIE._smuggle_referrer(f'https://player.vimeo.com/video/{vimeo_id}', url), + VimeoIE, vimeo_id) diff --git a/plugins/youtube_download/yt_dlp/extractor/digiteka.py b/plugins/youtube_download/yt_dlp/extractor/digiteka.py index e56ec63..1bbec62 100644 --- a/plugins/youtube_download/yt_dlp/extractor/digiteka.py +++ b/plugins/youtube_download/yt_dlp/extractor/digiteka.py @@ -1,5 +1,6 @@ from .common import InfoExtractor -from ..utils import int_or_none +from ..utils import int_or_none, url_or_none +from ..utils.traversal import traverse_obj class DigitekaIE(InfoExtractor): @@ -25,74 +26,56 @@ class DigitekaIE(InfoExtractor): )/(?P[\d+a-z]+)''' _EMBED_REGEX = [r'<(?:iframe|script)[^>]+src=["\'](?P(?:https?:)?//(?:www\.)?ultimedia\.com/deliver/(?:generic|musique)(?:/[^/]+)*/(?:src|article)/[\d+a-z]+)'] _TESTS = [{ - # news - 'url': 'https://www.ultimedia.com/default/index/videogeneric/id/s8uk0r', - 'md5': '276a0e49de58c7e85d32b057837952a2', + 'url': 'https://www.ultimedia.com/default/index/videogeneric/id/3x5x55k', 'info_dict': { - 'id': 's8uk0r', + 'id': '3x5x55k', 'ext': 'mp4', - 'title': 'Loi sur la fin de vie: le texte prévoit un renforcement des directives anticipées', + 'title': 'Il est passionné de DS', 'thumbnail': r're:^https?://.*\.jpg', - 'duration': 74, - 'upload_date': '20150317', - 'timestamp': 1426604939, - 'uploader_id': '3fszv', + 'duration': 89, + 'upload_date': '20251012', + 'timestamp': 1760285363, + 'uploader_id': '3pz33', }, - }, { - # music - 'url': 'https://www.ultimedia.com/default/index/videomusic/id/xvpfp8', - 'md5': '2ea3513813cf230605c7e2ffe7eca61c', - 'info_dict': { - 'id': 'xvpfp8', - 'ext': 'mp4', - 'title': 'Two - C\'est La Vie (clip)', - 'thumbnail': r're:^https?://.*\.jpg', - 'duration': 233, - 'upload_date': '20150224', - 'timestamp': 1424760500, - 'uploader_id': '3rfzk', - }, - }, { - 'url': 'https://www.digiteka.net/deliver/generic/iframe/mdtk/01637594/src/lqm3kl/zone/1/showtitle/1/autoplay/yes', - 'only_matching': True, + 'params': {'skip_download': True}, }] + _IFRAME_MD_ID = '01836272' # One static ID working for Ultimedia iframes def _real_extract(self, url): - mobj = self._match_valid_url(url) - video_id = mobj.group('id') - video_type = mobj.group('embed_type') or mobj.group('site_type') - if video_type == 'music': - video_type = 'musique' + video_id = self._match_id(url) - deliver_info = self._download_json( - f'http://www.ultimedia.com/deliver/video?video={video_id}&topic={video_type}', - video_id) - - yt_id = deliver_info.get('yt_id') - 
if yt_id: - return self.url_result(yt_id, 'Youtube') - - jwconf = deliver_info['jwconf'] + video_info = self._download_json( + f'https://www.ultimedia.com/player/getConf/{self._IFRAME_MD_ID}/1/{video_id}', video_id, + note='Downloading player configuration')['video'] formats = [] - for source in jwconf['playlist'][0]['sources']: - formats.append({ - 'url': source['file'], - 'format_id': source.get('label'), - }) + subtitles = {} - title = deliver_info['title'] - thumbnail = jwconf.get('image') - duration = int_or_none(deliver_info.get('duration')) - timestamp = int_or_none(deliver_info.get('release_time')) - uploader_id = deliver_info.get('owner_id') + if hls_url := traverse_obj(video_info, ('media_sources', 'hls', 'hls_auto', {url_or_none})): + fmts, subs = self._extract_m3u8_formats_and_subtitles( + hls_url, video_id, 'mp4', m3u8_id='hls', fatal=False) + formats.extend(fmts) + self._merge_subtitles(subs, target=subtitles) + + for format_id, mp4_url in traverse_obj(video_info, ('media_sources', 'mp4', {dict.items}, ...)): + if not mp4_url: + continue + formats.append({ + 'url': mp4_url, + 'format_id': format_id, + 'height': int_or_none(format_id.partition('_')[2]), + 'ext': 'mp4', + }) return { 'id': video_id, - 'title': title, - 'thumbnail': thumbnail, - 'duration': duration, - 'timestamp': timestamp, - 'uploader_id': uploader_id, 'formats': formats, + 'subtitles': subtitles, + **traverse_obj(video_info, { + 'title': ('title', {str}), + 'thumbnail': ('image', {url_or_none}), + 'duration': ('duration', {int_or_none}), + 'timestamp': ('creationDate', {int_or_none}), + 'uploader_id': ('ownerId', {str}), + }), } diff --git a/plugins/youtube_download/yt_dlp/extractor/dplay.py b/plugins/youtube_download/yt_dlp/extractor/dplay.py index 86950b2..798c3f0 100644 --- a/plugins/youtube_download/yt_dlp/extractor/dplay.py +++ b/plugins/youtube_download/yt_dlp/extractor/dplay.py @@ -13,6 +13,7 @@ from ..utils import ( try_get, unified_timestamp, ) +from ..utils.traversal import traverse_obj class DPlayBaseIE(InfoExtractor): @@ -1053,7 +1054,7 @@ class DiscoveryPlusIndiaIE(DiscoveryPlusBaseIE): class DiscoveryNetworksDeIE(DiscoveryPlusBaseIE): - _VALID_URL = r'https?://(?:www\.)?(?P<domain>(?:tlc|dmax)\.de|dplay\.co\.uk)/(?:programme|show|sendungen)/(?P<programme>[^/]+)/(?:video/)?(?P<alternate_id>[^/]+)' + _VALID_URL = r'https?://(?:www\.)?(?P<domain>(?:tlc|dmax)\.de)/(?:programme|show|sendungen)/(?P<programme>[^/?#]+)/(?:video/)?(?P<alternate_id>[^/?#]+)' _TESTS = [{ 'url': 'https://dmax.de/sendungen/goldrausch-in-australien/german-gold', @@ -1074,6 +1075,7 @@ class DiscoveryNetworksDeIE(DiscoveryPlusBaseIE): 'creators': ['DMAX'], 'thumbnail': 'https://eu1-prod-images.disco-api.com/2023/05/09/f72fb510-7992-3b12-af7f-f16a2c22d1e3.jpeg', 'tags': ['schatzsucher', 'schatz', 'nugget', 'bodenschätze', 'down under', 'australien', 'goldrausch'], + 'categories': ['Gold', 'Schatzsucher'], }, 'params': {'skip_download': 'm3u8'}, }, { @@ -1100,20 +1102,96 @@ class DiscoveryNetworksDeIE(DiscoveryPlusBaseIE): }, { 'url': 'https://www.dmax.de/programme/dmax-highlights/video/tuning-star-sidney-hoffmann-exklusiv-bei-dmax/191023082312316', 'only_matching': True, - }, { - 'url': 'https://www.dplay.co.uk/show/ghost-adventures/video/hotel-leger-103620/EHD_280313B', - 'only_matching': True, }, { 'url': 'https://tlc.de/sendungen/breaking-amish/die-welt-da-drauen/', 'only_matching': True, + }, { + 'url': 'https://dmax.de/sendungen/feuerwache-3-alarm-in-muenchen/24-stunden-auf-der-feuerwache-3', + 'info_dict': { + 'id': '8873549', + 'ext': 'mp4', + 'title': '24 Stunden auf der Feuerwache
3', + 'description': 'md5:f3084ef6170bfb79f9a6e0c030e09330', + 'display_id': 'feuerwache-3-alarm-in-muenchen/24-stunden-auf-der-feuerwache-3', + 'episode': 'Episode 1', + 'episode_number': 1, + 'season': 'Season 1', + 'season_number': 1, + 'series': 'Feuerwache 3 - Alarm in München', + 'duration': 2632.0, + 'upload_date': '20251016', + 'timestamp': 1760645100, + 'creators': ['DMAX'], + 'thumbnail': 'https://eu1-prod-images.disco-api.com/2025/10/14/0bdee68c-a8d8-33d9-9204-16eb61108552.jpeg', + 'tags': [], + 'categories': ['DMAX Originals', 'Jobs', 'Blaulicht'], + }, + 'params': {'skip_download': 'm3u8'}, + }, { + 'url': 'https://tlc.de/sendungen/ghost-adventures/der-poltergeist-im-kostumladen', + 'info_dict': { + 'id': '4550602', + 'ext': 'mp4', + 'title': 'Der Poltergeist im Kostümladen', + 'description': 'md5:20b52b9736a0a3a7873d19a238fad7fc', + 'display_id': 'ghost-adventures/der-poltergeist-im-kostumladen', + 'episode': 'Episode 1', + 'episode_number': 1, + 'season': 'Season 25', + 'season_number': 25, + 'series': 'Ghost Adventures', + 'duration': 2493.0, + 'upload_date': '20241223', + 'timestamp': 1734948900, + 'creators': ['TLC'], + 'thumbnail': 'https://eu1-prod-images.disco-api.com/2023/04/05/59941d26-a81b-365f-829f-69d8cd81fd0f.jpeg', + 'tags': [], + 'categories': ['Paranormal', 'Gruselig!'], + }, + 'params': {'skip_download': 'm3u8'}, + }, { + 'url': 'https://tlc.de/sendungen/evil-gesichter-des-boesen/das-geheimnis-meines-bruders', + 'info_dict': { + 'id': '7792288', + 'ext': 'mp4', + 'title': 'Das Geheimnis meines Bruders', + 'description': 'md5:3167550bb582eb9c92875c86a0a20882', + 'display_id': 'evil-gesichter-des-boesen/das-geheimnis-meines-bruders', + 'episode': 'Episode 1', + 'episode_number': 1, + 'season': 'Season 1', + 'season_number': 1, + 'series': 'Evil - Gesichter des Bösen', + 'duration': 2626.0, + 'upload_date': '20240926', + 'timestamp': 1727388000, + 'creators': ['TLC'], + 'thumbnail': 'https://eu1-prod-images.disco-api.com/2024/11/29/e9f3e3ae-74ec-3631-81b7-fc7bbe844741.jpeg', + 'tags': 'count:13', + 'categories': ['True Crime', 'Mord'], + }, + 'params': {'skip_download': 'm3u8'}, }] def _real_extract(self, url): domain, programme, alternate_id = self._match_valid_url(url).groups() - country = 'GB' if domain == 'dplay.co.uk' else 'DE' - realm = 'questuk' if country == 'GB' else domain.replace('.', '') - return self._get_disco_api_info( - url, f'{programme}/{alternate_id}', 'eu1-prod.disco-api.com', realm, country) + display_id = f'{programme}/{alternate_id}' + meta = self._download_json( + f'https://de-api.loma-cms.com/feloma/videos/{alternate_id}/', + display_id, query={ + 'environment': domain.split('.')[0], + 'v': '2', + 'filter[show.slug]': programme, + }, fatal=False) + video_id = traverse_obj(meta, ('uid', {str}, {lambda s: s[-7:]})) or display_id + + disco_api_info = self._get_disco_api_info( + url, video_id, 'eu1-prod.disco-api.com', domain.replace('.', ''), 'DE') + disco_api_info['display_id'] = display_id + disco_api_info['categories'] = traverse_obj(meta, ( + 'taxonomies', lambda _, v: v['category'] == 'genre', 'title', {str.strip}, filter, all, filter)) + + return disco_api_info def _update_disco_api_headers(self, headers, disco_base, display_id, realm): headers.update({ diff --git a/plugins/youtube_download/yt_dlp/extractor/dropbox.py b/plugins/youtube_download/yt_dlp/extractor/dropbox.py index ce8435c..2c1f1a4 100644 --- a/plugins/youtube_download/yt_dlp/extractor/dropbox.py +++ b/plugins/youtube_download/yt_dlp/extractor/dropbox.py @@ -14,7 +14,7 @@ 
from ..utils import ( class DropboxIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?dropbox\.com/(?:(?:e/)?scl/fi|sh?)/(?P\w+)' + _VALID_URL = r'https?://(?:www\.)?dropbox\.com/(?:(?:e/)?scl/f[io]|sh?)/(?P\w+)' _TESTS = [ { 'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh/youtube-dl%20test%20video%20%27%C3%A4%22BaW_jenozKc.mp4?dl=0', @@ -35,6 +35,9 @@ class DropboxIE(InfoExtractor): }, { 'url': 'https://www.dropbox.com/e/scl/fi/r2kd2skcy5ylbbta5y1pz/DJI_0003.MP4?dl=0&rlkey=wcdgqangn7t3lnmmv6li9mu9h', 'only_matching': True, + }, { + 'url': 'https://www.dropbox.com/scl/fo/zjfqse5txqfd7twa8iewj/AOfZzSYWUSKle2HD7XF7kzQ/A-BEAT%20C.mp4?rlkey=6tg3jkp4tv6a5vt58a6dag0mm&dl=0', + 'only_matching': True, }, ] diff --git a/plugins/youtube_download/yt_dlp/extractor/facebook.py b/plugins/youtube_download/yt_dlp/extractor/facebook.py index 2c35013..99f6427 100644 --- a/plugins/youtube_download/yt_dlp/extractor/facebook.py +++ b/plugins/youtube_download/yt_dlp/extractor/facebook.py @@ -4,8 +4,6 @@ import urllib.parse from .common import InfoExtractor from ..compat import compat_etree_fromstring -from ..networking import Request -from ..networking.exceptions import network_exceptions from ..utils import ( ExtractorError, clean_html, @@ -64,9 +62,6 @@ class FacebookIE(InfoExtractor): class=(?P[\'"])[^\'"]*\bfb-(?:video|post)\b[^\'"]*(?P=q1)[^>]+ data-href=(?P[\'"])(?P(?:https?:)?//(?:www\.)?facebook.com/.+?)(?P=q2)''', ] - _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1' - _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1' - _NETRC_MACHINE = 'facebook' IE_NAME = 'facebook' _VIDEO_PAGE_TEMPLATE = 'https://www.facebook.com/video/video.php?v=%s' @@ -469,65 +464,6 @@ class FacebookIE(InfoExtractor): 'graphURI': '/api/graphql/', } - def _perform_login(self, username, password): - login_page_req = Request(self._LOGIN_URL) - self._set_cookie('facebook.com', 'locale', 'en_US') - login_page = self._download_webpage(login_page_req, None, - note='Downloading login page', - errnote='Unable to download login page') - lsd = self._search_regex( - r'', login_results) is not None: - error = self._html_search_regex( - r'(?s)]+class=(["\']).*?login_error_box.*?\1[^>]*>]*>.*?]*>(?P.+?)', - login_results, 'login error', default=None, group='error') - if error: - raise ExtractorError(f'Unable to login: {error}', expected=True) - self.report_warning('unable to log in: bad username/password, or exceeded login rate limit (~3/min). 
Check credentials or wait.') - return - - fb_dtsg = self._search_regex( - r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg', default=None) - h = self._search_regex( - r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h', default=None) - - if not fb_dtsg or not h: - return - - check_form = { - 'fb_dtsg': fb_dtsg, - 'h': h, - 'name_action_selected': 'dont_save', - } - check_req = Request(self._CHECKPOINT_URL, urlencode_postdata(check_form)) - check_req.headers['Content-Type'] = 'application/x-www-form-urlencoded' - check_response = self._download_webpage(check_req, None, - note='Confirming login') - if re.search(r'id="checkpointSubmitButton"', check_response) is not None: - self.report_warning('Unable to confirm login, you have to login in your browser and authorize the login.') - except network_exceptions as err: - self.report_warning(f'unable to log in: {err}') - return - def _extract_from_url(self, url, video_id): webpage = self._download_webpage( url.replace('://m.facebook.com/', '://www.facebook.com/'), video_id) diff --git a/plugins/youtube_download/yt_dlp/extractor/fc2.py b/plugins/youtube_download/yt_dlp/extractor/fc2.py index d343069..aa6ff63 100644 --- a/plugins/youtube_download/yt_dlp/extractor/fc2.py +++ b/plugins/youtube_download/yt_dlp/extractor/fc2.py @@ -5,6 +5,7 @@ from .common import InfoExtractor from ..networking import Request from ..utils import ( ExtractorError, + UserNotLive, js_to_json, traverse_obj, update_url_query, @@ -205,6 +206,9 @@ class FC2LiveIE(InfoExtractor): 'client_app': 'browser_hls', 'ipv6': '', }), headers={'X-Requested-With': 'XMLHttpRequest'}) + # A non-zero 'status' indicates the stream is not live, so check truthiness + if traverse_obj(control_server, ('status', {int})) and 'control_token' not in control_server: + raise UserNotLive(video_id=video_id) self._set_cookie('live.fc2.com', 'l_ortkn', control_server['orz_raw']) ws_url = update_url_query(control_server['url'], {'control_token': control_server['control_token']}) diff --git a/plugins/youtube_download/yt_dlp/extractor/filmarchiv.py b/plugins/youtube_download/yt_dlp/extractor/filmarchiv.py new file mode 100644 index 0000000..50fde2a --- /dev/null +++ b/plugins/youtube_download/yt_dlp/extractor/filmarchiv.py @@ -0,0 +1,52 @@ +from .common import InfoExtractor +from ..utils import clean_html +from ..utils.traversal import ( + find_element, + find_elements, + traverse_obj, +) + + +class FilmArchivIE(InfoExtractor): + IE_DESC = 'FILMARCHIV ON' + _VALID_URL = r'https?://(?:www\.)?filmarchiv\.at/de/filmarchiv-on/video/(?Pf_[0-9a-zA-Z]{5,})' + _TESTS = [{ + 'url': 'https://www.filmarchiv.at/de/filmarchiv-on/video/f_0305p7xKrXUPBwoNE9x6mh', + 'md5': '54a6596f6a84624531866008a77fa27a', + 'info_dict': { + 'id': 'f_0305p7xKrXUPBwoNE9x6mh', + 'ext': 'mp4', + 'title': 'Der Wurstelprater zur Kaiserzeit', + 'description': 'md5:9843f92df5cc9a4975cee7aabcf6e3b2', + 'thumbnail': r're:https://cdn\.filmarchiv\.at/f_0305/p7xKrXUPBwoNE9x6mh_v1/poster\.jpg', + }, + }, { + 'url': 'https://www.filmarchiv.at/de/filmarchiv-on/video/f_0306vI3wO0tJIsfrqYFQXF', + 'md5': '595385d7f54cb6529140ee8de7d1c3c7', + 'info_dict': { + 'id': 'f_0306vI3wO0tJIsfrqYFQXF', + 'ext': 'mp4', + 'title': 'Vor 70 Jahren: Wettgehen der Briefträger in Wien', + 'description': 'md5:b2a2e4230923cd1969d471c552e62811', + 'thumbnail': r're:https://cdn\.filmarchiv\.at/f_0306/vI3wO0tJIsfrqYFQXF_v1/poster\.jpg', + }, + }] + + def _real_extract(self, url): + media_id = self._match_id(url) + webpage = self._download_webpage(url, 
media_id) + path = '/'.join((media_id[:6], media_id[6:])) + formats, subtitles = self._extract_m3u8_formats_and_subtitles( + f'https://cdn.filmarchiv.at/{path}_v1_sv1/playlist.m3u8', media_id) + + return { + 'id': media_id, + 'title': traverse_obj(webpage, ({find_element(tag='title-div')}, {clean_html})), + 'description': traverse_obj(webpage, ( + {find_elements(tag='div', attr='class', value=r'.*\bborder-base-content\b', regex=True)}, ..., + {find_elements(tag='div', attr='class', value=r'.*\bprose\b', html=False, regex=True)}, ..., + {clean_html}, any)), + 'thumbnail': f'https://cdn.filmarchiv.at/{path}_v1/poster.jpg', + 'formats': formats, + 'subtitles': subtitles, + } diff --git a/plugins/youtube_download/yt_dlp/extractor/firsttv.py b/plugins/youtube_download/yt_dlp/extractor/firsttv.py index 878732c..86ad7d7 100644 --- a/plugins/youtube_download/yt_dlp/extractor/firsttv.py +++ b/plugins/youtube_download/yt_dlp/extractor/firsttv.py @@ -10,7 +10,7 @@ from ..utils import ( unified_strdate, url_or_none, ) -from ..utils.traversal import traverse_obj +from ..utils.traversal import require, traverse_obj class FirstTVIE(InfoExtractor): @@ -129,3 +129,36 @@ class FirstTVIE(InfoExtractor): return self.playlist_result( self._entries(items), display_id, self._og_search_title(webpage, default=None), thumbnail=self._og_search_thumbnail(webpage, default=None)) + + +class FirstTVLiveIE(InfoExtractor): + IE_NAME = '1tv:live' + IE_DESC = 'Первый канал (прямой эфир)' + _VALID_URL = r'https?://(?:www\.)?1tv\.ru/live' + + _TESTS = [{ + 'url': 'https://www.1tv.ru/live', + 'info_dict': { + 'id': 'live', + 'ext': 'mp4', + 'title': r're:ПЕРВЫЙ КАНАЛ ПРЯМОЙ ЭФИР СМОТРЕТЬ ОНЛАЙН \d{4}-\d{2}-\d{2} \d{2}:\d{2}$', + 'live_status': 'is_live', + }, + 'params': {'skip_download': 'livestream'}, + }] + + def _real_extract(self, url): + display_id = 'live' + webpage = self._download_webpage(url, display_id, fatal=False) + + streams_list = self._download_json('https://stream.1tv.ru/api/playlist/1tvch-v1_as_array.json', display_id) + mpd_url = traverse_obj(streams_list, ('mpd', ..., {url_or_none}, any, {require('mpd url')})) + # FFmpeg needs to be passed -re to not seek past live window. 
This is handled by core + formats, _ = self._extract_mpd_formats_and_subtitles(mpd_url, display_id, mpd_id='dash') + + return { + 'id': display_id, + 'title': self._html_extract_title(webpage), + 'formats': formats, + 'is_live': True, + } diff --git a/plugins/youtube_download/yt_dlp/extractor/floatplane.py b/plugins/youtube_download/yt_dlp/extractor/floatplane.py index 7dd3b0e..31723c2 100644 --- a/plugins/youtube_download/yt_dlp/extractor/floatplane.py +++ b/plugins/youtube_download/yt_dlp/extractor/floatplane.py @@ -6,15 +6,15 @@ from ..utils import ( OnDemandPagedList, clean_html, determine_ext, + float_or_none, format_field, int_or_none, join_nonempty, - parse_codecs, parse_iso8601, url_or_none, urljoin, ) -from ..utils.traversal import traverse_obj +from ..utils.traversal import require, traverse_obj class FloatplaneBaseIE(InfoExtractor): @@ -50,37 +50,31 @@ class FloatplaneBaseIE(InfoExtractor): media_id = media['id'] media_typ = media.get('type') or 'video' - metadata = self._download_json( - f'{self._BASE_URL}/api/v3/content/{media_typ}', media_id, query={'id': media_id}, - note=f'Downloading {media_typ} metadata', impersonate=self._IMPERSONATE_TARGET) - stream = self._download_json( - f'{self._BASE_URL}/api/v2/cdn/delivery', media_id, query={ - 'type': 'vod' if media_typ == 'video' else 'aod', - 'guid': metadata['guid'], - }, note=f'Downloading {media_typ} stream data', + f'{self._BASE_URL}/api/v3/delivery/info', media_id, + query={'scenario': 'onDemand', 'entityId': media_id}, + note=f'Downloading {media_typ} stream data', impersonate=self._IMPERSONATE_TARGET) - path_template = traverse_obj(stream, ('resource', 'uri', {str})) + metadata = self._download_json( + f'{self._BASE_URL}/api/v3/content/{media_typ}', media_id, + f'Downloading {media_typ} metadata', query={'id': media_id}, + fatal=False, impersonate=self._IMPERSONATE_TARGET) - def format_path(params): - path = path_template - for i, val in (params or {}).items(): - path = path.replace(f'{{qualityLevelParams.{i}}}', val) - return path + cdn_base_url = traverse_obj(stream, ( + 'groups', 0, 'origins', ..., 'url', {url_or_none}, any, {require('cdn base url')})) formats = [] - for quality in traverse_obj(stream, ('resource', 'data', 'qualityLevels', ...)): - url = urljoin(stream['cdn'], format_path(traverse_obj( - stream, ('resource', 'data', 'qualityLevelParams', quality['name'], {dict})))) - format_id = traverse_obj(quality, ('name', {str})) + for variant in traverse_obj(stream, ('groups', 0, 'variants', lambda _, v: v['url'])): + format_url = urljoin(cdn_base_url, variant['url']) + format_id = traverse_obj(variant, ('name', {str})) hls_aes = {} m3u8_data = None # If we need impersonation for the API, then we need it for HLS keys too: extract in advance if self._IMPERSONATE_TARGET is not None: m3u8_data = self._download_webpage( - url, media_id, fatal=False, impersonate=self._IMPERSONATE_TARGET, headers=self._HEADERS, + format_url, media_id, fatal=False, impersonate=self._IMPERSONATE_TARGET, headers=self._HEADERS, note=join_nonempty('Downloading', format_id, 'm3u8 information', delim=' '), errnote=join_nonempty('Failed to download', format_id, 'm3u8 information', delim=' ')) if not m3u8_data: @@ -98,18 +92,34 @@ class FloatplaneBaseIE(InfoExtractor): hls_aes['key'] = urlh.read().hex() formats.append({ - **traverse_obj(quality, { + **traverse_obj(variant, { 'format_note': ('label', {str}), - 'width': ('width', {int}), - 'height': ('height', {int}), + 'width': ('meta', 'video', 'width', {int_or_none}), + 'height': ('meta', 
'video', 'height', {int_or_none}), + 'vcodec': ('meta', 'video', 'codec', {str}), + 'acodec': ('meta', 'audio', 'codec', {str}), + 'vbr': ('meta', 'video', 'bitrate', 'average', {int_or_none(scale=1000)}), + 'abr': ('meta', 'audio', 'bitrate', 'average', {int_or_none(scale=1000)}), + 'audio_channels': ('meta', 'audio', 'channelCount', {int_or_none}), + 'fps': ('meta', 'video', 'fps', {float_or_none}), }), - **parse_codecs(quality.get('codecs')), - 'url': url, - 'ext': determine_ext(url.partition('/chunk.m3u8')[0], 'mp4'), + 'url': format_url, + 'ext': determine_ext(format_url.partition('/chunk.m3u8')[0], 'mp4'), 'format_id': format_id, 'hls_media_playlist_data': m3u8_data, 'hls_aes': hls_aes or None, }) + + subtitles = {} + automatic_captions = {} + for sub_data in traverse_obj(metadata, ('textTracks', lambda _, v: url_or_none(v['src']))): + sub_lang = sub_data.get('language') or 'en' + sub_entry = {'url': sub_data['src']} + if sub_data.get('generated'): + automatic_captions.setdefault(sub_lang, []).append(sub_entry) + else: + subtitles.setdefault(sub_lang, []).append(sub_entry) + items.append({ **common_info, 'id': media_id, @@ -119,6 +129,8 @@ class FloatplaneBaseIE(InfoExtractor): 'thumbnail': ('thumbnail', 'path', {url_or_none}), }), 'formats': formats, + 'subtitles': subtitles, + 'automatic_captions': automatic_captions, }) post_info = { diff --git a/plugins/youtube_download/yt_dlp/extractor/frontro.py b/plugins/youtube_download/yt_dlp/extractor/frontro.py new file mode 100644 index 0000000..e86c4af --- /dev/null +++ b/plugins/youtube_download/yt_dlp/extractor/frontro.py @@ -0,0 +1,164 @@ +import json + +from .common import InfoExtractor +from ..utils import int_or_none, parse_iso8601, url_or_none +from ..utils.traversal import traverse_obj + + +class FrontoBaseIE(InfoExtractor): + def _get_auth_headers(self, url): + return traverse_obj(self._get_cookies(url), { + 'authorization': ('frAccessToken', 'value', {lambda token: f'Bearer {token}' if token else None}), + }) + + +class FrontroVideoBaseIE(FrontoBaseIE): + _CHANNEL_ID = None + + def _real_extract(self, url): + video_id = self._match_id(url) + + metadata = self._download_json( + 'https://api.frontrow.cc/query', video_id, data=json.dumps({ + 'operationName': 'Video', + 'variables': {'channelID': self._CHANNEL_ID, 'videoID': video_id}, + 'query': '''query Video($channelID: ID!, $videoID: ID!) { + video(ChannelID: $channelID, VideoID: $videoID) { + ... 
on Video {title description updatedAt thumbnail createdAt duration likeCount comments views url hasAccess} + } + }''', + }).encode(), headers={ + 'content-type': 'application/json', + **self._get_auth_headers(url), + })['data']['video'] + if not traverse_obj(metadata, 'hasAccess'): + self.raise_login_required() + + formats, subtitles = self._extract_m3u8_formats_and_subtitles(metadata['url'], video_id) + + return { + 'id': video_id, + 'formats': formats, + 'subtitles': subtitles, + **traverse_obj(metadata, { + 'title': ('title', {str}), + 'description': ('description', {str}), + 'thumbnail': ('thumbnail', {url_or_none}), + 'timestamp': ('createdAt', {parse_iso8601}), + 'modified_timestamp': ('updatedAt', {parse_iso8601}), + 'duration': ('duration', {int_or_none}), + 'like_count': ('likeCount', {int_or_none}), + 'comment_count': ('comments', {int_or_none}), + 'view_count': ('views', {int_or_none}), + }), + } + + +class FrontroGroupBaseIE(FrontoBaseIE): + _CHANNEL_ID = None + _VIDEO_EXTRACTOR = None + _VIDEO_URL_TMPL = None + + def _real_extract(self, url): + group_id = self._match_id(url) + + metadata = self._download_json( + 'https://api.frontrow.cc/query', group_id, note='Downloading playlist metadata', + data=json.dumps({ + 'operationName': 'PaginatedStaticPageContainer', + 'variables': {'channelID': self._CHANNEL_ID, 'first': 500, 'pageContainerID': group_id}, + 'query': '''query PaginatedStaticPageContainer($channelID: ID!, $pageContainerID: ID!) { + pageContainer(ChannelID: $channelID, PageContainerID: $pageContainerID) { + ... on StaticPageContainer { id title updatedAt createdAt itemRefs {edges {node { + id contentItem { ... on ItemVideo { videoItem: item { + id + }}} + }}} + } + } + }''', + }).encode(), headers={ + 'content-type': 'application/json', + **self._get_auth_headers(url), + })['data']['pageContainer'] + + entries = [] + for video_id in traverse_obj(metadata, ( + 'itemRefs', 'edges', ..., 'node', 'contentItem', 'videoItem', 'id', {str}), + ): + entries.append(self.url_result( + self._VIDEO_URL_TMPL % video_id, self._VIDEO_EXTRACTOR, video_id)) + + return { + '_type': 'playlist', + 'id': group_id, + 'entries': entries, + **traverse_obj(metadata, { + 'title': ('title', {str}), + 'timestamp': ('createdAt', {parse_iso8601}), + 'modified_timestamp': ('updatedAt', {parse_iso8601}), + }), + } + + +class TheChosenIE(FrontroVideoBaseIE): + _CHANNEL_ID = '12884901895' + + _VALID_URL = r'https?://(?:www\.)?watch\.thechosen\.tv/video/(?P[0-9]+)' + _TESTS = [{ + 'url': 'https://watch.thechosen.tv/video/184683594325', + 'md5': '3f878b689588c71b38ec9943c54ff5b0', + 'info_dict': { + 'id': '184683594325', + 'ext': 'mp4', + 'title': 'Season 3 Episode 2: Two by Two', + 'description': 'md5:174c373756ecc8df46b403f4fcfbaf8c', + 'comment_count': int, + 'view_count': int, + 'like_count': int, + 'duration': 4212, + 'thumbnail': r're:https://fastly\.frontrowcdn\.com/channels/12884901895/VIDEO_THUMBNAIL/184683594325/', + 'timestamp': 1698954546, + 'upload_date': '20231102', + 'modified_timestamp': int, + 'modified_date': str, + }, + }, { + 'url': 'https://watch.thechosen.tv/video/184683596189', + 'md5': 'd581562f9d29ce82f5b7770415334151', + 'info_dict': { + 'id': '184683596189', + 'ext': 'mp4', + 'title': 'Season 4 Episode 8: Humble', + 'description': 'md5:20a57bead43da1cf77cd5b0fe29bbc76', + 'comment_count': int, + 'view_count': int, + 'like_count': int, + 'duration': 5092, + 'thumbnail': r're:https://fastly\.frontrowcdn\.com/channels/12884901895/VIDEO_THUMBNAIL/184683596189/', + 'timestamp': 
1715019474, + 'upload_date': '20240506', + 'modified_timestamp': int, + 'modified_date': str, + }, + }] + + +class TheChosenGroupIE(FrontroGroupBaseIE): + _CHANNEL_ID = '12884901895' + _VIDEO_EXTRACTOR = TheChosenIE + _VIDEO_URL_TMPL = 'https://watch.thechosen.tv/video/%s' + + _VALID_URL = r'https?://(?:www\.)?watch\.thechosen\.tv/group/(?P[0-9]+)' + _TESTS = [{ + 'url': 'https://watch.thechosen.tv/group/309237658592', + 'info_dict': { + 'id': '309237658592', + 'title': 'Season 3', + 'timestamp': 1746203969, + 'upload_date': '20250502', + 'modified_timestamp': int, + 'modified_date': str, + }, + 'playlist_count': 8, + }] diff --git a/plugins/youtube_download/yt_dlp/extractor/generic.py b/plugins/youtube_download/yt_dlp/extractor/generic.py index d44e6d3..7c7bb71 100644 --- a/plugins/youtube_download/yt_dlp/extractor/generic.py +++ b/plugins/youtube_download/yt_dlp/extractor/generic.py @@ -821,13 +821,17 @@ class GenericIE(InfoExtractor): 'Referer': smuggled_data.get('referer'), }), impersonate=impersonate) except ExtractorError as e: - if not (isinstance(e.cause, HTTPError) and e.cause.status == 403 - and e.cause.response.get_header('cf-mitigated') == 'challenge' - and e.cause.response.extensions.get('impersonate') is None): + if not isinstance(e.cause, HTTPError) or e.cause.status != 403: + raise + res = e.cause.response + already_impersonating = res.extensions.get('impersonate') is not None + if already_impersonating or ( + res.get_header('cf-mitigated') != 'challenge' + and b'Attention Required! | Cloudflare' not in res.read() + ): raise cf_cookie_domain = traverse_obj( - LenientSimpleCookie(e.cause.response.get_header('set-cookie')), - ('__cf_bm', 'domain')) + LenientSimpleCookie(res.get_header('set-cookie')), ('__cf_bm', 'domain')) if cf_cookie_domain: self.write_debug(f'Clearing __cf_bm cookie for {cf_cookie_domain}') self.cookiejar.clear(domain=cf_cookie_domain, path='/', name='__cf_bm') diff --git a/plugins/youtube_download/yt_dlp/extractor/gofile.py b/plugins/youtube_download/yt_dlp/extractor/gofile.py index a9777a5..e2144d7 100644 --- a/plugins/youtube_download/yt_dlp/extractor/gofile.py +++ b/plugins/youtube_download/yt_dlp/extractor/gofile.py @@ -46,6 +46,7 @@ class GofileIE(InfoExtractor): 'videopassword': 'password', }, }] + _STATIC_TOKEN = '4fd6sg89d7s6' # From https://gofile.io/dist/js/config.js _TOKEN = None def _real_initialize(self): @@ -60,13 +61,16 @@ class GofileIE(InfoExtractor): self._set_cookie('.gofile.io', 'accountToken', self._TOKEN) def _entries(self, file_id): - query_params = {'wt': '4fd6sg89d7s6'} # From https://gofile.io/dist/js/alljs.js - password = self.get_param('videopassword') - if password: + query_params = {} + if password := self.get_param('videopassword'): query_params['password'] = hashlib.sha256(password.encode()).hexdigest() + files = self._download_json( f'https://api.gofile.io/contents/{file_id}', file_id, 'Getting filelist', - query=query_params, headers={'Authorization': f'Bearer {self._TOKEN}'}) + query=query_params, headers={ + 'Authorization': f'Bearer {self._TOKEN}', + 'X-Website-Token': self._STATIC_TOKEN, + }) status = files['status'] if status == 'error-passwordRequired': diff --git a/plugins/youtube_download/yt_dlp/extractor/googledrive.py b/plugins/youtube_download/yt_dlp/extractor/googledrive.py index 0c84f0b..91c9f60 100644 --- a/plugins/youtube_download/yt_dlp/extractor/googledrive.py +++ b/plugins/youtube_download/yt_dlp/extractor/googledrive.py @@ -1,21 +1,20 @@ import re -import urllib.parse from .common import InfoExtractor 
-from .youtube import YoutubeIE from ..utils import ( - ExtractorError, - bug_reports_message, determine_ext, extract_attributes, + filter_dict, get_element_by_class, get_element_html_by_id, int_or_none, - lowercase_escape, - parse_qs, - try_get, + mimetype2ext, + parse_duration, + str_or_none, update_url_query, + url_or_none, ) +from ..utils.traversal import traverse_obj, value class GoogleDriveIE(InfoExtractor): @@ -38,8 +37,8 @@ class GoogleDriveIE(InfoExtractor): 'id': '0ByeS4oOUV-49Zzh4R1J6R09zazQ', 'ext': 'mp4', 'title': 'Big Buck Bunny.mp4', - 'duration': 45, - 'thumbnail': 'https://drive.google.com/thumbnail?id=0ByeS4oOUV-49Zzh4R1J6R09zazQ', + 'duration': 45.069, + 'thumbnail': r're:https://lh3\.googleusercontent\.com/drive-storage/', }, }, { # has itag 50 which is not in YoutubeIE._formats (royalty Free music from 1922) @@ -49,8 +48,29 @@ class GoogleDriveIE(InfoExtractor): 'id': '1IP0o8dHcQrIHGgVyp0Ofvx2cGfLzyO1x', 'ext': 'mp3', 'title': 'My Buddy - Henry Burr - Gus Kahn - Walter Donaldson.mp3', - 'duration': 184, - 'thumbnail': 'https://drive.google.com/thumbnail?id=1IP0o8dHcQrIHGgVyp0Ofvx2cGfLzyO1x', + 'duration': 184.68, + }, + }, { + # Has subtitle track + 'url': 'https://drive.google.com/file/d/1RAGWRgzn85TXCaCk4gxnwF6TGUaZatzE/view', + 'md5': '05488c528da6ef737ec8c962bfa9724e', + 'info_dict': { + 'id': '1RAGWRgzn85TXCaCk4gxnwF6TGUaZatzE', + 'ext': 'mp4', + 'title': 'test.mp4', + 'duration': 9.999, + 'thumbnail': r're:https://lh3\.googleusercontent\.com/drive-storage/', + }, + }, { + # Has subtitle track with kind 'asr' + 'url': 'https://drive.google.com/file/d/1Prvv9-mtDDfN_gkJgtt1OFvIULK8c3Ev/view', + 'md5': 'ccae12d07f18b5988900b2c8b92801fc', + 'info_dict': { + 'id': '1Prvv9-mtDDfN_gkJgtt1OFvIULK8c3Ev', + 'ext': 'mp4', + 'title': 'LEE NA GYUNG-3410-VOICE_MESSAGE.mp4', + 'duration': 8.766, + 'thumbnail': r're:https://lh3\.googleusercontent\.com/drive-storage/', }, }, { # video can't be watched anonymously due to view count limit reached, @@ -71,17 +91,6 @@ class GoogleDriveIE(InfoExtractor): 'url': 'https://drive.usercontent.google.com/download?id=0ByeS4oOUV-49Zzh4R1J6R09zazQ', 'only_matching': True, }] - _FORMATS_EXT = { - **{k: v['ext'] for k, v in YoutubeIE._formats.items() if v.get('ext')}, - '50': 'm4a', - } - _BASE_URL_CAPTIONS = 'https://drive.google.com/timedtext' - _CAPTIONS_ENTRY_TAG = { - 'subtitles': 'track', - 'automatic_captions': 'target', - } - _caption_formats_ext = [] - _captions_xml = None @classmethod def _extract_embed_urls(cls, url, webpage): @@ -91,129 +100,73 @@ class GoogleDriveIE(InfoExtractor): if mobj: yield 'https://drive.google.com/file/d/{}'.format(mobj.group('id')) - def _download_subtitles_xml(self, video_id, subtitles_id, hl): - if self._captions_xml: - return - self._captions_xml = self._download_xml( - self._BASE_URL_CAPTIONS, video_id, query={ - 'id': video_id, - 'vid': subtitles_id, - 'hl': hl, + @staticmethod + def _construct_subtitle_url(base_url, video_id, language, fmt, kind): + return update_url_query( + base_url, filter_dict({ + 'hl': 'en-US', 'v': video_id, + 'type': 'track', + 'lang': language, + 'fmt': fmt, + 'kind': kind, + })) + + def _get_subtitles(self, video_id, video_info): + subtitles = {} + timed_text_base_url = traverse_obj(video_info, ('timedTextDetails', 'timedTextBaseUrl', {url_or_none})) + if not timed_text_base_url: + return subtitles + subtitle_data = self._download_xml( + timed_text_base_url, video_id, 'Downloading subtitles XML', fatal=False, query={ + 'hl': 'en-US', 'type': 'list', - 'tlangs': '1', - 'fmts': 
'1', - 'vssids': '1', - }, note='Downloading subtitles XML', - errnote='Unable to download subtitles XML', fatal=False) - if self._captions_xml: - for f in self._captions_xml.findall('format'): - if f.attrib.get('fmt_code') and not f.attrib.get('default'): - self._caption_formats_ext.append(f.attrib['fmt_code']) - - def _get_captions_by_type(self, video_id, subtitles_id, caption_type, - origin_lang_code=None, origin_lang_name=None): - if not subtitles_id or not caption_type: - return - captions = {} - for caption_entry in self._captions_xml.findall( - self._CAPTIONS_ENTRY_TAG[caption_type]): - caption_lang_code = caption_entry.attrib.get('lang_code') - caption_name = caption_entry.attrib.get('name') or origin_lang_name - if not caption_lang_code or not caption_name: - self.report_warning(f'Missing necessary caption metadata. ' - f'Need lang_code and name attributes. ' - f'Found: {caption_entry.attrib}') - continue - caption_format_data = [] - for caption_format in self._caption_formats_ext: - query = { - 'vid': subtitles_id, - 'v': video_id, - 'fmt': caption_format, - 'lang': (caption_lang_code if origin_lang_code is None - else origin_lang_code), - 'type': 'track', - 'name': caption_name, - 'kind': '', - } - if origin_lang_code is not None: - query.update({'tlang': caption_lang_code}) - caption_format_data.append({ - 'url': update_url_query(self._BASE_URL_CAPTIONS, query), - 'ext': caption_format, - }) - captions[caption_lang_code] = caption_format_data - return captions - - def _get_subtitles(self, video_id, subtitles_id, hl): - if not subtitles_id or not hl: - return - self._download_subtitles_xml(video_id, subtitles_id, hl) - if not self._captions_xml: - return - return self._get_captions_by_type(video_id, subtitles_id, 'subtitles') - - def _get_automatic_captions(self, video_id, subtitles_id, hl): - if not subtitles_id or not hl: - return - self._download_subtitles_xml(video_id, subtitles_id, hl) - if not self._captions_xml: - return - track = next((t for t in self._captions_xml.findall('track') if t.attrib.get('cantran') == 'true'), None) - if track is None: - return - origin_lang_code = track.attrib.get('lang_code') - origin_lang_name = track.attrib.get('name') - if not origin_lang_code or not origin_lang_name: - return - return self._get_captions_by_type( - video_id, subtitles_id, 'automatic_captions', origin_lang_code, origin_lang_name) + 'tlangs': 1, + 'v': video_id, + 'vssids': 1, + }) + subtitle_formats = traverse_obj(subtitle_data, (lambda _, v: v.tag == 'format', {lambda x: x.get('fmt_code')}, {str})) + for track in traverse_obj(subtitle_data, (lambda _, v: v.tag == 'track' and v.get('lang_code'))): + language = track.get('lang_code') + subtitles.setdefault(language, []).extend([{ + 'url': self._construct_subtitle_url( + timed_text_base_url, video_id, language, sub_fmt, track.get('kind')), + 'name': track.get('lang_original'), + 'ext': sub_fmt, + } for sub_fmt in subtitle_formats]) + return subtitles def _real_extract(self, url): video_id = self._match_id(url) - video_info = urllib.parse.parse_qs(self._download_webpage( - 'https://drive.google.com/get_video_info', - video_id, 'Downloading video webpage', query={'docid': video_id})) - - def get_value(key): - return try_get(video_info, lambda x: x[key][0]) - - reason = get_value('reason') - title = get_value('title') + video_info = self._download_json( + f'https://content-workspacevideo-pa.googleapis.com/v1/drive/media/{video_id}/playback', + video_id, 'Downloading video webpage', query={'key': 
'AIzaSyDVQw45DwoYh632gvsP5vPDqEKvb-Ywnb8'}, + headers={'Referer': 'https://drive.google.com/'}) formats = [] - fmt_stream_map = (get_value('fmt_stream_map') or '').split(',') - fmt_list = (get_value('fmt_list') or '').split(',') - if fmt_stream_map and fmt_list: - resolutions = {} - for fmt in fmt_list: - mobj = re.search( - r'^(?P\d+)/(?P\d+)[xX](?P\d+)', fmt) - if mobj: - resolutions[mobj.group('format_id')] = ( - int(mobj.group('width')), int(mobj.group('height'))) + for fmt in traverse_obj(video_info, ( + 'mediaStreamingData', 'formatStreamingData', ('adaptiveTranscodes', 'progressiveTranscodes'), + lambda _, v: url_or_none(v['url']))): + formats.append({ + **traverse_obj(fmt, { + 'url': 'url', + 'format_id': ('itag', {int}, {str_or_none}), + }), + **traverse_obj(fmt, ('transcodeMetadata', { + 'ext': ('mimeType', {mimetype2ext}), + 'width': ('width', {int_or_none}), + 'height': ('height', {int_or_none}), + 'fps': ('videoFps', {int_or_none}), + 'filesize': ('contentLength', {int_or_none}), + 'vcodec': ((('videoCodecString', {str}), {value('none')}), any), + 'acodec': ((('audioCodecString', {str}), {value('none')}), any), + })), + 'downloader_options': { + 'http_chunk_size': 10 << 20, + }, + }) - for fmt_stream in fmt_stream_map: - fmt_stream_split = fmt_stream.split('|') - if len(fmt_stream_split) < 2: - continue - format_id, format_url = fmt_stream_split[:2] - ext = self._FORMATS_EXT.get(format_id) - if not ext: - self.report_warning(f'Unknown format {format_id}{bug_reports_message()}') - f = { - 'url': lowercase_escape(format_url), - 'format_id': format_id, - 'ext': ext, - } - resolution = resolutions.get(format_id) - if resolution: - f.update({ - 'width': resolution[0], - 'height': resolution[1], - }) - formats.append(f) + title = traverse_obj(video_info, ('mediaMetadata', 'title', {str})) source_url = update_url_query( 'https://drive.usercontent.google.com/download', { @@ -264,30 +217,20 @@ class GoogleDriveIE(InfoExtractor): or get_element_by_class('uc-error-caption', confirmation_webpage) or 'unable to extract confirmation code') - if not formats and reason: - if title: - self.raise_no_formats(reason, expected=True) - else: - raise ExtractorError(reason, expected=True) - - hl = get_value('hl') - subtitles_id = None - ttsurl = get_value('ttsurl') - if ttsurl: - # the subtitles ID is the vid param of the ttsurl query - subtitles_id = parse_qs(ttsurl).get('vid', [None])[-1] - - self.cookiejar.clear(domain='.google.com', path='/', name='NID') - return { 'id': video_id, 'title': title, - 'thumbnail': 'https://drive.google.com/thumbnail?id=' + video_id, - 'duration': int_or_none(get_value('length_seconds')), + **traverse_obj(video_info, { + 'duration': ('mediaMetadata', 'duration', {parse_duration}), + 'thumbnails': ('thumbnails', lambda _, v: url_or_none(v['url']), { + 'url': 'url', + 'ext': ('mimeType', {mimetype2ext}), + 'width': ('width', {int}), + 'height': ('height', {int}), + }), + }), 'formats': formats, - 'subtitles': self.extract_subtitles(video_id, subtitles_id, hl), - 'automatic_captions': self.extract_automatic_captions( - video_id, subtitles_id, hl), + 'subtitles': self.extract_subtitles(video_id, video_info), } diff --git a/plugins/youtube_download/yt_dlp/extractor/goplay.py b/plugins/youtube_download/yt_dlp/extractor/goplay.py index 2e959ce..fb9b046 100644 --- a/plugins/youtube_download/yt_dlp/extractor/goplay.py +++ b/plugins/youtube_download/yt_dlp/extractor/goplay.py @@ -13,12 +13,14 @@ from ..utils.traversal import get_first, traverse_obj class 
GoPlayIE(InfoExtractor): - _VALID_URL = r'https?://(www\.)?goplay\.be/video/([^/?#]+/[^/?#]+/|)(?P[^/#]+)' + IE_NAME = 'play.tv' + IE_DESC = 'PLAY (formerly goplay.be)' + _VALID_URL = r'https?://(www\.)?play\.tv/video/([^/?#]+/[^/?#]+/|)(?P[^/#]+)' _NETRC_MACHINE = 'goplay' _TESTS = [{ - 'url': 'https://www.goplay.be/video/de-slimste-mens-ter-wereld/de-slimste-mens-ter-wereld-s22/de-slimste-mens-ter-wereld-s22-aflevering-1', + 'url': 'https://www.play.tv/video/de-slimste-mens-ter-wereld/de-slimste-mens-ter-wereld-s22/de-slimste-mens-ter-wereld-s22-aflevering-1', 'info_dict': { 'id': '2baa4560-87a0-421b-bffc-359914e3c387', 'ext': 'mp4', @@ -33,7 +35,7 @@ class GoPlayIE(InfoExtractor): 'params': {'skip_download': True}, 'skip': 'This video is only available for registered users', }, { - 'url': 'https://www.goplay.be/video/1917', + 'url': 'https://www.play.tv/video/1917', 'info_dict': { 'id': '40cac41d-8d29-4ef5-aa11-75047b9f0907', 'ext': 'mp4', @@ -43,7 +45,7 @@ class GoPlayIE(InfoExtractor): 'params': {'skip_download': True}, 'skip': 'This video is only available for registered users', }, { - 'url': 'https://www.goplay.be/video/de-mol/de-mol-s11/de-mol-s11-aflevering-1#autoplay', + 'url': 'https://www.play.tv/video/de-mol/de-mol-s11/de-mol-s11-aflevering-1#autoplay', 'info_dict': { 'id': 'ecb79672-92b9-4cd9-a0d7-e2f0250681ee', 'ext': 'mp4', @@ -101,7 +103,7 @@ class GoPlayIE(InfoExtractor): break api = self._download_json( - f'https://api.goplay.be/web/v1/videos/long-form/{video_id}', + f'https://api.play.tv/web/v1/videos/long-form/{video_id}', video_id, headers={ 'Authorization': f'Bearer {self._id_token}', **self.geo_verification_headers(), diff --git a/plugins/youtube_download/yt_dlp/extractor/hotstar.py b/plugins/youtube_download/yt_dlp/extractor/hotstar.py index 2ae527a..6bbb538 100644 --- a/plugins/youtube_download/yt_dlp/extractor/hotstar.py +++ b/plugins/youtube_download/yt_dlp/extractor/hotstar.py @@ -27,7 +27,7 @@ class HotStarBaseIE(InfoExtractor): _TOKEN_NAME = 'userUP' _BASE_URL = 'https://www.hotstar.com' _API_URL = 'https://api.hotstar.com' - _API_URL_V2 = 'https://apix.hotstar.com/v2' + _API_URL_V2 = 'https://www.hotstar.com/api/internal/bff/v2' _AKAMAI_ENCRYPTION_KEY = b'\x05\xfc\x1a\x01\xca\xc9\x4b\xc4\x12\xfc\x53\x12\x07\x75\xf9\xee' _FREE_HEADERS = { diff --git a/plugins/youtube_download/yt_dlp/extractor/iqiyi.py b/plugins/youtube_download/yt_dlp/extractor/iqiyi.py index 735b446..f8b4afa 100644 --- a/plugins/youtube_download/yt_dlp/extractor/iqiyi.py +++ b/plugins/youtube_download/yt_dlp/extractor/iqiyi.py @@ -9,14 +9,12 @@ from .openload import PhantomJSwrapper from ..utils import ( ExtractorError, clean_html, - decode_packed_codes, float_or_none, format_field, get_element_by_attribute, get_element_by_id, int_or_none, js_to_json, - ohdave_rsa_encrypt, parse_age_limit, parse_duration, parse_iso8601, @@ -33,143 +31,12 @@ def md5_text(text): return hashlib.md5(text.encode()).hexdigest() -class IqiyiSDK: - def __init__(self, target, ip, timestamp): - self.target = target - self.ip = ip - self.timestamp = timestamp - - @staticmethod - def split_sum(data): - return str(sum(int(p, 16) for p in data)) - - @staticmethod - def digit_sum(num): - if isinstance(num, int): - num = str(num) - return str(sum(map(int, num))) - - def even_odd(self): - even = self.digit_sum(str(self.timestamp)[::2]) - odd = self.digit_sum(str(self.timestamp)[1::2]) - return even, odd - - def preprocess(self, chunksize): - self.target = md5_text(self.target) - chunks = [] - for i in range(32 // chunksize): 
- chunks.append(self.target[chunksize * i:chunksize * (i + 1)]) - if 32 % chunksize: - chunks.append(self.target[32 - 32 % chunksize:]) - return chunks, list(map(int, self.ip.split('.'))) - - def mod(self, modulus): - chunks, ip = self.preprocess(32) - self.target = chunks[0] + ''.join(str(p % modulus) for p in ip) - - def split(self, chunksize): - modulus_map = { - 4: 256, - 5: 10, - 8: 100, - } - - chunks, ip = self.preprocess(chunksize) - ret = '' - for i in range(len(chunks)): - ip_part = str(ip[i] % modulus_map[chunksize]) if i < 4 else '' - if chunksize == 8: - ret += ip_part + chunks[i] - else: - ret += chunks[i] + ip_part - self.target = ret - - def handle_input16(self): - self.target = md5_text(self.target) - self.target = self.split_sum(self.target[:16]) + self.target + self.split_sum(self.target[16:]) - - def handle_input8(self): - self.target = md5_text(self.target) - ret = '' - for i in range(4): - part = self.target[8 * i:8 * (i + 1)] - ret += self.split_sum(part) + part - self.target = ret - - def handleSum(self): - self.target = md5_text(self.target) - self.target = self.split_sum(self.target) + self.target - - def date(self, scheme): - self.target = md5_text(self.target) - d = time.localtime(self.timestamp) - strings = { - 'y': str(d.tm_year), - 'm': '%02d' % d.tm_mon, - 'd': '%02d' % d.tm_mday, - } - self.target += ''.join(strings[c] for c in scheme) - - def split_time_even_odd(self): - even, odd = self.even_odd() - self.target = odd + md5_text(self.target) + even - - def split_time_odd_even(self): - even, odd = self.even_odd() - self.target = even + md5_text(self.target) + odd - - def split_ip_time_sum(self): - chunks, ip = self.preprocess(32) - self.target = str(sum(ip)) + chunks[0] + self.digit_sum(self.timestamp) - - def split_time_ip_sum(self): - chunks, ip = self.preprocess(32) - self.target = self.digit_sum(self.timestamp) + chunks[0] + str(sum(ip)) - - -class IqiyiSDKInterpreter: - def __init__(self, sdk_code): - self.sdk_code = sdk_code - - def run(self, target, ip, timestamp): - self.sdk_code = decode_packed_codes(self.sdk_code) - - functions = re.findall(r'input=([a-zA-Z0-9]+)\(input', self.sdk_code) - - sdk = IqiyiSDK(target, ip, timestamp) - - other_functions = { - 'handleSum': sdk.handleSum, - 'handleInput8': sdk.handle_input8, - 'handleInput16': sdk.handle_input16, - 'splitTimeEvenOdd': sdk.split_time_even_odd, - 'splitTimeOddEven': sdk.split_time_odd_even, - 'splitIpTimeSum': sdk.split_ip_time_sum, - 'splitTimeIpSum': sdk.split_time_ip_sum, - } - for function in functions: - if re.match(r'mod\d+', function): - sdk.mod(int(function[3:])) - elif re.match(r'date[ymd]{3}', function): - sdk.date(function[4:]) - elif re.match(r'split\d+', function): - sdk.split(int(function[5:])) - elif function in other_functions: - other_functions[function]() - else: - raise ExtractorError(f'Unknown function {function}') - - return sdk.target - - class IqiyiIE(InfoExtractor): IE_NAME = 'iqiyi' IE_DESC = '爱奇艺' _VALID_URL = r'https?://(?:(?:[^.]+\.)?iqiyi\.com|www\.pps\.tv)/.+\.html' - _NETRC_MACHINE = 'iqiyi' - _TESTS = [{ 'url': 'http://www.iqiyi.com/v_19rrojlavg.html', # MD5 checksum differs on my machine and Travis CI @@ -234,57 +101,6 @@ class IqiyiIE(InfoExtractor): '18': 7, # 1080p } - @staticmethod - def _rsa_fun(data): - # public key extracted from http://static.iqiyi.com/js/qiyiV2/20160129180840/jobs/i18n/i18nIndex.js - N = 0xab86b6371b5318aaa1d3c9e612a9f1264f372323c8c0f19875b5fc3b3fd3afcc1e5bec527aa94bfa85bffc157e4245aebda05389a5357b75115ac94f074aefcd - e = 65537 - - 
return ohdave_rsa_encrypt(data, e, N) - - def _perform_login(self, username, password): - - data = self._download_json( - 'http://kylin.iqiyi.com/get_token', None, - note='Get token for logging', errnote='Unable to get token for logging') - sdk = data['sdk'] - timestamp = int(time.time()) - target = ( - f'/apis/reglogin/login.action?lang=zh_TW&area_code=null&email={username}' - f'&passwd={self._rsa_fun(password.encode())}&agenttype=1&from=undefined&keeplogin=0&piccode=&fromurl=&_pos=1') - - interp = IqiyiSDKInterpreter(sdk) - sign = interp.run(target, data['ip'], timestamp) - - validation_params = { - 'target': target, - 'server': 'BEA3AA1908656AABCCFF76582C4C6660', - 'token': data['token'], - 'bird_src': 'f8d91d57af224da7893dd397d52d811a', - 'sign': sign, - 'bird_t': timestamp, - } - validation_result = self._download_json( - 'http://kylin.iqiyi.com/validate?' + urllib.parse.urlencode(validation_params), None, - note='Validate credentials', errnote='Unable to validate credentials') - - MSG_MAP = { - 'P00107': 'please login via the web interface and enter the CAPTCHA code', - 'P00117': 'bad username or password', - } - - code = validation_result['code'] - if code != 'A00000': - msg = MSG_MAP.get(code) - if not msg: - msg = f'error {code}' - if validation_result.get('msg'): - msg += ': ' + validation_result['msg'] - self.report_warning('unable to log in: ' + msg) - return False - - return True - def get_raw_data(self, tvid, video_id): tm = int(time.time() * 1000) diff --git a/plugins/youtube_download/yt_dlp/extractor/jtbc.py b/plugins/youtube_download/yt_dlp/extractor/jtbc.py index 573f749..7532983 100644 --- a/plugins/youtube_download/yt_dlp/extractor/jtbc.py +++ b/plugins/youtube_download/yt_dlp/extractor/jtbc.py @@ -98,7 +98,7 @@ class JTBCIE(InfoExtractor): formats = [] for stream_url in traverse_obj(playback_data, ('sources', 'HLS', ..., 'file', {url_or_none})): - stream_url = re.sub(r'/playlist(?:_pd\d+)?\.m3u8', '/index.m3u8', stream_url) + stream_url = re.sub(r'/playlist_pd\d+\.m3u8', '/playlist.m3u8', stream_url) formats.extend(self._extract_m3u8_formats(stream_url, video_id, fatal=False)) metadata = self._download_json( diff --git a/plugins/youtube_download/yt_dlp/extractor/kika.py b/plugins/youtube_download/yt_dlp/extractor/kika.py index e277564..94798b9 100644 --- a/plugins/youtube_download/yt_dlp/extractor/kika.py +++ b/plugins/youtube_download/yt_dlp/extractor/kika.py @@ -17,57 +17,60 @@ class KikaIE(InfoExtractor): _GEO_COUNTRIES = ['DE'] _TESTS = [{ - 'url': 'https://www.kika.de/logo/videos/logo-vom-samstag-einunddreissig-august-zweitausendvierundzwanzig-100', - 'md5': 'fbfc8da483719ef06f396e5e5b938c69', + # Video without season/episode info + 'url': 'https://www.kika.de/logo/videos/logo-vom-dienstag-achtundzwanzig-oktober-zweitausendfuenfundzwanzig-100', + 'md5': '4a9f6e0f9c6bfcc82394c294f186d6db', 'info_dict': { - 'id': 'logo-vom-samstag-einunddreissig-august-zweitausendvierundzwanzig-100', + 'id': 'logo-vom-dienstag-achtundzwanzig-oktober-zweitausendfuenfundzwanzig-100', 'ext': 'mp4', - 'upload_date': '20240831', - 'timestamp': 1725126600, - 'season_number': 2024, - 'modified_date': '20240831', - 'episode': 'Episode 476', - 'episode_number': 476, - 'season': 'Season 2024', - 'duration': 634, - 'title': 'logo! vom Samstag, 31. August 2024', - 'modified_timestamp': 1725129983, + 'title': 'logo! vom Dienstag, 28. 
Oktober 2025', + 'description': 'md5:4d28b92cef423bec99740ffaa3e7ec04', + 'duration': 651, + 'timestamp': 1761678000, + 'upload_date': '20251028', + 'modified_timestamp': 1761682624, + 'modified_date': '20251028', }, }, { + # Video with season/episode info + # Also: Video with subtitles 'url': 'https://www.kika.de/kaltstart/videos/video92498', - 'md5': '710ece827e5055094afeb474beacb7aa', + 'md5': 'e58073070acb195906c55c4ad31dceb3', 'info_dict': { 'id': 'video92498', 'ext': 'mp4', 'title': '7. Wo ist Leo?', 'description': 'md5:fb48396a5b75068bcac1df74f1524920', 'duration': 436, + 'season': 'Season 1', + 'season_number': 1, + 'episode': 'Episode 7', + 'episode_number': 7, 'timestamp': 1702926876, 'upload_date': '20231218', - 'episode_number': 7, - 'modified_date': '20240319', 'modified_timestamp': 1710880610, - 'episode': 'Episode 7', - 'season_number': 1, - 'season': 'Season 1', + 'modified_date': '20240319', + 'subtitles': 'count:1', }, }, { - 'url': 'https://www.kika.de/bernd-das-brot/astrobrot/videos/video90088', - 'md5': 'ffd1b700d7de0a6616a1d08544c77294', + # Video without subtitles + 'url': 'https://www.kika.de/die-pfefferkoerner/videos/abgezogen-102', + 'md5': '62e97961ce5343c19f0f330a1b6dd736', 'info_dict': { - 'id': 'video90088', + 'id': 'abgezogen-102', 'ext': 'mp4', - 'upload_date': '20221102', - 'timestamp': 1667390580, - 'duration': 197, - 'modified_timestamp': 1711093771, - 'episode_number': 8, - 'title': 'Es ist nicht leicht, ein Astrobrot zu sein', - 'modified_date': '20240322', - 'description': 'md5:d3641deaf1b5515a160788b2be4159a9', - 'season_number': 1, - 'episode': 'Episode 8', + 'title': '1. Abgezogen', + 'description': 'md5:42d87963364391f9f8eba8affcb30bd2', + 'duration': 1574, 'season': 'Season 1', + 'season_number': 1, + 'episode': 'Episode 1', + 'episode_number': 1, + 'timestamp': 1735382700, + 'upload_date': '20241228', + 'modified_timestamp': 1757344051, + 'modified_date': '20250908', + 'subtitles': 'count:0', }, }] @@ -78,16 +81,19 @@ class KikaIE(InfoExtractor): video_assets = self._download_json(doc['assets']['url'], video_id) subtitles = {} - if ttml_resource := url_or_none(video_assets.get('videoSubtitle')): - subtitles['de'] = [{ - 'url': ttml_resource, - 'ext': 'ttml', - }] - if webvtt_resource := url_or_none(video_assets.get('webvttUrl')): - subtitles.setdefault('de', []).append({ - 'url': webvtt_resource, - 'ext': 'vtt', - }) + # Subtitle API endpoints may be present in the JSON even if there are no subtitles. + # They then return HTTP 200 with invalid data. So we must check explicitly. 
+ if doc.get('hasSubtitle'): + if ttml_resource := url_or_none(video_assets.get('videoSubtitle')): + subtitles['de'] = [{ + 'url': ttml_resource, + 'ext': 'ttml', + }] + if webvtt_resource := url_or_none(video_assets.get('webvttUrl')): + subtitles.setdefault('de', []).append({ + 'url': webvtt_resource, + 'ext': 'vtt', + }) return { 'id': video_id, diff --git a/plugins/youtube_download/yt_dlp/extractor/loom.py b/plugins/youtube_download/yt_dlp/extractor/loom.py index b0878c3..ad989fc 100644 --- a/plugins/youtube_download/yt_dlp/extractor/loom.py +++ b/plugins/youtube_download/yt_dlp/extractor/loom.py @@ -8,12 +8,10 @@ from ..utils import ( ExtractorError, determine_ext, filter_dict, - get_first, int_or_none, parse_iso8601, update_url, url_or_none, - variadic, ) from ..utils.traversal import traverse_obj @@ -51,7 +49,7 @@ class LoomIE(InfoExtractor): }, { # m3u8 raw-url, mp4 transcoded-url, cdn url == raw-url, vtt sub and json subs 'url': 'https://www.loom.com/share/9458bcbf79784162aa62ffb8dd66201b', - 'md5': '51737ec002969dd28344db4d60b9cbbb', + 'md5': '7b6bfdef8181c4ffc376e18919a4dcc2', 'info_dict': { 'id': '9458bcbf79784162aa62ffb8dd66201b', 'ext': 'mp4', @@ -71,12 +69,13 @@ class LoomIE(InfoExtractor): 'ext': 'webm', 'title': 'OMFG clown', 'description': 'md5:285c5ee9d62aa087b7e3271b08796815', - 'uploader': 'MrPumkin B', + 'uploader': 'Brailey Bragg', 'upload_date': '20210924', 'timestamp': 1632519618, 'duration': 210, }, 'params': {'skip_download': 'dash'}, + 'expected_warnings': ['Failed to parse JSON'], # transcoded-url no longer available }, { # password-protected 'url': 'https://www.loom.com/share/50e26e8aeb7940189dff5630f95ce1f4', @@ -91,10 +90,11 @@ class LoomIE(InfoExtractor): 'duration': 35, }, 'params': {'videopassword': 'seniorinfants2'}, + 'expected_warnings': ['Failed to parse JSON'], # transcoded-url no longer available }, { # embed, transcoded-url endpoint sends empty JSON response, split video and audio HLS formats 'url': 'https://www.loom.com/embed/ddcf1c1ad21f451ea7468b1e33917e4e', - 'md5': 'b321d261656848c184a94e3b93eae28d', + 'md5': 'f983a0f02f24331738b2f43aecb05256', 'info_dict': { 'id': 'ddcf1c1ad21f451ea7468b1e33917e4e', 'ext': 'mp4', @@ -119,11 +119,12 @@ class LoomIE(InfoExtractor): 'duration': 247, 'timestamp': 1676274030, }, + 'skip': '404 Not Found', }] _GRAPHQL_VARIABLES = { 'GetVideoSource': { - 'acceptableMimes': ['DASH', 'M3U8', 'MP4'], + 'acceptableMimes': ['DASH', 'M3U8', 'MP4', 'WEBM'], }, } _GRAPHQL_QUERIES = { @@ -192,6 +193,12 @@ class LoomIE(InfoExtractor): id nullableRawCdnUrl(acceptableMimes: $acceptableMimes, password: $password) { url + credentials { + Policy + Signature + KeyPairId + __typename + } __typename } __typename @@ -240,9 +247,9 @@ class LoomIE(InfoExtractor): } }\n'''), } - _APOLLO_GRAPHQL_VERSION = '0a1856c' + _APOLLO_GRAPHQL_VERSION = '45a5bd4' - def _call_graphql_api(self, operations, video_id, note=None, errnote=None): + def _call_graphql_api(self, operation_name, video_id, note=None, errnote=None, fatal=True): password = self.get_param('videopassword') return self._download_json( 'https://www.loom.com/graphql', video_id, note or 'Downloading GraphQL JSON', @@ -252,7 +259,9 @@ class LoomIE(InfoExtractor): 'x-loom-request-source': f'loom_web_{self._APOLLO_GRAPHQL_VERSION}', 'apollographql-client-name': 'web', 'apollographql-client-version': self._APOLLO_GRAPHQL_VERSION, - }, data=json.dumps([{ + 'graphql-operation-name': operation_name, + 'Origin': 'https://www.loom.com', + }, data=json.dumps({ 'operationName': operation_name, 
'variables': { 'videoId': video_id, @@ -260,7 +269,7 @@ class LoomIE(InfoExtractor): **self._GRAPHQL_VARIABLES.get(operation_name, {}), }, 'query': self._GRAPHQL_QUERIES[operation_name], - } for operation_name in variadic(operations)], separators=(',', ':')).encode()) + }, separators=(',', ':')).encode(), fatal=fatal) def _call_url_api(self, endpoint, video_id): response = self._download_json( @@ -275,7 +284,7 @@ class LoomIE(InfoExtractor): }, separators=(',', ':')).encode()) return traverse_obj(response, ('url', {url_or_none})) - def _extract_formats(self, video_id, metadata, gql_data): + def _extract_formats(self, video_id, metadata, video_data): formats = [] video_properties = traverse_obj(metadata, ('video_properties', { 'width': ('width', {int_or_none}), @@ -330,7 +339,7 @@ class LoomIE(InfoExtractor): transcoded_url = self._call_url_api('transcoded-url', video_id) formats.extend(get_formats(transcoded_url, 'transcoded', quality=-1)) # transcoded quality - cdn_url = get_first(gql_data, ('data', 'getVideo', 'nullableRawCdnUrl', 'url', {url_or_none})) + cdn_url = traverse_obj(video_data, ('data', 'getVideo', 'nullableRawCdnUrl', 'url', {url_or_none})) # cdn_url is usually a dupe, but the raw-url/transcoded-url endpoints could return errors valid_urls = [update_url(url, query=None) for url in (raw_url, transcoded_url) if url] if cdn_url and update_url(cdn_url, query=None) not in valid_urls: @@ -338,10 +347,21 @@ class LoomIE(InfoExtractor): return formats + def _get_subtitles(self, video_id): + subs_data = self._call_graphql_api( + 'FetchVideoTranscript', video_id, 'Downloading GraphQL subtitles JSON', fatal=False) + return filter_dict({ + 'en': traverse_obj(subs_data, ( + 'data', 'fetchVideoTranscript', + ('source_url', 'captions_source_url'), { + 'url': {url_or_none}, + })) or None, + }) + def _real_extract(self, url): video_id = self._match_id(url) - metadata = get_first( - self._call_graphql_api('GetVideoSSR', video_id, 'Downloading GraphQL metadata JSON'), + metadata = traverse_obj( + self._call_graphql_api('GetVideoSSR', video_id, 'Downloading GraphQL metadata JSON', fatal=False), ('data', 'getVideo', {dict})) or {} if metadata.get('__typename') == 'VideoPasswordMissingOrIncorrect': @@ -350,22 +370,19 @@ class LoomIE(InfoExtractor): 'This video is password-protected, use the --video-password option', expected=True) raise ExtractorError('Invalid video password', expected=True) - gql_data = self._call_graphql_api(['FetchChapters', 'FetchVideoTranscript', 'GetVideoSource'], video_id) + video_data = self._call_graphql_api( + 'GetVideoSource', video_id, 'Downloading GraphQL video JSON') + chapter_data = self._call_graphql_api( + 'FetchChapters', video_id, 'Downloading GraphQL chapters JSON', fatal=False) duration = traverse_obj(metadata, ('video_properties', 'duration', {int_or_none})) return { 'id': video_id, 'duration': duration, 'chapters': self._extract_chapters_from_description( - get_first(gql_data, ('data', 'fetchVideoChapters', 'content', {str})), duration) or None, - 'formats': self._extract_formats(video_id, metadata, gql_data), - 'subtitles': filter_dict({ - 'en': traverse_obj(gql_data, ( - ..., 'data', 'fetchVideoTranscript', - ('source_url', 'captions_source_url'), { - 'url': {url_or_none}, - })) or None, - }), + traverse_obj(chapter_data, ('data', 'fetchVideoChapters', 'content', {str})), duration) or None, + 'formats': self._extract_formats(video_id, metadata, video_data), + 'subtitles': self.extract_subtitles(video_id), **traverse_obj(metadata, { 'title': ('name', 
{str}), 'description': ('description', {str}), @@ -376,6 +393,7 @@ class LoomIE(InfoExtractor): class LoomFolderIE(InfoExtractor): + _WORKING = False IE_NAME = 'loom:folder' _VALID_URL = r'https?://(?:www\.)?loom\.com/share/folder/(?P[\da-f]{32})' _TESTS = [{ diff --git a/plugins/youtube_download/yt_dlp/extractor/manoto.py b/plugins/youtube_download/yt_dlp/extractor/manoto.py deleted file mode 100644 index 1dd0b15..0000000 --- a/plugins/youtube_download/yt_dlp/extractor/manoto.py +++ /dev/null @@ -1,128 +0,0 @@ -from .common import InfoExtractor -from ..utils import clean_html, int_or_none, traverse_obj - -_API_URL = 'https://dak1vd5vmi7x6.cloudfront.net/api/v1/publicrole/{}/{}?id={}' - - -class ManotoTVIE(InfoExtractor): - IE_DESC = 'Manoto TV (Episode)' - _VALID_URL = r'https?://(?:www\.)?manototv\.com/episode/(?P[0-9]+)' - _TESTS = [{ - 'url': 'https://www.manototv.com/episode/8475', - 'info_dict': { - 'id': '8475', - 'series': 'خانه های رویایی با برادران اسکات', - 'season_number': 7, - 'episode_number': 25, - 'episode_id': 'My Dream Home S7: Carol & John', - 'duration': 3600, - 'categories': ['سرگرمی'], - 'title': 'کارول و جان', - 'description': 'md5:d0fff1f8ba5c6775d312a00165d1a97e', - 'thumbnail': r're:^https?://.*\.(jpeg|png|jpg)$', - 'ext': 'mp4', - }, - 'params': { - 'skip_download': 'm3u8', - }, - }, { - 'url': 'https://www.manototv.com/episode/12576', - 'info_dict': { - 'id': '12576', - 'series': 'فیلم های ایرانی', - 'episode_id': 'Seh Mah Taatili', - 'duration': 5400, - 'view_count': int, - 'categories': ['سرگرمی'], - 'title': 'سه ماه تعطیلی', - 'description': 'سه ماه تعطیلی فیلمی به کارگردانی و نویسندگی شاپور قریب ساختهٔ سال ۱۳۵۶ است.', - 'thumbnail': r're:^https?://.*\.(jpeg|png|jpg)$', - 'ext': 'mp4', - }, - 'params': { - 'skip_download': 'm3u8', - }, - }] - - def _real_extract(self, url): - video_id = self._match_id(url) - episode_json = self._download_json(_API_URL.format('showmodule', 'episodedetails', video_id), video_id) - details = episode_json.get('details', {}) - formats = self._extract_m3u8_formats(details.get('videoM3u8Url'), video_id, 'mp4') - return { - 'id': video_id, - 'series': details.get('showTitle'), - 'season_number': int_or_none(details.get('analyticsSeasonNumber')), - 'episode_number': int_or_none(details.get('episodeNumber')), - 'episode_id': details.get('analyticsEpisodeTitle'), - 'duration': int_or_none(details.get('durationInMinutes'), invscale=60), - 'view_count': details.get('viewCount'), - 'categories': [details.get('videoCategory')], - 'title': details.get('episodeTitle'), - 'description': clean_html(details.get('episodeDescription')), - 'thumbnail': details.get('episodelandscapeImgIxUrl'), - 'formats': formats, - } - - -class ManotoTVShowIE(InfoExtractor): - IE_DESC = 'Manoto TV (Show)' - _VALID_URL = r'https?://(?:www\.)?manototv\.com/show/(?P[0-9]+)' - _TESTS = [{ - 'url': 'https://www.manototv.com/show/2526', - 'playlist_mincount': 68, - 'info_dict': { - 'id': '2526', - 'title': 'فیلم های ایرانی', - 'description': 'مجموعه ای از فیلم های سینمای کلاسیک ایران', - }, - }] - - def _real_extract(self, url): - show_id = self._match_id(url) - show_json = self._download_json(_API_URL.format('showmodule', 'details', show_id), show_id) - show_details = show_json.get('details', {}) - title = show_details.get('showTitle') - description = show_details.get('showSynopsis') - - series_json = self._download_json(_API_URL.format('showmodule', 'serieslist', show_id), show_id) - playlist_id = str(traverse_obj(series_json, ('details', 'list', 0, 'id'))) - - 
playlist_json = self._download_json(_API_URL.format('showmodule', 'episodelist', playlist_id), playlist_id) - playlist = traverse_obj(playlist_json, ('details', 'list')) or [] - - entries = [ - self.url_result( - 'https://www.manototv.com/episode/{}'.format(item['slideID']), ie=ManotoTVIE.ie_key(), video_id=item['slideID']) - for item in playlist] - return self.playlist_result(entries, show_id, title, description) - - -class ManotoTVLiveIE(InfoExtractor): - IE_DESC = 'Manoto TV (Live)' - _VALID_URL = r'https?://(?:www\.)?manototv\.com/live/' - _TEST = { - 'url': 'https://www.manototv.com/live/', - 'info_dict': { - 'id': 'live', - 'title': 'Manoto TV Live', - 'ext': 'mp4', - 'is_live': True, - }, - 'params': { - 'skip_download': 'm3u8', - }, - } - - def _real_extract(self, url): - video_id = 'live' - json = self._download_json(_API_URL.format('livemodule', 'details', ''), video_id) - details = json.get('details', {}) - video_url = details.get('liveUrl') - formats = self._extract_m3u8_formats(video_url, video_id, 'mp4', live=True) - return { - 'id': video_id, - 'title': 'Manoto TV Live', - 'is_live': True, - 'formats': formats, - } diff --git a/plugins/youtube_download/yt_dlp/extractor/mave.py b/plugins/youtube_download/yt_dlp/extractor/mave.py index 86d8d8b..aa026f8 100644 --- a/plugins/youtube_download/yt_dlp/extractor/mave.py +++ b/plugins/youtube_download/yt_dlp/extractor/mave.py @@ -1,7 +1,9 @@ -import re +import functools +import math from .common import InfoExtractor from ..utils import ( + InAdvancePagedList, clean_html, int_or_none, parse_iso8601, @@ -10,15 +12,64 @@ from ..utils import ( from ..utils.traversal import require, traverse_obj -class MaveIE(InfoExtractor): - _VALID_URL = r'https?://(?P[\w-]+)\.mave\.digital/(?Pep-\d+)' +class MaveBaseIE(InfoExtractor): + _API_BASE_URL = 'https://api.mave.digital/v1/website' + _API_BASE_STORAGE_URL = 'https://store.cloud.mts.ru/mave/' + + def _load_channel_meta(self, channel_id, display_id): + return traverse_obj(self._download_json( + f'{self._API_BASE_URL}/{channel_id}/', display_id, + note='Downloading channel metadata'), 'podcast') + + def _load_episode_meta(self, channel_id, episode_code, display_id): + return self._download_json( + f'{self._API_BASE_URL}/{channel_id}/episodes/{episode_code}', + display_id, note='Downloading episode metadata') + + def _create_entry(self, channel_id, channel_meta, episode_meta): + episode_code = traverse_obj(episode_meta, ('code', {int}, {require('episode code')})) + return { + 'display_id': f'{channel_id}-{episode_code}', + 'extractor_key': MaveIE.ie_key(), + 'extractor': MaveIE.IE_NAME, + 'webpage_url': f'https://{channel_id}.mave.digital/ep-{episode_code}', + 'channel_id': channel_id, + 'channel_url': f'https://{channel_id}.mave.digital/', + 'vcodec': 'none', + **traverse_obj(episode_meta, { + 'id': ('id', {str}), + 'url': ('audio', {urljoin(self._API_BASE_STORAGE_URL)}), + 'title': ('title', {str}), + 'description': ('description', {clean_html}), + 'thumbnail': ('image', {urljoin(self._API_BASE_STORAGE_URL)}), + 'duration': ('duration', {int_or_none}), + 'season_number': ('season', {int_or_none}), + 'episode_number': ('number', {int_or_none}), + 'view_count': ('listenings', {int_or_none}), + 'like_count': ('reactions', lambda _, v: v['type'] == 'like', 'count', {int_or_none}, any), + 'dislike_count': ('reactions', lambda _, v: v['type'] == 'dislike', 'count', {int_or_none}, any), + 'age_limit': ('is_explicit', {bool}, {lambda x: 18 if x else None}), + 'timestamp': ('publish_date', 
{parse_iso8601}), + }), + **traverse_obj(channel_meta, { + 'series_id': ('id', {str}), + 'series': ('title', {str}), + 'channel': ('title', {str}), + 'uploader': ('author', {str}), + }), + } + + +class MaveIE(MaveBaseIE): + IE_NAME = 'mave' + _VALID_URL = r'https?://(?P[\w-]+)\.mave\.digital/ep-(?P\d+)' _TESTS = [{ 'url': 'https://ochenlichnoe.mave.digital/ep-25', 'md5': 'aa3e513ef588b4366df1520657cbc10c', 'info_dict': { 'id': '4035f587-914b-44b6-aa5a-d76685ad9bc2', 'ext': 'mp3', - 'display_id': 'ochenlichnoe-ep-25', + 'display_id': 'ochenlichnoe-25', 'title': 'Между мной и миром: психология самооценки', 'description': 'md5:4b7463baaccb6982f326bce5c700382a', 'uploader': 'Самарский университет', @@ -45,7 +96,7 @@ class MaveIE(InfoExtractor): 'info_dict': { 'id': '41898bb5-ff57-4797-9236-37a8e537aa21', 'ext': 'mp3', - 'display_id': 'budem-ep-12', + 'display_id': 'budem-12', 'title': 'Екатерина Михайлова: "Горе от ума" не про женщин написана', 'description': 'md5:fa3bdd59ee829dfaf16e3efcb13f1d19', 'uploader': 'Полина Цветкова+Евгения Акопова', @@ -68,40 +119,72 @@ class MaveIE(InfoExtractor): 'upload_date': '20241230', }, }] - _API_BASE_URL = 'https://api.mave.digital/' def _real_extract(self, url): - channel_id, slug = self._match_valid_url(url).group('channel', 'id') - display_id = f'{channel_id}-{slug}' - webpage = self._download_webpage(url, display_id) - data = traverse_obj( - self._search_nuxt_json(webpage, display_id), - ('data', lambda _, v: v['activeEpisodeData'], any, {require('podcast data')})) + channel_id, episode_code = self._match_valid_url(url).group( + 'channel_id', 'episode_code') + display_id = f'{channel_id}-{episode_code}' + + channel_meta = self._load_channel_meta(channel_id, display_id) + episode_meta = self._load_episode_meta(channel_id, episode_code, display_id) + + return self._create_entry(channel_id, channel_meta, episode_meta) + + +class MaveChannelIE(MaveBaseIE): + IE_NAME = 'mave:channel' + _VALID_URL = r'https?://(?P[\w-]+)\.mave\.digital/?(?:$|[?#])' + _TESTS = [{ + 'url': 'https://budem.mave.digital/', + 'info_dict': { + 'id': 'budem', + 'title': 'Все там будем', + 'description': 'md5:f04ae12a42be0f1d765c5e326b41987a', + }, + 'playlist_mincount': 15, + }, { + 'url': 'https://ochenlichnoe.mave.digital/', + 'info_dict': { + 'id': 'ochenlichnoe', + 'title': 'Очень личное', + 'description': 'md5:ee36a6a52546b91b487fe08c552fdbb2', + }, + 'playlist_mincount': 20, + }, { + 'url': 'https://geekcity.mave.digital/', + 'info_dict': { + 'id': 'geekcity', + 'title': 'Мужчины в трико', + 'description': 'md5:4164d425d60a0d97abdce9d1f6f8e049', + }, + 'playlist_mincount': 80, + }] + _PAGE_SIZE = 50 + + def _entries(self, channel_id, channel_meta, page_num): + page_data = self._download_json( + f'{self._API_BASE_URL}/{channel_id}/episodes', channel_id, query={ + 'view': 'all', + 'page': page_num + 1, + 'sort': 'newest', + 'format': 'all', + }, note=f'Downloading page {page_num + 1}') + for ep in traverse_obj(page_data, ('episodes', lambda _, v: v['audio'] and v['id'])): + yield self._create_entry(channel_id, channel_meta, ep) + + def _real_extract(self, url): + channel_id = self._match_id(url) + + channel_meta = self._load_channel_meta(channel_id, channel_id) return { - 'display_id': display_id, - 'channel_id': channel_id, - 'channel_url': f'https://{channel_id}.mave.digital/', - 'vcodec': 'none', - 'thumbnail': re.sub(r'_\d+(?=\.(?:jpg|png))', '', self._og_search_thumbnail(webpage, default='')) or None, - **traverse_obj(data, ('activeEpisodeData', { - 'url': ('audio', 
{urljoin(self._API_BASE_URL)}), - 'id': ('id', {str}), + '_type': 'playlist', + 'id': channel_id, + **traverse_obj(channel_meta, { 'title': ('title', {str}), - 'description': ('description', {clean_html}), - 'duration': ('duration', {int_or_none}), - 'season_number': ('season', {int_or_none}), - 'episode_number': ('number', {int_or_none}), - 'view_count': ('listenings', {int_or_none}), - 'like_count': ('reactions', lambda _, v: v['type'] == 'like', 'count', {int_or_none}, any), - 'dislike_count': ('reactions', lambda _, v: v['type'] == 'dislike', 'count', {int_or_none}, any), - 'age_limit': ('is_explicit', {bool}, {lambda x: 18 if x else None}), - 'timestamp': ('publish_date', {parse_iso8601}), - })), - **traverse_obj(data, ('podcast', 'podcast', { - 'series_id': ('id', {str}), - 'series': ('title', {str}), - 'channel': ('title', {str}), - 'uploader': ('author', {str}), - })), + 'description': ('description', {str}), + }), + 'entries': InAdvancePagedList( + functools.partial(self._entries, channel_id, channel_meta), + math.ceil(channel_meta['episodes_count'] / self._PAGE_SIZE), self._PAGE_SIZE), } diff --git a/plugins/youtube_download/yt_dlp/extractor/medaltv.py b/plugins/youtube_download/yt_dlp/extractor/medaltv.py index 94c51ed..d294d8d 100644 --- a/plugins/youtube_download/yt_dlp/extractor/medaltv.py +++ b/plugins/youtube_download/yt_dlp/extractor/medaltv.py @@ -1,14 +1,9 @@ -import re - from .common import InfoExtractor from ..utils import ( - ExtractorError, - float_or_none, - format_field, int_or_none, - str_or_none, - traverse_obj, + url_or_none, ) +from ..utils.traversal import traverse_obj class MedalTVIE(InfoExtractor): @@ -30,25 +25,8 @@ class MedalTVIE(InfoExtractor): 'view_count': int, 'like_count': int, 'duration': 13, - }, - }, { - 'url': 'https://medal.tv/games/cod-cold-war/clips/2mA60jWAGQCBH', - 'md5': 'fc7a3e4552ae8993c1c4006db46be447', - 'info_dict': { - 'id': '2mA60jWAGQCBH', - 'ext': 'mp4', - 'title': 'Quad Cold', - 'description': 'Medal,https://medal.tv/desktop/', - 'uploader': 'MowgliSB', - 'timestamp': 1603165266, - 'upload_date': '20201020', - 'uploader_id': '10619174', - 'thumbnail': 'https://cdn.medal.tv/10619174/thumbnail-34934644-720p.jpg?t=1080p&c=202042&missing', - 'uploader_url': 'https://medal.tv/users/10619174', - 'comment_count': int, - 'view_count': int, - 'like_count': int, - 'duration': 23, + 'thumbnail': r're:https://cdn\.medal\.tv/ugcp/content-thumbnail/.*\.jpg', + 'tags': ['headshot', 'valorant', '4k', 'clutch', 'mornu'], }, }, { 'url': 'https://medal.tv/games/cod-cold-war/clips/2um24TWdty0NA', @@ -57,12 +35,12 @@ class MedalTVIE(InfoExtractor): 'id': '2um24TWdty0NA', 'ext': 'mp4', 'title': 'u tk me i tk u bigger', - 'description': 'Medal,https://medal.tv/desktop/', - 'uploader': 'Mimicc', + 'description': '', + 'uploader': 'zahl', 'timestamp': 1605580939, 'upload_date': '20201117', 'uploader_id': '5156321', - 'thumbnail': 'https://cdn.medal.tv/5156321/thumbnail-36787208-360p.jpg?t=1080p&c=202046&missing', + 'thumbnail': r're:https://cdn\.medal\.tv/source/.*\.png', 'uploader_url': 'https://medal.tv/users/5156321', 'comment_count': int, 'view_count': int, @@ -70,91 +48,77 @@ class MedalTVIE(InfoExtractor): 'duration': 9, }, }, { - 'url': 'https://medal.tv/games/valorant/clips/37rMeFpryCC-9', - 'only_matching': True, - }, { + # API requires auth 'url': 'https://medal.tv/games/valorant/clips/2WRj40tpY_EU9', + 'md5': '6c6bb6569777fd8b4ef7b33c09de8dcf', + 'info_dict': { + 'id': '2WRj40tpY_EU9', + 'ext': 'mp4', + 'title': '1v5 clutch', + 'description': 
'', + 'uploader': 'adny', + 'uploader_id': '6256941', + 'uploader_url': 'https://medal.tv/users/6256941', + 'comment_count': int, + 'view_count': int, + 'like_count': int, + 'duration': 25, + 'thumbnail': r're:https://cdn\.medal\.tv/source/.*\.jpg', + 'timestamp': 1612896680, + 'upload_date': '20210209', + }, + 'expected_warnings': ['Video formats are not available through API'], + }, { + 'url': 'https://medal.tv/games/valorant/clips/37rMeFpryCC-9', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) - webpage = self._download_webpage(url, video_id, query={'mobilebypass': 'true'}) - - hydration_data = self._search_json( - r']*>[^<]*\bhydrationData\s*=', webpage, - 'next data', video_id, end_pattern='', fatal=False) - - clip = traverse_obj(hydration_data, ('clips', ...), get_all=False) - if not clip: - raise ExtractorError( - 'Could not find video information.', video_id=video_id) - - title = clip['contentTitle'] - - source_width = int_or_none(clip.get('sourceWidth')) - source_height = int_or_none(clip.get('sourceHeight')) - - aspect_ratio = source_width / source_height if source_width and source_height else 16 / 9 - - def add_item(container, item_url, height, id_key='format_id', item_id=None): - item_id = item_id or '%dp' % height - if item_id not in item_url: - return - container.append({ - 'url': item_url, - id_key: item_id, - 'width': round(aspect_ratio * height), - 'height': height, - }) + content_data = self._download_json( + f'https://medal.tv/api/content/{video_id}', video_id, + headers={'Accept': 'application/json'}) formats = [] - thumbnails = [] - for k, v in clip.items(): - if not (v and isinstance(v, str)): - continue - mobj = re.match(r'(contentUrl|thumbnail)(?:(\d+)p)?$', k) - if not mobj: - continue - prefix = mobj.group(1) - height = int_or_none(mobj.group(2)) - if prefix == 'contentUrl': - add_item( - formats, v, height or source_height, - item_id=None if height else 'source') - elif prefix == 'thumbnail': - add_item(thumbnails, v, height, 'id') - - error = clip.get('error') - if not formats and error: - if error == 404: - self.raise_no_formats( - 'That clip does not exist.', - expected=True, video_id=video_id) - else: - self.raise_no_formats( - f'An unknown error occurred ({error}).', - video_id=video_id) - - # Necessary because the id of the author is not known in advance. - # Won't raise an issue if no profile can be found as this is optional. 
- author = traverse_obj(hydration_data, ('profiles', ...), get_all=False) or {} - author_id = str_or_none(author.get('userId')) - author_url = format_field(author_id, None, 'https://medal.tv/users/%s') + if m3u8_url := url_or_none(content_data.get('contentUrlHls')): + formats.extend(self._extract_m3u8_formats(m3u8_url, video_id, 'mp4', m3u8_id='hls')) + if http_url := url_or_none(content_data.get('contentUrl')): + formats.append({ + 'url': http_url, + 'format_id': 'http-source', + 'ext': 'mp4', + 'quality': 1, + }) + formats = [fmt for fmt in formats if 'video/privacy-protected-guest' not in fmt['url']] + if not formats: + # Fallback, does not require auth + self.report_warning('Video formats are not available through API, falling back to social video URL') + urlh = self._request_webpage( + f'https://medal.tv/api/content/{video_id}/socialVideoUrl', video_id, + note='Checking social video URL') + formats.append({ + 'url': urlh.url, + 'format_id': 'social-video', + 'ext': 'mp4', + 'quality': -1, + }) return { 'id': video_id, - 'title': title, 'formats': formats, - 'thumbnails': thumbnails, - 'description': clip.get('contentDescription'), - 'uploader': author.get('displayName'), - 'timestamp': float_or_none(clip.get('created'), 1000), - 'uploader_id': author_id, - 'uploader_url': author_url, - 'duration': int_or_none(clip.get('videoLengthSeconds')), - 'view_count': int_or_none(clip.get('views')), - 'like_count': int_or_none(clip.get('likes')), - 'comment_count': int_or_none(clip.get('comments')), + **traverse_obj(content_data, { + 'title': ('contentTitle', {str}), + 'description': ('contentDescription', {str}), + 'timestamp': ('created', {int_or_none(scale=1000)}), + 'duration': ('videoLengthSeconds', {int_or_none}), + 'view_count': ('views', {int_or_none}), + 'like_count': ('likes', {int_or_none}), + 'comment_count': ('comments', {int_or_none}), + 'uploader': ('poster', 'displayName', {str}), + 'uploader_id': ('poster', 'userId', {str}), + 'uploader_url': ('poster', 'userId', {str}, filter, {lambda x: x and f'https://medal.tv/users/{x}'}), + 'tags': ('tags', ..., {str}), + 'thumbnail': ('thumbnailUrl', {url_or_none}), + }), } diff --git a/plugins/youtube_download/yt_dlp/extractor/mux.py b/plugins/youtube_download/yt_dlp/extractor/mux.py new file mode 100644 index 0000000..34a56f4 --- /dev/null +++ b/plugins/youtube_download/yt_dlp/extractor/mux.py @@ -0,0 +1,92 @@ +import re + +from .common import InfoExtractor +from ..utils import ( + extract_attributes, + filter_dict, + parse_qs, + smuggle_url, + unsmuggle_url, + update_url_query, +) +from ..utils.traversal import traverse_obj + + +class MuxIE(InfoExtractor): + _VALID_URL = r'https?://(?:stream\.new/v|player\.mux\.com)/(?P[A-Za-z0-9-]+)' + _EMBED_REGEX = [r']+\bsrc=["\'](?P(?:https?:)?//(?:stream\.new/v|player\.mux\.com)/(?P[A-Za-z0-9-]+)[^"\']+)'] + _TESTS = [{ + 'url': 'https://stream.new/v/OCtRWZiZqKvLbnZ32WSEYiGNvHdAmB01j/embed', + 'info_dict': { + 'ext': 'mp4', + 'id': 'OCtRWZiZqKvLbnZ32WSEYiGNvHdAmB01j', + 'title': 'OCtRWZiZqKvLbnZ32WSEYiGNvHdAmB01j', + }, + }, { + 'url': 'https://player.mux.com/OCtRWZiZqKvLbnZ32WSEYiGNvHdAmB01j', + 'info_dict': { + 'ext': 'mp4', + 'id': 'OCtRWZiZqKvLbnZ32WSEYiGNvHdAmB01j', + 'title': 'OCtRWZiZqKvLbnZ32WSEYiGNvHdAmB01j', + }, + }] + _WEBPAGE_TESTS = [{ + # iframe embed + 'url': 'https://www.redbrickai.com/blog/2025-07-14-FAST-brush', + 'info_dict': { + 'ext': 'mp4', + 'id': 'cXhzAiW1AmsHY01eRbEYFcTEAn0102aGN8sbt8JprP6Dfw', + 'title': 'cXhzAiW1AmsHY01eRbEYFcTEAn0102aGN8sbt8JprP6Dfw', + }, + }, { + # 
mux-player embed + 'url': 'https://muxvideo.2coders.com/download/', + 'info_dict': { + 'ext': 'mp4', + 'id': 'JBuasdg35Hw7tYmTe9k68QLPQKixL300YsWHDz5Flit8', + 'title': 'JBuasdg35Hw7tYmTe9k68QLPQKixL300YsWHDz5Flit8', + }, + }, { + # mux-player with title metadata + 'url': 'https://datastar-todomvc.cross.stream/', + 'info_dict': { + 'ext': 'mp4', + 'id': 'KX01ZSZ8CXv5SVfVwMZKJTcuBcUQmo1ReS9U5JjoHm4k', + 'title': 'TodoMVC with Datastar Tutorial', + }, + }] + + @classmethod + def _extract_embed_urls(cls, url, webpage): + yield from super()._extract_embed_urls(url, webpage) + for mux_player in re.findall(r']*\bplayback-id=[^>]+>', webpage): + attrs = extract_attributes(mux_player) + playback_id = attrs.get('playback-id') + if not playback_id: + continue + token = attrs.get('playback-token') or traverse_obj(playback_id, ({parse_qs}, 'token', -1)) + playback_id = playback_id.partition('?')[0] + + embed_url = update_url_query( + f'https://player.mux.com/{playback_id}', + filter_dict({'playback-token': token})) + if title := attrs.get('metadata-video-title'): + embed_url = smuggle_url(embed_url, {'title': title}) + yield embed_url + + def _real_extract(self, url): + url, smuggled_data = unsmuggle_url(url, {}) + video_id = self._match_id(url) + + token = traverse_obj(parse_qs(url), ('playback-token', -1)) + + formats, subtitles = self._extract_m3u8_formats_and_subtitles( + f'https://stream.mux.com/{video_id}.m3u8', video_id, 'mp4', + query=filter_dict({'token': token})) + + return { + 'id': video_id, + 'title': smuggled_data.get('title') or video_id, + 'formats': formats, + 'subtitles': subtitles, + } diff --git a/plugins/youtube_download/yt_dlp/extractor/nascar.py b/plugins/youtube_download/yt_dlp/extractor/nascar.py new file mode 100644 index 0000000..b14a3b0 --- /dev/null +++ b/plugins/youtube_download/yt_dlp/extractor/nascar.py @@ -0,0 +1,60 @@ +from .common import InfoExtractor +from ..utils import ( + float_or_none, + parse_iso8601, + url_or_none, +) +from ..utils.traversal import traverse_obj + + +class NascarClassicsIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?classics\.nascar\.com/video/(?P[\w~-]+)' + _TESTS = [{ + 'url': 'https://classics.nascar.com/video/Ka5qGuxzZ~SIvJii7uAC~wszPshklHN', + 'md5': '81d712eccffa7169c328281b8cc28f77', + 'info_dict': { + 'id': 'Ka5qGuxzZ~SIvJii7uAC~wszPshklHN', + 'ext': 'mp4', + 'title': 'Cook Out 400 2023', + 'thumbnail': 'https://va.aws.nascar.com/IMAGES/CUP_2023_22_RICHMOND_THUMB_NCD.jpg', + 'timestamp': 1690732800, + 'upload_date': '20230730', + 'tags': ['2023', 'race #22', 'richmond', 'chris buescher', 'cup'], + 'chapters': 'count:18', + }, + }, { + 'url': 'https://classics.nascar.com/video/UASvPDOwEha~SIvJii7uAC~wszPshklHN', + 'md5': 'a5e8d6ec6005da3857d25ba2df5e7133', + 'info_dict': { + 'id': 'UASvPDOwEha~SIvJii7uAC~wszPshklHN', + 'ext': 'mp4', + 'title': 'I Love New York 355 at the Glen 2017', + 'thumbnail': 'https://va.aws.nascar.com/IMAGES/CUP_2017_22_WATKINSGLEN_THUMB_NCD.jpg', + 'timestamp': 1501995600, + 'upload_date': '20170806', + 'tags': ['watkins glen', 'race #22', '2017', 'martin truex jr.', 'cup'], + 'chapters': 'count:13', + }, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + content_data = self._search_nextjs_data( + webpage, video_id)['props']['pageProps']['contentData'] + + return { + 'id': video_id, + 'formats': self._extract_m3u8_formats(content_data['input']['src'], video_id, 'mp4'), + **traverse_obj(content_data, { + 'title': ('input', 'name', 
{str}), + 'description': ('input', 'description', {str}, filter), + 'thumbnail': ('input', 'thumbnail', {url_or_none}), + 'tags': ('input', 'settings', 'tags', ..., {str}), + 'timestamp': ('input', 'start_time', {parse_iso8601}), + 'chapters': ('overlay', 'data', 'timelines', 0, 'events', lambda _, v: float(v['timestamp']) is not None, { + 'start_time': ('timestamp', {float_or_none}), + 'title': ('name', {str}), + }), + }), + } diff --git a/plugins/youtube_download/yt_dlp/extractor/nbc.py b/plugins/youtube_download/yt_dlp/extractor/nbc.py index caa9dc0..41811b8 100644 --- a/plugins/youtube_download/yt_dlp/extractor/nbc.py +++ b/plugins/youtube_download/yt_dlp/extractor/nbc.py @@ -63,7 +63,7 @@ class NBCUniversalBaseIE(ThePlatformBaseIE): # formats='mpeg4' will return either a working m3u8 URL or an m3u8 template for non-DRM HLS # formats='m3u+none,mpeg4' may return DRM HLS but w/the "folders" needed for non-DRM template query['formats'] = 'm3u+none,mpeg4' - m3u8_url = self._download_nbcu_smil_and_extract_m3u8_url(tp_path, video_id, query) + orig_m3u8_url = m3u8_url = self._download_nbcu_smil_and_extract_m3u8_url(tp_path, video_id, query) if mobj := re.fullmatch(self._M3U8_RE, m3u8_url): query['formats'] = 'mpeg4' @@ -76,7 +76,17 @@ class NBCUniversalBaseIE(ThePlatformBaseIE): if '/mpeg_cenc' in m3u8_url or '/mpeg_cbcs' in m3u8_url: self.report_drm(video_id) - return self._extract_m3u8_formats_and_subtitles(m3u8_url, video_id, 'mp4', m3u8_id='hls') + formats, subtitles = self._extract_m3u8_formats_and_subtitles( + m3u8_url, video_id, 'mp4', m3u8_id='hls', fatal=False) + + if not formats and m3u8_url != orig_m3u8_url: + orig_fmts, subtitles = self._extract_m3u8_formats_and_subtitles( + orig_m3u8_url, video_id, 'mp4', m3u8_id='hls', fatal=False) + formats = [f for f in orig_fmts if not f.get('has_drm')] + if orig_fmts and not formats: + self.report_drm(video_id) + + return formats, subtitles def _extract_nbcu_video(self, url, display_id, old_ie_key=None): webpage = self._download_webpage(url, display_id) diff --git a/plugins/youtube_download/yt_dlp/extractor/nebula.py b/plugins/youtube_download/yt_dlp/extractor/nebula.py index 6ced19d..2708c72 100644 --- a/plugins/youtube_download/yt_dlp/extractor/nebula.py +++ b/plugins/youtube_download/yt_dlp/extractor/nebula.py @@ -478,3 +478,64 @@ class NebulaChannelIE(NebulaBaseIE): playlist_id=collection_slug, playlist_title=channel.get('title'), playlist_description=channel.get('description')) + + +class NebulaSeasonIE(NebulaBaseIE): + IE_NAME = 'nebula:season' + _VALID_URL = rf'{_BASE_URL_RE}/(?P[\w-]+)/season/(?P[\w-]+)' + _TESTS = [{ + 'url': 'https://nebula.tv/jetlag/season/15', + 'info_dict': { + 'id': 'jetlag_15', + 'title': 'Tag: All Stars', + 'description': 'md5:5aa5b8abf3de71756448dc44ffebb674', + }, + 'playlist_count': 8, + }, { + 'url': 'https://nebula.tv/jetlag/season/14', + 'info_dict': { + 'id': 'jetlag_14', + 'title': 'Snake', + 'description': 'md5:6da9040f1c2ac559579738bfb6919d1e', + }, + 'playlist_count': 8, + }, { + 'url': 'https://nebula.tv/jetlag/season/13-5', + 'info_dict': { + 'id': 'jetlag_13-5', + 'title': 'Hide + Seek Across NYC', + 'description': 'md5:5b87bb9acc6dcdff289bb4c71a2ad59f', + }, + 'playlist_count': 3, + }] + + def _build_url_result(self, item): + url = ( + traverse_obj(item, ('share_url', {url_or_none})) + or urljoin('https://nebula.tv/', item.get('app_path')) + or f'https://nebula.tv/videos/{item["slug"]}') + return self.url_result( + smuggle_url(url, {'id': item['id']}), + NebulaIE, url_transparent=True, + 
**self._extract_video_metadata(item)) + + def _entries(self, data): + for episode in traverse_obj(data, ('episodes', lambda _, v: v['video']['id'], 'video')): + yield self._build_url_result(episode) + for extra in traverse_obj(data, ('extras', ..., 'items', lambda _, v: v['id'])): + yield self._build_url_result(extra) + for trailer in traverse_obj(data, ('trailers', lambda _, v: v['id'])): + yield self._build_url_result(trailer) + + def _real_extract(self, url): + series, season_id = self._match_valid_url(url).group('series', 'season_number') + playlist_id = f'{series}_{season_id}' + data = self._call_api( + f'https://content.api.nebula.app/content/{series}/season/{season_id}', playlist_id) + + return self.playlist_result( + self._entries(data), playlist_id, + **traverse_obj(data, { + 'title': ('title', {str}), + 'description': ('description', {str}), + })) diff --git a/plugins/youtube_download/yt_dlp/extractor/netapp.py b/plugins/youtube_download/yt_dlp/extractor/netapp.py new file mode 100644 index 0000000..a665472 --- /dev/null +++ b/plugins/youtube_download/yt_dlp/extractor/netapp.py @@ -0,0 +1,79 @@ +from .brightcove import BrightcoveNewIE +from .common import InfoExtractor +from ..utils import parse_iso8601 +from ..utils.traversal import require, traverse_obj + + +class NetAppBaseIE(InfoExtractor): + _BC_URL = 'https://players.brightcove.net/6255154784001/default_default/index.html?videoId={}' + + @staticmethod + def _parse_metadata(item): + return traverse_obj(item, { + 'title': ('name', {str}), + 'description': ('description', {str}), + 'timestamp': ('createdAt', {parse_iso8601}), + }) + + +class NetAppVideoIE(NetAppBaseIE): + _VALID_URL = r'https?://media\.netapp\.com/video-detail/(?P[0-9a-f-]+)' + + _TESTS = [{ + 'url': 'https://media.netapp.com/video-detail/da25fc01-82ad-5284-95bc-26920200a222/seamless-storage-for-modern-kubernetes-deployments', + 'info_dict': { + 'id': '1843620950167202073', + 'ext': 'mp4', + 'title': 'Seamless storage for modern Kubernetes deployments', + 'description': 'md5:1ee39e315243fe71fb90af2796037248', + 'uploader_id': '6255154784001', + 'duration': 2159.41, + 'thumbnail': r're:https://house-fastly-signed-us-east-1-prod\.brightcovecdn\.com/image/.*\.jpg', + 'tags': 'count:15', + 'timestamp': 1758213949, + 'upload_date': '20250918', + }, + }, { + 'url': 'https://media.netapp.com/video-detail/45593e5d-cf1c-5996-978c-c9081906e69f/unleash-ai-innovation-with-your-data-with-the-netapp-platform', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_uuid = self._match_id(url) + metadata = self._download_json( + f'https://api.media.netapp.com/client/detail/{video_uuid}', video_uuid) + + brightcove_video_id = traverse_obj(metadata, ( + 'sections', lambda _, v: v['type'] == 'Player', 'video', {str}, any, {require('brightcove video id')})) + + video_item = traverse_obj(metadata, ('sections', lambda _, v: v['type'] == 'VideoDetail', any)) + + return self.url_result( + self._BC_URL.format(brightcove_video_id), BrightcoveNewIE, brightcove_video_id, + url_transparent=True, **self._parse_metadata(video_item)) + + +class NetAppCollectionIE(NetAppBaseIE): + _VALID_URL = r'https?://media\.netapp\.com/collection/(?P[0-9a-f-]+)' + _TESTS = [{ + 'url': 'https://media.netapp.com/collection/9820e190-f2a6-47ac-9c0a-98e5e64234a4', + 'info_dict': { + 'title': 'Featured sessions', + 'id': '9820e190-f2a6-47ac-9c0a-98e5e64234a4', + }, + 'playlist_count': 4, + }] + + def _entries(self, metadata): + for item in traverse_obj(metadata, ('items', lambda _, v: 
v['brightcoveVideoId'])): + brightcove_video_id = item['brightcoveVideoId'] + yield self.url_result( + self._BC_URL.format(brightcove_video_id), BrightcoveNewIE, brightcove_video_id, + url_transparent=True, **self._parse_metadata(item)) + + def _real_extract(self, url): + collection_uuid = self._match_id(url) + metadata = self._download_json( + f'https://api.media.netapp.com/client/collection/{collection_uuid}', collection_uuid) + + return self.playlist_result(self._entries(metadata), collection_uuid, playlist_title=metadata.get('name')) diff --git a/plugins/youtube_download/yt_dlp/extractor/neteasemusic.py b/plugins/youtube_download/yt_dlp/extractor/neteasemusic.py index 6c47086..8f3a7d2 100644 --- a/plugins/youtube_download/yt_dlp/extractor/neteasemusic.py +++ b/plugins/youtube_download/yt_dlp/extractor/neteasemusic.py @@ -528,7 +528,7 @@ class NetEaseMusicMvIE(NetEaseMusicBaseIE): class NetEaseMusicProgramIE(NetEaseMusicBaseIE): IE_NAME = 'netease:program' IE_DESC = '网易云音乐 - 电台节目' - _VALID_URL = r'https?://music\.163\.com/(?:#/)?program\?id=(?P[0-9]+)' + _VALID_URL = r'https?://music\.163\.com/(?:#/)?(?:dj|program)\?id=(?P[0-9]+)' _TESTS = [{ 'url': 'http://music.163.com/#/program?id=10109055', 'info_dict': { @@ -572,6 +572,9 @@ class NetEaseMusicProgramIE(NetEaseMusicBaseIE): 'params': { 'noplaylist': True, }, + }, { + 'url': 'https://music.163.com/#/dj?id=3706179315', + 'only_matching': True, }] def _real_extract(self, url): diff --git a/plugins/youtube_download/yt_dlp/extractor/netzkino.py b/plugins/youtube_download/yt_dlp/extractor/netzkino.py index c07b171..05f6c23 100644 --- a/plugins/youtube_download/yt_dlp/extractor/netzkino.py +++ b/plugins/youtube_download/yt_dlp/extractor/netzkino.py @@ -2,84 +2,59 @@ from .common import InfoExtractor from ..utils import ( clean_html, int_or_none, - js_to_json, - parse_iso8601, + url_or_none, + urljoin, ) +from ..utils.traversal import traverse_obj class NetzkinoIE(InfoExtractor): - _WORKING = False - _VALID_URL = r'https?://(?:www\.)?netzkino\.de/\#!/[^/]+/(?P[^/]+)' - + _GEO_COUNTRIES = ['DE'] + _VALID_URL = r'https?://(?:www\.)?netzkino\.de/details/(?P[^/?#]+)' _TESTS = [{ - 'url': 'https://www.netzkino.de/#!/scifikino/rakete-zum-mond', - 'md5': '92a3f8b76f8d7220acce5377ea5d4873', + 'url': 'https://www.netzkino.de/details/snow-beast', + 'md5': '1a4c90fe40d3ccabce163287e45e56dd', 'info_dict': { - 'id': 'rakete-zum-mond', + 'id': 'snow-beast', 'ext': 'mp4', - 'title': 'Rakete zum Mond \u2013 Jules Verne', - 'description': 'md5:f0a8024479618ddbfa450ff48ffa6c60', - 'upload_date': '20120813', - 'thumbnail': r're:https?://.*\.jpg$', - 'timestamp': 1344858571, + 'title': 'Snow Beast', 'age_limit': 12, - }, - 'params': { - 'skip_download': 'Download only works from Germany', - }, - }, { - 'url': 'https://www.netzkino.de/#!/filme/dr-jekyll-mrs-hyde-2', - 'md5': 'c7728b2dadd04ff6727814847a51ef03', - 'info_dict': { - 'id': 'dr-jekyll-mrs-hyde-2', - 'ext': 'mp4', - 'title': 'Dr. Jekyll & Mrs. 
Hyde 2', - 'description': 'md5:c2e9626ebd02de0a794b95407045d186', - 'upload_date': '20190130', - 'thumbnail': r're:https?://.*\.jpg$', - 'timestamp': 1548849437, - 'age_limit': 18, - }, - 'params': { - 'skip_download': 'Download only works from Germany', + 'alt_title': 'Snow Beast', + 'cast': 'count:3', + 'categories': 'count:7', + 'creators': 'count:2', + 'description': 'md5:e604a954a7f827a80e96a3a97d48b269', + 'location': 'US', + 'release_year': 2011, + 'thumbnail': r're:https?://.+\.jpg', }, }] def _real_extract(self, url): - mobj = self._match_valid_url(url) - video_id = mobj.group('id') + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + next_js_data = self._search_nextjs_data(webpage, video_id) - api_url = f'https://api.netzkino.de.simplecache.net/capi-2.0a/movies/{video_id}.json?d=www' - info = self._download_json(api_url, video_id) - custom_fields = info['custom_fields'] - - production_js = self._download_webpage( - 'http://www.netzkino.de/beta/dist/production.min.js', video_id, - note='Downloading player code') - avo_js = self._search_regex( - r'var urlTemplate=(\{.*?"\})', - production_js, 'URL templates') - templates = self._parse_json( - avo_js, video_id, transform_source=js_to_json) - - suffix = { - 'hds': '.mp4/manifest.f4m', - 'hls': '.mp4/master.m3u8', - 'pmd': '.mp4', - } - film_fn = custom_fields['Streaming'][0] - formats = [{ - 'format_id': key, - 'ext': 'mp4', - 'url': tpl.replace('{}', film_fn) + suffix[key], - } for key, tpl in templates.items()] + query = traverse_obj(next_js_data, ( + 'props', '__dehydratedState', 'queries', ..., 'state', + 'data', 'data', lambda _, v: v['__typename'] == 'CmsMovie', any)) + if 'DRM' in traverse_obj(query, ('licenses', 'nodes', ..., 'properties', {str})): + self.report_drm(video_id) return { 'id': video_id, - 'formats': formats, - 'title': info['title'], - 'age_limit': int_or_none(custom_fields.get('FSK')[0]), - 'timestamp': parse_iso8601(info.get('date'), delimiter=' '), - 'description': clean_html(info.get('content')), - 'thumbnail': info.get('thumbnail'), + **traverse_obj(query, { + 'title': ('originalTitle', {clean_html}), + 'age_limit': ('fskRating', {int_or_none}), + 'alt_title': ('originalTitle', {clean_html}, filter), + 'cast': ('cast', 'nodes', ..., 'person', 'name', {clean_html}, filter), + 'creators': (('directors', 'writers'), 'nodes', ..., 'person', 'name', {clean_html}, filter), + 'categories': ('categories', 'nodes', ..., 'category', 'title', {clean_html}, filter), + 'description': ('longSynopsis', {clean_html}, filter), + 'duration': ('runtimeInSeconds', {int_or_none}), + 'location': ('productionCountry', {clean_html}, filter), + 'release_year': ('productionYear', {int_or_none}), + 'thumbnail': ('coverImage', 'masterUrl', {url_or_none}), + 'url': ('videoSource', 'pmdUrl', {urljoin('https://pmd.netzkino-seite.netzkino.de/')}), + }), } diff --git a/plugins/youtube_download/yt_dlp/extractor/nextmedia.py b/plugins/youtube_download/yt_dlp/extractor/nextmedia.py deleted file mode 100644 index 81da3ff..0000000 --- a/plugins/youtube_download/yt_dlp/extractor/nextmedia.py +++ /dev/null @@ -1,238 +0,0 @@ -import urllib.parse - -from .common import InfoExtractor -from ..utils import ( - clean_html, - get_element_by_class, - int_or_none, - parse_iso8601, - remove_start, - unified_timestamp, -) - - -class NextMediaIE(InfoExtractor): - IE_DESC = '蘋果日報' - _VALID_URL = r'https?://hk\.apple\.nextmedia\.com/[^/]+/[^/]+/(?P\d+)/(?P\d+)' - _TESTS = [{ - 'url': 
'http://hk.apple.nextmedia.com/realtime/news/20141108/53109199', - 'md5': 'dff9fad7009311c421176d1ac90bfe4f', - 'info_dict': { - 'id': '53109199', - 'ext': 'mp4', - 'title': '【佔領金鐘】50外國領事議員撐場 讚學生勇敢香港有希望', - 'thumbnail': r're:^https?://.*\.jpg$', - 'description': 'md5:28222b9912b6665a21011b034c70fcc7', - 'timestamp': 1415456273, - 'upload_date': '20141108', - }, - }] - - _URL_PATTERN = r'\{ url: \'(.+)\' \}' - - def _real_extract(self, url): - news_id = self._match_id(url) - page = self._download_webpage(url, news_id) - return self._extract_from_nextmedia_page(news_id, url, page) - - def _extract_from_nextmedia_page(self, news_id, url, page): - redirection_url = self._search_regex( - r'window\.location\.href\s*=\s*([\'"])(?P(?!\1).+)\1', - page, 'redirection URL', default=None, group='url') - if redirection_url: - return self.url_result(urllib.parse.urljoin(url, redirection_url)) - - title = self._fetch_title(page) - video_url = self._search_regex(self._URL_PATTERN, page, 'video url') - - attrs = { - 'id': news_id, - 'title': title, - 'url': video_url, # ext can be inferred from url - 'thumbnail': self._fetch_thumbnail(page), - 'description': self._fetch_description(page), - } - - timestamp = self._fetch_timestamp(page) - if timestamp: - attrs['timestamp'] = timestamp - else: - attrs['upload_date'] = self._fetch_upload_date(url) - - return attrs - - def _fetch_title(self, page): - return self._og_search_title(page) - - def _fetch_thumbnail(self, page): - return self._og_search_thumbnail(page) - - def _fetch_timestamp(self, page): - date_created = self._search_regex('"dateCreated":"([^"]+)"', page, 'created time') - return parse_iso8601(date_created) - - def _fetch_upload_date(self, url): - return self._search_regex(self._VALID_URL, url, 'upload date', group='date') - - def _fetch_description(self, page): - return self._og_search_property('description', page) - - -class NextMediaActionNewsIE(NextMediaIE): # XXX: Do not subclass from concrete IE - IE_DESC = '蘋果日報 - 動新聞' - _VALID_URL = r'https?://hk\.dv\.nextmedia\.com/actionnews/[^/]+/(?P\d+)/(?P\d+)/\d+' - _TESTS = [{ - 'url': 'http://hk.dv.nextmedia.com/actionnews/hit/20150121/19009428/20061460', - 'md5': '05fce8ffeed7a5e00665d4b7cf0f9201', - 'info_dict': { - 'id': '19009428', - 'ext': 'mp4', - 'title': '【壹週刊】細10年男友偷食 50歲邵美琪再失戀', - 'thumbnail': r're:^https?://.*\.jpg$', - 'description': 'md5:cd802fad1f40fd9ea178c1e2af02d659', - 'timestamp': 1421791200, - 'upload_date': '20150120', - }, - }] - - def _real_extract(self, url): - news_id = self._match_id(url) - actionnews_page = self._download_webpage(url, news_id) - article_url = self._og_search_url(actionnews_page) - article_page = self._download_webpage(article_url, news_id) - return self._extract_from_nextmedia_page(news_id, url, article_page) - - -class AppleDailyIE(NextMediaIE): # XXX: Do not subclass from concrete IE - IE_DESC = '臺灣蘋果日報' - _VALID_URL = r'https?://(www|ent)\.appledaily\.com\.tw/[^/]+/[^/]+/[^/]+/(?P\d+)/(?P\d+)(/.*)?' 
- _TESTS = [{ - 'url': 'http://ent.appledaily.com.tw/enews/article/entertainment/20150128/36354694', - 'md5': 'a843ab23d150977cc55ef94f1e2c1e4d', - 'info_dict': { - 'id': '36354694', - 'ext': 'mp4', - 'title': '周亭羽走過摩鐵陰霾2男陪吃 九把刀孤寒看醫生', - 'thumbnail': r're:^https?://.*\.jpg$', - 'description': 'md5:2acd430e59956dc47cd7f67cb3c003f4', - 'upload_date': '20150128', - }, - }, { - 'url': 'http://www.appledaily.com.tw/realtimenews/article/strange/20150128/550549/%E4%B8%8D%E6%BB%BF%E8%A2%AB%E8%B8%A9%E8%85%B3%E3%80%80%E5%B1%B1%E6%9D%B1%E5%85%A9%E5%A4%A7%E5%AA%BD%E4%B8%80%E8%B7%AF%E6%89%93%E4%B8%8B%E8%BB%8A', - 'md5': '86b4e9132d158279c7883822d94ccc49', - 'info_dict': { - 'id': '550549', - 'ext': 'mp4', - 'title': '不滿被踩腳 山東兩大媽一路打下車', - 'thumbnail': r're:^https?://.*\.jpg$', - 'description': 'md5:175b4260c1d7c085993474217e4ab1b4', - 'upload_date': '20150128', - }, - }, { - 'url': 'http://www.appledaily.com.tw/animation/realtimenews/new/20150128/5003671', - 'md5': '03df296d95dedc2d5886debbb80cb43f', - 'info_dict': { - 'id': '5003671', - 'ext': 'mp4', - 'title': '20正妹熱舞 《刀龍傳說Online》火辣上市', - 'thumbnail': r're:^https?://.*\.jpg$', - 'description': 'md5:23c0aac567dc08c9c16a3161a2c2e3cd', - 'upload_date': '20150128', - }, - 'skip': 'redirect to http://www.appledaily.com.tw/animation/', - }, { - # No thumbnail - 'url': 'http://www.appledaily.com.tw/animation/realtimenews/new/20150128/5003673/', - 'md5': 'b06182cd386ea7bc6115ec7ff0f72aeb', - 'info_dict': { - 'id': '5003673', - 'ext': 'mp4', - 'title': '半夜尿尿 好像會看到___', - 'description': 'md5:61d2da7fe117fede148706cdb85ac066', - 'upload_date': '20150128', - }, - 'expected_warnings': [ - 'video thumbnail', - ], - 'skip': 'redirect to http://www.appledaily.com.tw/animation/', - }, { - 'url': 'http://www.appledaily.com.tw/appledaily/article/supplement/20140417/35770334/', - 'md5': 'eaa20e6b9df418c912d7f5dec2ba734d', - 'info_dict': { - 'id': '35770334', - 'ext': 'mp4', - 'title': '咖啡占卜測 XU裝熟指數', - 'thumbnail': r're:^https?://.*\.jpg$', - 'description': 'md5:7b859991a6a4fedbdf3dd3b66545c748', - 'upload_date': '20140417', - }, - }, { - 'url': 'http://www.appledaily.com.tw/actionnews/appledaily/7/20161003/960588/', - 'only_matching': True, - }, { - # Redirected from http://ent.appledaily.com.tw/enews/article/entertainment/20150128/36354694 - 'url': 'http://ent.appledaily.com.tw/section/article/headline/20150128/36354694', - 'only_matching': True, - }] - - _URL_PATTERN = r'\{url: \'(.+)\'\}' - - def _fetch_title(self, page): - return (self._html_search_regex(r'
<h1 id="h1">([^<>]+)</h1>
', page, 'news title', default=None) - or self._html_search_meta('description', page, 'news title')) - - def _fetch_thumbnail(self, page): - return self._html_search_regex(r"setInitialImage\(\'([^']+)'\)", page, 'video thumbnail', fatal=False) - - def _fetch_timestamp(self, page): - return None - - def _fetch_description(self, page): - return self._html_search_meta('description', page, 'news description') - - -class NextTVIE(InfoExtractor): - _WORKING = False - _ENABLED = None # XXX: pass through to GenericIE - IE_DESC = '壹電視' - _VALID_URL = r'https?://(?:www\.)?nexttv\.com\.tw/(?:[^/]+/)+(?P\d+)' - - _TEST = { - 'url': 'http://www.nexttv.com.tw/news/realtime/politics/11779671', - 'info_dict': { - 'id': '11779671', - 'ext': 'mp4', - 'title': '「超收稅」近4千億! 藍議員籲發消費券', - 'thumbnail': r're:^https?://.*\.jpg$', - 'timestamp': 1484825400, - 'upload_date': '20170119', - 'view_count': int, - }, - } - - def _real_extract(self, url): - video_id = self._match_id(url) - - webpage = self._download_webpage(url, video_id) - - title = self._html_search_regex( - r']*>([^<]+)', webpage, 'title') - - data = self._hidden_inputs(webpage) - - video_url = data['ntt-vod-src-detailview'] - - date_str = get_element_by_class('date', webpage) - timestamp = unified_timestamp(date_str + '+0800') if date_str else None - - view_count = int_or_none(remove_start( - clean_html(get_element_by_class('click', webpage)), '點閱:')) - - return { - 'id': video_id, - 'title': title, - 'url': video_url, - 'thumbnail': data.get('ntt-vod-img-src'), - 'timestamp': timestamp, - 'view_count': view_count, - } diff --git a/plugins/youtube_download/yt_dlp/extractor/nhk.py b/plugins/youtube_download/yt_dlp/extractor/nhk.py index eef3ed8..99186ad 100644 --- a/plugins/youtube_download/yt_dlp/extractor/nhk.py +++ b/plugins/youtube_download/yt_dlp/extractor/nhk.py @@ -23,96 +23,38 @@ from ..utils import ( class NhkBaseIE(InfoExtractor): - _API_URL_TEMPLATE = 'https://nwapi.nhk.jp/nhkworld/%sod%slist/v7b/%s/%s/%s/all%s.json' + _API_URL_TEMPLATE = 'https://api.nhkworld.jp/showsapi/v1/{lang}/{content_format}_{page_type}/{m_id}{extra_page}' _BASE_URL_REGEX = r'https?://www3\.nhk\.or\.jp/nhkworld/(?P[a-z]{2})/' def _call_api(self, m_id, lang, is_video, is_episode, is_clip): + content_format = 'video' if is_video else 'audio' + content_type = 'clips' if is_clip else 'episodes' + if not is_episode: + extra_page = f'/{content_format}_{content_type}' + page_type = 'programs' + else: + extra_page = '' + page_type = content_type + return self._download_json( - self._API_URL_TEMPLATE % ( - 'v' if is_video else 'r', - 'clip' if is_clip else 'esd', - 'episode' if is_episode else 'program', - m_id, lang, '/all' if is_video else ''), - m_id, query={'apikey': 'EJfK8jdS57GqlupFgAfAAwr573q01y6k'})['data']['episodes'] or [] - - def _get_api_info(self, refresh=True): - if not refresh: - return self.cache.load('nhk', 'api_info') - - self.cache.store('nhk', 'api_info', {}) - movie_player_js = self._download_webpage( - 'https://movie-a.nhk.or.jp/world/player/js/movie-player.js', None, - note='Downloading stream API information') - api_info = { - 'url': self._search_regex( - r'prod:[^;]+\bapiUrl:\s*[\'"]([^\'"]+)[\'"]', movie_player_js, None, 'stream API url'), - 'token': self._search_regex( - r'prod:[^;]+\btoken:\s*[\'"]([^\'"]+)[\'"]', movie_player_js, None, 'stream API token'), - } - self.cache.store('nhk', 'api_info', api_info) - return api_info - - def _extract_stream_info(self, vod_id): - for refresh in (False, True): - api_info = self._get_api_info(refresh) - if not 
api_info: - continue - - api_url = api_info.pop('url') - meta = traverse_obj( - self._download_json( - api_url, vod_id, 'Downloading stream url info', fatal=False, query={ - **api_info, - 'type': 'json', - 'optional_id': vod_id, - 'active_flg': 1, - }), ('meta', 0)) - stream_url = traverse_obj( - meta, ('movie_url', ('mb_auto', 'auto_sp', 'auto_pc'), {url_or_none}), get_all=False) - - if stream_url: - formats, subtitles = self._extract_m3u8_formats_and_subtitles(stream_url, vod_id) - return { - **traverse_obj(meta, { - 'duration': ('duration', {int_or_none}), - 'timestamp': ('publication_date', {unified_timestamp}), - 'release_timestamp': ('insert_date', {unified_timestamp}), - 'modified_timestamp': ('update_date', {unified_timestamp}), - }), - 'formats': formats, - 'subtitles': subtitles, - } - raise ExtractorError('Unable to extract stream url') + self._API_URL_TEMPLATE.format( + lang=lang, content_format=content_format, page_type=page_type, + m_id=m_id, extra_page=extra_page), + join_nonempty(m_id, lang)) def _extract_episode_info(self, url, episode=None): fetch_episode = episode is None lang, m_type, episode_id = NhkVodIE._match_valid_url(url).group('lang', 'type', 'id') is_video = m_type != 'audio' - if is_video: - episode_id = episode_id[:4] + '-' + episode_id[4:] - if fetch_episode: episode = self._call_api( - episode_id, lang, is_video, True, episode_id[:4] == '9999')[0] + episode_id, lang, is_video, is_episode=True, is_clip=episode_id[:4] == '9999') - def get_clean_field(key): - return clean_html(episode.get(key + '_clean') or episode.get(key)) + video_id = join_nonempty('id', 'lang', from_dict=episode) - title = get_clean_field('sub_title') - series = get_clean_field('title') - - thumbnails = [] - for s, w, h in [('', 640, 360), ('_l', 1280, 720)]: - img_path = episode.get('image' + s) - if not img_path: - continue - thumbnails.append({ - 'id': f'{h}p', - 'height': h, - 'width': w, - 'url': 'https://www3.nhk.or.jp' + img_path, - }) + title = episode.get('title') + series = traverse_obj(episode, (('video_program', 'audio_program'), any, 'title')) episode_name = title if series and title: @@ -125,37 +67,52 @@ class NhkBaseIE(InfoExtractor): episode_name = None info = { - 'id': episode_id + '-' + lang, + 'id': video_id, 'title': title, - 'description': get_clean_field('description'), - 'thumbnails': thumbnails, 'series': series, 'episode': episode_name, + **traverse_obj(episode, { + 'description': ('description', {str}), + 'release_timestamp': ('first_broadcasted_at', {unified_timestamp}), + 'categories': ('categories', ..., 'name', {str}), + 'tags': ('tags', ..., 'name', {str}), + 'thumbnails': ('images', lambda _, v: v['url'], { + 'url': ('url', {urljoin(url)}), + 'width': ('width', {int_or_none}), + 'height': ('height', {int_or_none}), + }), + 'webpage_url': ('url', {urljoin(url)}), + }), + 'extractor_key': NhkVodIE.ie_key(), + 'extractor': NhkVodIE.IE_NAME, } - if is_video: - vod_id = episode['vod_id'] - info.update({ - **self._extract_stream_info(vod_id), - 'id': vod_id, - }) - + # XXX: We are assuming that 'video' and 'audio' are mutually exclusive + stream_info = traverse_obj(episode, (('video', 'audio'), {dict}, any)) or {} + if not stream_info.get('url'): + self.raise_no_formats('Stream not found; it has most likely expired', expected=True) else: - if fetch_episode: + stream_url = stream_info['url'] + if is_video: + formats, subtitles = self._extract_m3u8_formats_and_subtitles(stream_url, video_id) + info.update({ + 'formats': formats, + 'subtitles': subtitles, + 
**traverse_obj(stream_info, ({ + 'duration': ('duration', {int_or_none}), + 'timestamp': ('published_at', {unified_timestamp}), + })), + }) + else: # From https://www3.nhk.or.jp/nhkworld/common/player/radio/inline/rod.html - audio_path = remove_end(episode['audio']['audio'], '.m4a') + audio_path = remove_end(stream_url, '.m4a') info['formats'] = self._extract_m3u8_formats( f'{urljoin("https://vod-stream.nhk.jp", audio_path)}/index.m3u8', episode_id, 'm4a', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False) for f in info['formats']: f['language'] = lang - else: - info.update({ - '_type': 'url_transparent', - 'ie_key': NhkVodIE.ie_key(), - 'url': url, - }) + return info @@ -168,29 +125,29 @@ class NhkVodIE(NhkBaseIE): # Content available only for a limited period of time. Visit # https://www3.nhk.or.jp/nhkworld/en/ondemand/ for working samples. _TESTS = [{ - 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/2049126/', + 'url': 'https://www3.nhk.or.jp/nhkworld/en/shows/2049165/', 'info_dict': { - 'id': 'nw_vod_v_en_2049_126_20230413233000_01_1681398302', + 'id': '2049165-en', 'ext': 'mp4', - 'title': 'Japan Railway Journal - The Tohoku Shinkansen: Full Speed Ahead', - 'description': 'md5:49f7c5b206e03868a2fdf0d0814b92f6', + 'title': 'Japan Railway Journal - Choshi Electric Railway: Fighting to Get Back on Track', + 'description': 'md5:ab57df2fca7f04245148c2e787bb203d', 'thumbnail': r're:https://.+/.+\.jpg', - 'episode': 'The Tohoku Shinkansen: Full Speed Ahead', + 'episode': 'Choshi Electric Railway: Fighting to Get Back on Track', 'series': 'Japan Railway Journal', - 'modified_timestamp': 1707217907, - 'timestamp': 1681428600, - 'release_timestamp': 1693883728, - 'duration': 1679, - 'upload_date': '20230413', - 'modified_date': '20240206', - 'release_date': '20230905', + 'duration': 1680, + 'categories': ['Biz & Tech'], + 'tags': ['Akita', 'Chiba', 'Trains', 'Transcript', 'All (Japan Navigator)'], + 'timestamp': 1759055880, + 'upload_date': '20250928', + 'release_timestamp': 1758810600, + 'release_date': '20250925', }, }, { # video clip 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999011/', 'md5': '153c3016dfd252ba09726588149cf0e7', 'info_dict': { - 'id': 'lpZXIwaDE6_Z-976CPsFdxyICyWUzlT5', + 'id': '9999011-en', 'ext': 'mp4', 'title': 'Dining with the Chef - Chef Saito\'s Family recipe: MENCHI-KATSU', 'description': 'md5:5aee4a9f9d81c26281862382103b0ea5', @@ -198,24 +155,23 @@ class NhkVodIE(NhkBaseIE): 'series': 'Dining with the Chef', 'episode': 'Chef Saito\'s Family recipe: MENCHI-KATSU', 'duration': 148, - 'upload_date': '20190816', - 'release_date': '20230902', - 'release_timestamp': 1693619292, - 'modified_timestamp': 1707217907, - 'modified_date': '20240206', - 'timestamp': 1565997540, + 'categories': ['Food'], + 'tags': ['Washoku'], + 'timestamp': 1548212400, + 'upload_date': '20190123', }, }, { # radio - 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/livinginjapan-20231001-1/', + 'url': 'https://www3.nhk.or.jp/nhkworld/en/shows/audio/livinginjapan-20240901-1/', 'info_dict': { - 'id': 'livinginjapan-20231001-1-en', + 'id': 'livinginjapan-20240901-1-en', 'ext': 'm4a', - 'title': 'Living in Japan - Tips for Travelers to Japan / Ramen Vending Machines', + 'title': 'Living in Japan - Weekend Hiking / Self-protection from crime', 'series': 'Living in Japan', - 'description': 'md5:0a0e2077d8f07a03071e990a6f51bfab', + 'description': 'md5:4d0e14ab73bdbfedb60a53b093954ed6', 'thumbnail': r're:https://.+/.+\.jpg', - 'episode': 'Tips for Travelers to Japan 
/ Ramen Vending Machines', + 'episode': 'Weekend Hiking / Self-protection from crime', + 'categories': ['Interactive'], }, }, { 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/2015173/', @@ -256,96 +212,51 @@ class NhkVodIE(NhkBaseIE): }, 'skip': 'expires 2023-10-15', }, { - # a one-off (single-episode series). title from the api is just '
<p></p>
' - 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/3004952/', + # a one-off (single-episode series). title from the api is just null + 'url': 'https://www3.nhk.or.jp/nhkworld/en/shows/3026036/', 'info_dict': { - 'id': 'nw_vod_v_en_3004_952_20230723091000_01_1690074552', + 'id': '3026036-en', 'ext': 'mp4', - 'title': 'Barakan Discovers - AMAMI OSHIMA: Isson\'s Treasure Isla', - 'description': 'md5:5db620c46a0698451cc59add8816b797', - 'thumbnail': r're:https://.+/.+\.jpg', - 'release_date': '20230905', - 'timestamp': 1690103400, - 'duration': 2939, - 'release_timestamp': 1693898699, - 'upload_date': '20230723', - 'modified_timestamp': 1707217907, - 'modified_date': '20240206', - 'episode': 'AMAMI OSHIMA: Isson\'s Treasure Isla', - 'series': 'Barakan Discovers', + 'title': 'STATELESS: The Japanese Left Behind in the Philippines', + 'description': 'md5:9a2fd51cdfa9f52baae28569e0053786', + 'duration': 2955, + 'thumbnail': 'https://www3.nhk.or.jp/nhkworld/en/shows/3026036/images/wide_l_QPtWpt4lzVhm3NzPAMIIF35MCg4CdNwcikPaTS5Q.jpg', + 'categories': ['Documentary', 'Culture & Lifestyle'], + 'tags': ['Transcript', 'Documentary 360', 'The Pursuit of PEACE'], + 'timestamp': 1758931800, + 'upload_date': '20250927', + 'release_timestamp': 1758931800, + 'release_date': '20250927', }, }, { # /ondemand/video/ url with alphabetical character in 5th position of id 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a07/', 'info_dict': { - 'id': 'nw_c_en_9999-a07', + 'id': '9999a07-en', 'ext': 'mp4', 'episode': 'Mini-Dramas on SDGs: Ep 1 Close the Gender Gap [Director\'s Cut]', 'series': 'Mini-Dramas on SDGs', - 'modified_date': '20240206', 'title': 'Mini-Dramas on SDGs - Mini-Dramas on SDGs: Ep 1 Close the Gender Gap [Director\'s Cut]', 'description': 'md5:3f9dcb4db22fceb675d90448a040d3f6', - 'timestamp': 1621962360, - 'duration': 189, - 'release_date': '20230903', - 'modified_timestamp': 1707217907, + 'timestamp': 1621911600, + 'duration': 190, 'upload_date': '20210525', 'thumbnail': r're:https://.+/.+\.jpg', - 'release_timestamp': 1693713487, + 'categories': ['Current Affairs', 'Entertainment'], }, }, { 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999d17/', 'info_dict': { - 'id': 'nw_c_en_9999-d17', + 'id': '9999d17-en', 'ext': 'mp4', 'title': 'Flowers of snow blossom - The 72 Pentads of Yamato', 'description': 'Today’s focus: Snow', - 'release_timestamp': 1693792402, - 'release_date': '20230904', - 'upload_date': '20220128', - 'timestamp': 1643370960, 'thumbnail': r're:https://.+/.+\.jpg', 'duration': 136, - 'series': '', - 'modified_date': '20240206', - 'modified_timestamp': 1707217907, - }, - }, { - # new /shows/ url format - 'url': 'https://www3.nhk.or.jp/nhkworld/en/shows/2032307/', - 'info_dict': { - 'id': 'nw_vod_v_en_2032_307_20240321113000_01_1710990282', - 'ext': 'mp4', - 'title': 'Japanology Plus - 20th Anniversary Special Part 1', - 'description': 'md5:817d41fc8e54339ad2a916161ea24faf', - 'episode': '20th Anniversary Special Part 1', - 'series': 'Japanology Plus', - 'thumbnail': r're:https://.+/.+\.jpg', - 'duration': 1680, - 'timestamp': 1711020600, - 'upload_date': '20240321', - 'release_timestamp': 1711022683, - 'release_date': '20240321', - 'modified_timestamp': 1711031012, - 'modified_date': '20240321', - }, - }, { - 'url': 'https://www3.nhk.or.jp/nhkworld/en/shows/3020025/', - 'info_dict': { - 'id': 'nw_vod_v_en_3020_025_20230325144000_01_1679723944', - 'ext': 'mp4', - 'title': '100 Ideas to Save the World - Working Styles Evolve', - 'description': 
'md5:9e6c7778eaaf4f7b4af83569649f84d9', - 'episode': 'Working Styles Evolve', - 'series': '100 Ideas to Save the World', - 'thumbnail': r're:https://.+/.+\.jpg', - 'duration': 899, - 'upload_date': '20230325', - 'timestamp': 1679755200, - 'release_date': '20230905', - 'release_timestamp': 1693880540, - 'modified_date': '20240206', - 'modified_timestamp': 1707217907, + 'categories': ['Culture & Lifestyle', 'Science & Nature'], + 'tags': ['Nara', 'Temples & Shrines', 'Winter', 'Snow'], + 'timestamp': 1643339040, + 'upload_date': '20220128', }, }, { # new /shows/audio/ url format @@ -373,6 +284,7 @@ class NhkVodProgramIE(NhkBaseIE): 'id': 'sumo', 'title': 'GRAND SUMO Highlights', 'description': 'md5:fc20d02dc6ce85e4b72e0273aa52fdbf', + 'series': 'GRAND SUMO Highlights', }, 'playlist_mincount': 1, }, { @@ -381,6 +293,7 @@ class NhkVodProgramIE(NhkBaseIE): 'id': 'japanrailway', 'title': 'Japan Railway Journal', 'description': 'md5:ea39d93af7d05835baadf10d1aae0e3f', + 'series': 'Japan Railway Journal', }, 'playlist_mincount': 12, }, { @@ -390,6 +303,7 @@ class NhkVodProgramIE(NhkBaseIE): 'id': 'japanrailway', 'title': 'Japan Railway Journal', 'description': 'md5:ea39d93af7d05835baadf10d1aae0e3f', + 'series': 'Japan Railway Journal', }, 'playlist_mincount': 12, }, { @@ -399,17 +313,9 @@ class NhkVodProgramIE(NhkBaseIE): 'id': 'livinginjapan', 'title': 'Living in Japan', 'description': 'md5:665bb36ec2a12c5a7f598ee713fc2b54', + 'series': 'Living in Japan', }, - 'playlist_mincount': 12, - }, { - # /tv/ program url - 'url': 'https://www3.nhk.or.jp/nhkworld/en/tv/designtalksplus/', - 'info_dict': { - 'id': 'designtalksplus', - 'title': 'DESIGN TALKS plus', - 'description': 'md5:47b3b3a9f10d4ac7b33b53b70a7d2837', - }, - 'playlist_mincount': 20, + 'playlist_mincount': 11, }, { 'url': 'https://www3.nhk.or.jp/nhkworld/en/shows/10yearshayaomiyazaki/', 'only_matching': True, @@ -430,9 +336,8 @@ class NhkVodProgramIE(NhkBaseIE): program_id, lang, m_type != 'audio', False, episode_type == 'clip') def entries(): - for episode in episodes: - if episode_path := episode.get('url'): - yield self._extract_episode_info(urljoin(url, episode_path), episode) + for episode in traverse_obj(episodes, ('items', lambda _, v: v['url'])): + yield self._extract_episode_info(urljoin(url, episode['url']), episode) html = self._download_webpage(url, program_id) program_title = self._extract_meta_from_class_elements([ @@ -446,7 +351,7 @@ class NhkVodProgramIE(NhkBaseIE): 'tAudioProgramMain__info', # /shows/audio/programs/ 'p-program-description'], html) # /tv/ - return self.playlist_result(entries(), program_id, program_title, program_description) + return self.playlist_result(entries(), program_id, program_title, program_description, series=program_title) class NhkForSchoolBangumiIE(InfoExtractor): diff --git a/plugins/youtube_download/yt_dlp/extractor/nowcanal.py b/plugins/youtube_download/yt_dlp/extractor/nowcanal.py new file mode 100644 index 0000000..46a1eeb --- /dev/null +++ b/plugins/youtube_download/yt_dlp/extractor/nowcanal.py @@ -0,0 +1,37 @@ +from .brightcove import BrightcoveNewIE +from .common import InfoExtractor + + +class NowCanalIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?nowcanal\.pt(?:/[\w-]+)+/detalhe/(?P[\w-]+)' + _TESTS = [{ + 'url': 'https://www.nowcanal.pt/ultimas/detalhe/pedro-sousa-hjulmand-pode-ter-uma-saida-limpa-do-sporting-daqui-a-um-ano', + 'md5': '047f17cb783e66e467d703e704bbc95d', + 'info_dict': { + 'id': '6376598467112', + 'ext': 'mp4', + 'title': 'Pedro Sousa «Hjulmand pode ter uma 
saída limpa do Sporting daqui a um ano»', + 'description': '', + 'uploader_id': '6108484330001', + 'duration': 65.237, + 'thumbnail': r're:^https://.+\.jpg', + 'timestamp': 1754440620, + 'upload_date': '20250806', + 'tags': ['now'], + }, + }, { + 'url': 'https://www.nowcanal.pt/programas/frente-a-frente/detalhe/frente-a-frente-eva-cruzeiro-ps-e-rita-matias-chega', + 'only_matching': True, + }] + + _BC_URL_TMPL = 'https://players.brightcove.net/6108484330001/chhIqzukMq_default/index.html?videoId={}' + + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + + video_id = self._search_json( + r'videoHandler\.addBrightcoveVideoWithJson\(\[', + webpage, 'video data', display_id)['brightcoveVideoId'] + + return self.url_result(self._BC_URL_TMPL.format(video_id), BrightcoveNewIE) diff --git a/plugins/youtube_download/yt_dlp/extractor/ntvru.py b/plugins/youtube_download/yt_dlp/extractor/ntvru.py index 1ab1be0..07710c1 100644 --- a/plugins/youtube_download/yt_dlp/extractor/ntvru.py +++ b/plugins/youtube_download/yt_dlp/extractor/ntvru.py @@ -1,17 +1,40 @@ from .common import InfoExtractor from ..utils import ( int_or_none, - strip_or_none, + parse_iso8601, unescapeHTML, + url_or_none, xpath_text, ) +from ..utils.traversal import traverse_obj class NTVRuIE(InfoExtractor): IE_NAME = 'ntv.ru' - _VALID_URL = r'https?://(?:www\.)?ntv\.ru/(?:[^/]+/)*(?P[^/?#&]+)' + _VALID_URL = r'https?://(?:www\.)?ntv\.ru/(?:[^/#?]+/)*(?P[^/?#&]+)' _TESTS = [{ + # JSON Api is geo restricted + 'url': 'https://www.ntv.ru/peredacha/svoya_igra/m58980/o818800', + 'md5': '818962a1b52747d446db7cd5be43e142', + 'info_dict': { + 'id': '2520563', + 'ext': 'mp4', + 'title': 'Участники: Ирина Петрова, Сергей Коновалов, Кристина Кораблина', + 'description': 'md5:fcbd21cd45238a940b95550f9e178e3e', + 'thumbnail': r're:^http://.*\.jpg', + 'duration': 2462, + 'view_count': int, + 'comment_count': int, + 'tags': ['игры и игрушки'], + 'timestamp': 1761821096, + 'upload_date': '20251030', + 'release_timestamp': 1761821096, + 'release_date': '20251030', + 'modified_timestamp': 1761821096, + 'modified_date': '20251030', + }, + }, { 'url': 'http://www.ntv.ru/novosti/863142/', 'md5': 'ba7ea172a91cb83eb734cad18c10e723', 'info_dict': { @@ -22,31 +45,35 @@ class NTVRuIE(InfoExtractor): 'thumbnail': r're:^http://.*\.jpg', 'duration': 136, 'view_count': int, + 'comment_count': int, + 'tags': ['ВМС', 'захват', 'митинги', 'Севастополь', 'Украина'], + 'timestamp': 1395222013, + 'upload_date': '20140319', + 'release_timestamp': 1395222013, + 'release_date': '20140319', + 'modified_timestamp': 1395222013, + 'modified_date': '20140319', }, }, { - 'url': 'http://www.ntv.ru/video/novosti/750370/', - 'md5': 'adecff79691b4d71e25220a191477124', - 'info_dict': { - 'id': '750370', - 'ext': 'mp4', - 'title': 'Родные пассажиров пропавшего Boeing не верят в трагический исход', - 'description': 'Родные пассажиров пропавшего Boeing не верят в трагический исход', - 'thumbnail': r're:^http://.*\.jpg', - 'duration': 172, - 'view_count': int, - }, - 'skip': '404 Not Found', - }, { + # Requires unescapeHTML 'url': 'http://www.ntv.ru/peredacha/segodnya/m23700/o232416', 'md5': '82dbd49b38e3af1d00df16acbeab260c', 'info_dict': { 'id': '747480', 'ext': 'mp4', - 'title': '«Сегодня». 21 марта 2014 года. 16:00', - 'description': '«Сегодня». 21 марта 2014 года. 16:00', + 'title': '"Сегодня". 21 марта 2014 года. 
16:00 ', + 'description': 'md5:bed80745ca72af557433195f51a02785', 'thumbnail': r're:^http://.*\.jpg', 'duration': 1496, 'view_count': int, + 'comment_count': int, + 'tags': ['Брюссель', 'гражданство', 'ЕС', 'Крым', 'ОСАГО', 'саммит', 'санкции', 'события', 'чиновники', 'рейтинг'], + 'timestamp': 1395406951, + 'upload_date': '20140321', + 'release_timestamp': 1395406951, + 'release_date': '20140321', + 'modified_timestamp': 1395406951, + 'modified_date': '20140321', }, }, { 'url': 'https://www.ntv.ru/kino/Koma_film/m70281/o336036/video/', @@ -54,11 +81,19 @@ class NTVRuIE(InfoExtractor): 'info_dict': { 'id': '1126480', 'ext': 'mp4', - 'title': 'Остросюжетный фильм «Кома»', - 'description': 'Остросюжетный фильм «Кома»', + 'title': 'Остросюжетный фильм "Кома"', + 'description': 'md5:e79ffd0887425a0f05a58885c408d7d8', 'thumbnail': r're:^http://.*\.jpg', - 'duration': 5592, + 'duration': 5608, 'view_count': int, + 'comment_count': int, + 'tags': ['кино'], + 'timestamp': 1432868572, + 'upload_date': '20150529', + 'release_timestamp': 1432868572, + 'release_date': '20150529', + 'modified_timestamp': 1432868572, + 'modified_date': '20150529', }, }, { 'url': 'http://www.ntv.ru/serial/Delo_vrachey/m31760/o233916/', @@ -66,11 +101,19 @@ class NTVRuIE(InfoExtractor): 'info_dict': { 'id': '751482', 'ext': 'mp4', - 'title': '«Дело врачей»: «Деревце жизни»', - 'description': '«Дело врачей»: «Деревце жизни»', + 'title': '"Дело врачей": "Деревце жизни"', + 'description': 'md5:d6fbf9193f880f50d9cbfbcc954161c1', 'thumbnail': r're:^http://.*\.jpg', 'duration': 2590, 'view_count': int, + 'comment_count': int, + 'tags': ['врачи', 'больницы'], + 'timestamp': 1395882300, + 'upload_date': '20140327', + 'release_timestamp': 1395882300, + 'release_date': '20140327', + 'modified_timestamp': 1395882300, + 'modified_date': '20140327', }, }, { # Schemeless file URL @@ -78,48 +121,26 @@ class NTVRuIE(InfoExtractor): 'only_matching': True, }] - _VIDEO_ID_REGEXES = [ - r']+?src=["\'](?P(?:https?:)?//(?:www\.)?pornhub(?:premium)?\.(?:com|net|org)/embed/[\da-z]+)'] _TESTS = [{ 'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015', - 'md5': 'a6391306d050e4547f62b3f485dd9ba9', + 'md5': '4d4a4e9178b655776f86cf89ecaf0edf', 'info_dict': { 'id': '648719015', 'ext': 'mp4', 'title': 'Seductive Indian beauty strips down and fingers her pink pussy', - 'uploader': 'Babes', + 'uploader': 'BABES-COM', + 'uploader_id': '/users/babes-com', 'upload_date': '20130628', 'timestamp': 1372447216, 'duration': 361, 'view_count': int, 'like_count': int, - 'dislike_count': int, 'comment_count': int, 'age_limit': 18, 'tags': list, 'categories': list, 'cast': list, + 'thumbnail': r're:https?://.+', }, }, { # non-ASCII title @@ -480,13 +482,6 @@ class PornHubIE(PornHubBaseIE): comment_count = self._extract_count( r'All Comments\s*\(([\d,.]+)\)', webpage, 'comment') - def extract_list(meta_key): - div = self._search_regex( - rf'(?s)]+\bclass=["\'].*?\b{meta_key}Wrapper[^>]*>(.+?)', - webpage, meta_key, default=None) - if div: - return [clean_html(x).strip() for x in re.findall(r'(?s)]+\bhref=[^>]+>.+?', div)] - info = self._search_json_ld(webpage, video_id, default={}) # description provided in JSON-LD is irrelevant info['description'] = None @@ -505,9 +500,11 @@ class PornHubIE(PornHubBaseIE): 'comment_count': comment_count, 'formats': formats, 'age_limit': 18, - 'tags': extract_list('tags'), - 'categories': extract_list('categories'), - 'cast': extract_list('pornstars'), + **traverse_obj(webpage, { + 'tags': 
({find_elements(attr='data-label', value='tag')}, ..., {clean_html}), + 'categories': ({find_elements(attr='data-label', value='category')}, ..., {clean_html}), + 'cast': ({find_elements(attr='data-label', value='pornstar')}, ..., {clean_html}), + }), 'subtitles': subtitles, }, info) diff --git a/plugins/youtube_download/yt_dlp/extractor/rinsefm.py b/plugins/youtube_download/yt_dlp/extractor/rinsefm.py index 5bc2eb8..202446e 100644 --- a/plugins/youtube_download/yt_dlp/extractor/rinsefm.py +++ b/plugins/youtube_download/yt_dlp/extractor/rinsefm.py @@ -3,12 +3,14 @@ from ..utils import ( MEDIA_EXTENSIONS, determine_ext, parse_iso8601, - traverse_obj, url_or_none, ) +from ..utils.traversal import traverse_obj class RinseFMBaseIE(InfoExtractor): + _API_BASE = 'https://rinse.fm/api/query/v1' + @staticmethod def _parse_entry(entry): return { @@ -45,8 +47,10 @@ class RinseFMIE(RinseFMBaseIE): def _real_extract(self, url): display_id = self._match_id(url) - webpage = self._download_webpage(url, display_id) - entry = self._search_nextjs_data(webpage, display_id)['props']['pageProps']['entry'] + + entry = self._download_json( + f'{self._API_BASE}/episodes/{display_id}', display_id, + note='Downloading episode data from API')['entry'] return self._parse_entry(entry) @@ -58,32 +62,35 @@ class RinseFMArtistPlaylistIE(RinseFMBaseIE): 'info_dict': { 'id': 'resources', 'title': '[re]sources', - 'description': '[re]sources est un label parisien piloté par le DJ et producteur Tommy Kid.', + 'description': 'md5:fd6a7254e8273510e6d49fbf50edf392', }, 'playlist_mincount': 40, }, { - 'url': 'https://rinse.fm/shows/ivy/', + 'url': 'https://www.rinse.fm/shows/esk', 'info_dict': { - 'id': 'ivy', - 'title': '[IVY]', - 'description': 'A dedicated space for DNB/Turbo House and 4x4.', + 'id': 'esk', + 'title': 'Esk', + 'description': 'md5:5893d7c1d411ae8dea7fba12f109aa98', }, - 'playlist_mincount': 7, + 'playlist_mincount': 139, }] def _entries(self, data): for episode in traverse_obj(data, ( - 'props', 'pageProps', 'episodes', lambda _, v: determine_ext(v['fileUrl']) in MEDIA_EXTENSIONS.audio), + 'episodes', lambda _, v: determine_ext(v['fileUrl']) in MEDIA_EXTENSIONS.audio), ): yield self._parse_entry(episode) def _real_extract(self, url): playlist_id = self._match_id(url) - webpage = self._download_webpage(url, playlist_id) - title = self._og_search_title(webpage) or self._html_search_meta('title', webpage) - description = self._og_search_description(webpage) or self._html_search_meta( - 'description', webpage) - data = self._search_nextjs_data(webpage, playlist_id) + + api_data = self._download_json( + f'{self._API_BASE}/shows/{playlist_id}', playlist_id, + note='Downloading show data from API') return self.playlist_result( - self._entries(data), playlist_id, title, description=description) + self._entries(api_data), playlist_id, + **traverse_obj(api_data, ('entry', { + 'title': ('title', {str}), + 'description': ('description', {str}), + }))) diff --git a/plugins/youtube_download/yt_dlp/extractor/s4c.py b/plugins/youtube_download/yt_dlp/extractor/s4c.py index 6eb8b2b..d35436d 100644 --- a/plugins/youtube_download/yt_dlp/extractor/s4c.py +++ b/plugins/youtube_download/yt_dlp/extractor/s4c.py @@ -15,14 +15,15 @@ class S4CIE(InfoExtractor): 'thumbnail': 'https://www.s4c.cymru/amg/1920x1080/Y_Swn_2023S4C_099_ii.jpg', }, }, { - 'url': 'https://www.s4c.cymru/clic/programme/856636948', + # Geo restricted to the UK + 'url': 'https://www.s4c.cymru/clic/programme/886303048', 'info_dict': { - 'id': '856636948', + 'id': 
'886303048', 'ext': 'mp4', - 'title': 'Am Dro', + 'title': 'Pennod 1', + 'description': 'md5:7e3f364b70f61fcdaa8b4cb4a3eb3e7a', 'duration': 2880, - 'description': 'md5:100d8686fc9a632a0cb2db52a3433ffe', - 'thumbnail': 'https://www.s4c.cymru/amg/1920x1080/Am_Dro_2022-23S4C_P6_4005.jpg', + 'thumbnail': 'https://www.s4c.cymru/amg/1920x1080/Stad_2025S4C_P1_210053.jpg', }, }] @@ -51,7 +52,7 @@ class S4CIE(InfoExtractor): 'https://player-api.s4c-cdn.co.uk/streaming-urls/prod', video_id, query={ 'mode': 'od', 'application': 'clic', - 'region': 'WW', + 'region': 'UK' if player_config.get('application') == 's4chttpl' else 'WW', 'extra': 'false', 'thirdParty': 'false', 'filename': player_config['filename'], diff --git a/plugins/youtube_download/yt_dlp/extractor/scte.py b/plugins/youtube_download/yt_dlp/extractor/scte.py deleted file mode 100644 index 3971132..0000000 --- a/plugins/youtube_download/yt_dlp/extractor/scte.py +++ /dev/null @@ -1,137 +0,0 @@ -import re - -from .common import InfoExtractor -from ..utils import ( - ExtractorError, - decode_packed_codes, - urlencode_postdata, -) - - -class SCTEBaseIE(InfoExtractor): - _LOGIN_URL = 'https://www.scte.org/SCTE/Sign_In.aspx' - _NETRC_MACHINE = 'scte' - - def _perform_login(self, username, password): - login_popup = self._download_webpage( - self._LOGIN_URL, None, 'Downloading login popup') - - def is_logged(webpage): - return any(re.search(p, webpage) for p in ( - r'class=["\']welcome\b', r'>Sign Out<')) - - # already logged in - if is_logged(login_popup): - return - - login_form = self._hidden_inputs(login_popup) - - login_form.update({ - 'ctl01$TemplateBody$WebPartManager1$gwpciNewContactSignInCommon$ciNewContactSignInCommon$signInUserName': username, - 'ctl01$TemplateBody$WebPartManager1$gwpciNewContactSignInCommon$ciNewContactSignInCommon$signInPassword': password, - 'ctl01$TemplateBody$WebPartManager1$gwpciNewContactSignInCommon$ciNewContactSignInCommon$RememberMe': 'on', - }) - - response = self._download_webpage( - self._LOGIN_URL, None, 'Logging in', - data=urlencode_postdata(login_form)) - - if '|pageRedirect|' not in response and not is_logged(response): - error = self._html_search_regex( - r'(?s)<[^>]+class=["\']AsiError["\'][^>]*>(.+?)\d+)' - _TESTS = [{ - 'url': 'https://learning.scte.org/mod/scorm/view.php?id=31484', - 'info_dict': { - 'title': 'Introduction to DOCSIS Engineering Professional', - 'id': '31484', - }, - 'playlist_count': 5, - 'skip': 'Requires account credentials', - }] - - def _real_extract(self, url): - video_id = self._match_id(url) - - webpage = self._download_webpage(url, video_id) - - title = self._search_regex(r'
<h1>(.+?)</h1>
', webpage, 'title') - - context_id = self._search_regex(r'context-(\d+)', webpage, video_id) - content_base = f'https://learning.scte.org/pluginfile.php/{context_id}/mod_scorm/content/8/' - context = decode_packed_codes(self._download_webpage( - f'{content_base}mobile/data.js', video_id)) - - data = self._parse_xml( - self._search_regex( - r'CreateData\(\s*"(.+?)"', context, 'data').replace(r"\'", "'"), - video_id) - - entries = [] - for asset in data.findall('.//asset'): - asset_url = asset.get('url') - if not asset_url or not asset_url.endswith('.mp4'): - continue - asset_id = self._search_regex( - r'video_([^_]+)_', asset_url, 'asset id', default=None) - if not asset_id: - continue - entries.append({ - 'id': asset_id, - 'title': title, - 'url': content_base + asset_url, - }) - - return self.playlist_result(entries, video_id, title) - - -class SCTECourseIE(SCTEBaseIE): - _WORKING = False - _VALID_URL = r'https?://learning\.scte\.org/(?:mod/sub)?course/view\.php?.*?\bid=(?P\d+)' - _TESTS = [{ - 'url': 'https://learning.scte.org/mod/subcourse/view.php?id=31491', - 'only_matching': True, - }, { - 'url': 'https://learning.scte.org/course/view.php?id=3639', - 'only_matching': True, - }, { - 'url': 'https://learning.scte.org/course/view.php?id=3073', - 'only_matching': True, - }] - - def _real_extract(self, url): - course_id = self._match_id(url) - - webpage = self._download_webpage(url, course_id) - - title = self._search_regex( - r'
<h1>(.+?)</h1>
', webpage, 'title', default=None) - - entries = [] - for mobj in re.finditer( - r'''(?x) - ]+ - href=(["\']) - (?P - https?://learning\.scte\.org/mod/ - (?Pscorm|subcourse)/view\.php?(?:(?!\1).)*? - \bid=\d+ - ) - ''', - webpage): - item_url = mobj.group('url') - if item_url == url: - continue - ie = (SCTEIE.ie_key() if mobj.group('kind') == 'scorm' - else SCTECourseIE.ie_key()) - entries.append(self.url_result(item_url, ie=ie)) - - return self.playlist_result(entries, course_id, title) diff --git a/plugins/youtube_download/yt_dlp/extractor/soundcloud.py b/plugins/youtube_download/yt_dlp/extractor/soundcloud.py index 7833081..5c3ff28 100644 --- a/plugins/youtube_download/yt_dlp/extractor/soundcloud.py +++ b/plugins/youtube_download/yt_dlp/extractor/soundcloud.py @@ -1064,7 +1064,7 @@ class SoundcloudRelatedIE(SoundcloudPagedPlaylistBaseIE): class SoundcloudPlaylistIE(SoundcloudPlaylistBaseIE): - _VALID_URL = r'https?://api(?:-v2)?\.soundcloud\.com/playlists/(?P[0-9]+)(?:/?\?secret_token=(?P[^&]+?))?$' + _VALID_URL = r'https?://api(?:-v2)?\.soundcloud\.com/playlists/(?:soundcloud(?:%3A|:)playlists(?:%3A|:))?(?P[0-9]+)(?:/?\?secret_token=(?P[^&]+?))?$' IE_NAME = 'soundcloud:playlist' _TESTS = [{ 'url': 'https://api.soundcloud.com/playlists/4110309', @@ -1079,6 +1079,12 @@ class SoundcloudPlaylistIE(SoundcloudPlaylistBaseIE): 'album': 'TILT Brass - Bowery Poetry Club, August \'03 [Non-Site SCR 02]', }, 'playlist_count': 6, + }, { + 'url': 'https://api.soundcloud.com/playlists/soundcloud%3Aplaylists%3A1759227795', + 'only_matching': True, + }, { + 'url': 'https://api.soundcloud.com/playlists/soundcloud:playlists:2104769627?secret_token=s-wmpCLuExeYX', + 'only_matching': True, }] def _real_extract(self, url): diff --git a/plugins/youtube_download/yt_dlp/extractor/sportdeutschland.py b/plugins/youtube_download/yt_dlp/extractor/sportdeutschland.py index 0b7d90a..cba026c 100644 --- a/plugins/youtube_download/yt_dlp/extractor/sportdeutschland.py +++ b/plugins/youtube_download/yt_dlp/extractor/sportdeutschland.py @@ -8,10 +8,11 @@ from ..utils import ( class SportDeutschlandIE(InfoExtractor): - _VALID_URL = r'https?://(?:player\.)?sportdeutschland\.tv/(?P(?:[^/?#]+/)?[^?#/&]+)' + IE_NAME = 'sporteurope' + _VALID_URL = r'https?://(?:player\.)?sporteurope\.tv/(?P(?:[^/?#]+/)?[^?#/&]+)' _TESTS = [{ # Single-part video, direct link - 'url': 'https://sportdeutschland.tv/rostock-griffins/gfl2-rostock-griffins-vs-elmshorn-fighting-pirates', + 'url': 'https://sporteurope.tv/rostock-griffins/gfl2-rostock-griffins-vs-elmshorn-fighting-pirates', 'md5': '35c11a19395c938cdd076b93bda54cde', 'info_dict': { 'id': '9f27a97d-1544-4d0b-aa03-48d92d17a03a', @@ -19,9 +20,9 @@ class SportDeutschlandIE(InfoExtractor): 'title': 'GFL2: Rostock Griffins vs. Elmshorn Fighting Pirates', 'display_id': 'rostock-griffins/gfl2-rostock-griffins-vs-elmshorn-fighting-pirates', 'channel': 'Rostock Griffins', - 'channel_url': 'https://sportdeutschland.tv/rostock-griffins', + 'channel_url': 'https://sporteurope.tv/rostock-griffins', 'live_status': 'was_live', - 'description': 'md5:60cb00067e55dafa27b0933a43d72862', + 'description': r're:Video-Livestream des Spiels Rostock Griffins vs\. 
Elmshorn Fighting Pirates.+', 'channel_id': '9635f21c-3f67-4584-9ce4-796e9a47276b', 'timestamp': 1749913117, 'upload_date': '20250614', @@ -29,16 +30,16 @@ class SportDeutschlandIE(InfoExtractor): }, }, { # Single-part video, embedded player link - 'url': 'https://player.sportdeutschland.tv/9e9619c4-7d77-43c4-926d-49fb57dc06dc', + 'url': 'https://player.sporteurope.tv/9e9619c4-7d77-43c4-926d-49fb57dc06dc', 'info_dict': { 'id': '9f27a97d-1544-4d0b-aa03-48d92d17a03a', 'ext': 'mp4', 'title': 'GFL2: Rostock Griffins vs. Elmshorn Fighting Pirates', 'display_id': '9e9619c4-7d77-43c4-926d-49fb57dc06dc', 'channel': 'Rostock Griffins', - 'channel_url': 'https://sportdeutschland.tv/rostock-griffins', + 'channel_url': 'https://sporteurope.tv/rostock-griffins', 'live_status': 'was_live', - 'description': 'md5:60cb00067e55dafa27b0933a43d72862', + 'description': r're:Video-Livestream des Spiels Rostock Griffins vs\. Elmshorn Fighting Pirates.+', 'channel_id': '9635f21c-3f67-4584-9ce4-796e9a47276b', 'timestamp': 1749913117, 'upload_date': '20250614', @@ -47,7 +48,7 @@ class SportDeutschlandIE(InfoExtractor): 'params': {'skip_download': True}, }, { # Multi-part video - 'url': 'https://sportdeutschland.tv/rhine-ruhr-2025-fisu-world-university-games/volleyball-w-japan-vs-brasilien-halbfinale-2', + 'url': 'https://sporteurope.tv/rhine-ruhr-2025-fisu-world-university-games/volleyball-w-japan-vs-brasilien-halbfinale-2', 'info_dict': { 'id': '9f63d737-2444-4e3a-a1ea-840df73fd481', 'display_id': 'rhine-ruhr-2025-fisu-world-university-games/volleyball-w-japan-vs-brasilien-halbfinale-2', @@ -55,7 +56,7 @@ class SportDeutschlandIE(InfoExtractor): 'description': 'md5:0a17da15e48a687e6019639c3452572b', 'channel': 'Rhine-Ruhr 2025 FISU World University Games', 'channel_id': '9f5216be-a49d-470b-9a30-4fe9df993334', - 'channel_url': 'https://sportdeutschland.tv/rhine-ruhr-2025-fisu-world-university-games', + 'channel_url': 'https://sporteurope.tv/rhine-ruhr-2025-fisu-world-university-games', 'live_status': 'was_live', }, 'playlist_count': 2, @@ -66,7 +67,7 @@ class SportDeutschlandIE(InfoExtractor): 'title': 'Volleyball w: Japan vs. Braslien - Halbfinale 2 Part 1', 'channel': 'Rhine-Ruhr 2025 FISU World University Games', 'channel_id': '9f5216be-a49d-470b-9a30-4fe9df993334', - 'channel_url': 'https://sportdeutschland.tv/rhine-ruhr-2025-fisu-world-university-games', + 'channel_url': 'https://sporteurope.tv/rhine-ruhr-2025-fisu-world-university-games', 'duration': 14773.0, 'timestamp': 1753085197, 'upload_date': '20250721', @@ -79,16 +80,17 @@ class SportDeutschlandIE(InfoExtractor): 'title': 'Volleyball w: Japan vs. 
Braslien - Halbfinale 2 Part 2', 'channel': 'Rhine-Ruhr 2025 FISU World University Games', 'channel_id': '9f5216be-a49d-470b-9a30-4fe9df993334', - 'channel_url': 'https://sportdeutschland.tv/rhine-ruhr-2025-fisu-world-university-games', + 'channel_url': 'https://sporteurope.tv/rhine-ruhr-2025-fisu-world-university-games', 'duration': 14773.0, 'timestamp': 1753128421, 'upload_date': '20250721', 'live_status': 'was_live', }, }], + 'skip': '404 Not Found', }, { # Livestream - 'url': 'https://sportdeutschland.tv/dtb/gymnastik-international-tag-1', + 'url': 'https://sporteurope.tv/dtb/gymnastik-international-tag-1', 'info_dict': { 'id': '95d71b8a-370a-4b87-ad16-94680da18528', 'ext': 'mp4', @@ -96,7 +98,7 @@ class SportDeutschlandIE(InfoExtractor): 'display_id': 'dtb/gymnastik-international-tag-1', 'channel_id': '936ecef1-2f4a-4e08-be2f-68073cb7ecab', 'channel': 'Deutscher Turner-Bund', - 'channel_url': 'https://sportdeutschland.tv/dtb', + 'channel_url': 'https://sporteurope.tv/dtb', 'description': 'md5:07a885dde5838a6f0796ee21dc3b0c52', 'live_status': 'is_live', }, @@ -106,9 +108,9 @@ class SportDeutschlandIE(InfoExtractor): def _process_video(self, asset_id, video): is_live = video['type'] == 'mux_live' token = self._download_json( - f'https://api.sportdeutschland.tv/api/web/personal/asset-token/{asset_id}', + f'https://api.sporteurope.tv/api/web/personal/asset-token/{asset_id}', video['id'], query={'type': video['type'], 'playback_id': video['src']}, - headers={'Referer': 'https://sportdeutschland.tv/'})['token'] + headers={'Referer': 'https://sporteurope.tv/'})['token'] formats, subtitles = self._extract_m3u8_formats_and_subtitles( f'https://stream.mux.com/{video["src"]}.m3u8?token={token}', video['id'], live=is_live) @@ -126,7 +128,7 @@ class SportDeutschlandIE(InfoExtractor): def _real_extract(self, url): display_id = self._match_id(url) meta = self._download_json( - f'https://api.sportdeutschland.tv/api/stateless/frontend/assets/{display_id}', + f'https://api.sporteurope.tv/api/stateless/frontend/assets/{display_id}', display_id, query={'access_token': 'true'}) info = { @@ -139,7 +141,7 @@ class SportDeutschlandIE(InfoExtractor): 'channel_id': ('profile', 'id'), 'is_live': 'currently_live', 'was_live': 'was_live', - 'channel_url': ('profile', 'slug', {lambda x: f'https://sportdeutschland.tv/{x}'}), + 'channel_url': ('profile', 'slug', {lambda x: f'https://sporteurope.tv/{x}'}), }, get_all=False), } diff --git a/plugins/youtube_download/yt_dlp/extractor/sproutvideo.py b/plugins/youtube_download/yt_dlp/extractor/sproutvideo.py index ff9dc7d..80fc6d2 100644 --- a/plugins/youtube_download/yt_dlp/extractor/sproutvideo.py +++ b/plugins/youtube_download/yt_dlp/extractor/sproutvideo.py @@ -101,8 +101,8 @@ class SproutVideoIE(InfoExtractor): webpage = self._download_webpage( url, video_id, headers=traverse_obj(smuggled_data, {'Referer': 'referer'})) data = self._search_json( - r'(?:var|const|let)\s+(?:dat|(?:player|video)Info|)\s*=\s*["\']', webpage, 'player info', - video_id, contains_pattern=r'[A-Za-z0-9+/=]+', end_pattern=r'["\'];', + r'(?:window\.|(?:var|const|let)\s+)(?:dat|(?:player|video)Info|)\s*=\s*["\']', webpage, + 'player info', video_id, contains_pattern=r'[A-Za-z0-9+/=]+', end_pattern=r'["\'];', transform_source=lambda x: base64.b64decode(x).decode()) # SproutVideo may send player info for 'SMPTE Color Monitor Test' [a791d7b71b12ecc52e] diff --git a/plugins/youtube_download/yt_dlp/extractor/tarangplus.py b/plugins/youtube_download/yt_dlp/extractor/tarangplus.py new file mode 100644 
index 0000000..62f254d --- /dev/null +++ b/plugins/youtube_download/yt_dlp/extractor/tarangplus.py @@ -0,0 +1,243 @@ +import base64 +import binascii +import functools +import re +import urllib.parse + +from .common import InfoExtractor +from ..dependencies import Cryptodome +from ..utils import ( + ExtractorError, + OnDemandPagedList, + clean_html, + extract_attributes, + urljoin, +) +from ..utils.traversal import ( + find_element, + find_elements, + require, + traverse_obj, +) + + +class TarangPlusBaseIE(InfoExtractor): + _BASE_URL = 'https://tarangplus.in' + + +class TarangPlusVideoIE(TarangPlusBaseIE): + IE_NAME = 'tarangplus:video' + _VALID_URL = r'https?://(?:www\.)?tarangplus\.in/(?:movies|[^#?/]+/[^#?/]+)/(?!episodes)(?P[^#?/]+)' + _TESTS = [{ + 'url': 'https://tarangplus.in/tarangaplus-originals/khitpit/khitpit-ep-10', + 'md5': '78ce056cee755687b8a48199909ecf53', + 'info_dict': { + 'id': '67b8206719521d054c0059b7', + 'display_id': 'khitpit-ep-10', + 'ext': 'mp4', + 'title': 'Khitpit Ep-10', + 'description': 'md5:a45b805cb628e15c853d78b0406eab48', + 'thumbnail': r're:https?://.+/.+\.jpg', + 'duration': 756.0, + 'timestamp': 1740355200, + 'upload_date': '20250224', + 'media_type': 'episode', + 'categories': ['Originals'], + }, + }, { + 'url': 'https://tarangplus.in/tarang-serials/bada-bohu/bada-bohu-ep-233', + 'md5': 'b4f9beb15172559bb362203b4f48382e', + 'info_dict': { + 'id': '680b9d6c19521d054c007782', + 'display_id': 'bada-bohu-ep-233', + 'ext': 'mp4', + 'title': 'Bada Bohu | Ep -233', + 'description': 'md5:e6b8e7edc9e60b92c1b390f8789ecd69', + 'thumbnail': r're:https?://.+/.+\.jpg', + 'duration': 1392.0, + 'timestamp': 1745539200, + 'upload_date': '20250425', + 'media_type': 'episode', + 'categories': ['Prime'], + }, + }, { + # Decrypted m3u8 URL has trailing control characters that need to be stripped + 'url': 'https://tarangplus.in/tarangaplus-originals/ichha/ichha-teaser-1', + 'md5': '16ee43fe21ad8b6e652ec65eba38a64e', + 'info_dict': { + 'id': '5f0f252d3326af0720000342', + 'ext': 'mp4', + 'display_id': 'ichha-teaser-1', + 'title': 'Ichha Teaser', + 'description': 'md5:c724b0b0669a2cefdada3711cec792e6', + 'media_type': 'episode', + 'duration': 21.0, + 'thumbnail': r're:https?://.+/.+\.jpg', + 'categories': ['Originals'], + 'timestamp': 1758153600, + 'upload_date': '20250918', + }, + }, { + 'url': 'https://tarangplus.in/short/ai-maa/ai-maa', + 'only_matching': True, + }, { + 'url': 'https://tarangplus.in/shows/tarang-cine-utsav-2024/tarang-cine-utsav-2024-seg-1', + 'only_matching': True, + }, { + 'url': 'https://tarangplus.in/music-videos/chori-chori-bohu-chori-songs/nijara-laguchu-dhire-dhire', + 'only_matching': True, + }, { + 'url': 'https://tarangplus.in/kids-shows/chhota-jaga/chhota-jaga-ep-33-jamidar-ra-khajana-adaya', + 'only_matching': True, + }, { + 'url': 'https://tarangplus.in/movies/swayambara', + 'only_matching': True, + }] + + def decrypt(self, data, key): + if not Cryptodome.AES: + raise ExtractorError('pycryptodomex not found. 
Please install', expected=True) + iv = binascii.unhexlify('00000000000000000000000000000000') + cipher = Cryptodome.AES.new(base64.b64decode(key), Cryptodome.AES.MODE_CBC, iv) + return cipher.decrypt(base64.b64decode(data)).decode('utf-8') + + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + hidden_inputs_data = self._hidden_inputs(webpage) + json_ld_data = self._search_json_ld(webpage, display_id) + json_ld_data.pop('url', None) + + iframe_url = traverse_obj(webpage, ( + {find_element(tag='iframe', attr='src', value=r'.+[?&]contenturl=.+', html=True, regex=True)}, + {extract_attributes}, 'src', {require('iframe URL')})) + # Can't use parse_qs here since it would decode the encrypted base64 `+` chars to spaces + content = self._search_regex(r'[?&]contenturl=(.+)', iframe_url, 'content') + encrypted_data, _, attrs = content.partition('|') + metadata = { + m.group('k'): m.group('v') + for m in re.finditer(r'(?:^|\|)(?P[a-z_]+)=(?P(?:(?!\|[a-z_]+=).)+)', attrs) + } + m3u8_url = urllib.parse.unquote( + self.decrypt(encrypted_data, metadata['key'])).rstrip('\x0e\x0f') + + return { + 'id': display_id, # Fallback + 'display_id': display_id, + **json_ld_data, + **traverse_obj(metadata, { + 'id': ('content_id', {str}), + 'title': ('title', {str}), + 'thumbnail': ('image', {str}), + }), + **traverse_obj(hidden_inputs_data, { + 'id': ('content_id', {str}), + 'media_type': ('theme_type', {str}), + 'categories': ('genre', {str}, filter, all, filter), + }), + 'formats': self._extract_m3u8_formats(m3u8_url, display_id), + } + + +class TarangPlusEpisodesIE(TarangPlusBaseIE): + IE_NAME = 'tarangplus:episodes' + _VALID_URL = r'https?://(?:www\.)?tarangplus\.in/(?P[^#?/]+)/(?P[^#?/]+)/episodes/?(?:$|[?#])' + _TESTS = [{ + 'url': 'https://tarangplus.in/tarangaplus-originals/balijatra/episodes', + 'info_dict': { + 'id': 'balijatra', + 'title': 'Balijatra', + }, + 'playlist_mincount': 7, + }, { + 'url': 'https://tarangplus.in/tarang-serials/bada-bohu/episodes', + 'info_dict': { + 'id': 'bada-bohu', + 'title': 'Bada Bohu', + }, + 'playlist_mincount': 236, + }, { + 'url': 'https://tarangplus.in/shows/dr-nonsense/episodes', + 'info_dict': { + 'id': 'dr-nonsense', + 'title': 'Dr. 
Nonsense',
+        },
+        'playlist_mincount': 15,
+    }]
+    _PAGE_SIZE = 20
+
+    def _entries(self, playlist_url, playlist_id, page):
+        data = self._download_json(
+            playlist_url, playlist_id, f'Downloading playlist JSON page {page + 1}',
+            query={'page_no': page})
+        for item in traverse_obj(data, ('items', ..., {str})):
+            yield self.url_result(
+                urljoin(self._BASE_URL, item.split('$')[3]), TarangPlusVideoIE)
+
+    def _real_extract(self, url):
+        url_type, display_id = self._match_valid_url(url).group('type', 'id')
+        series_url = f'{self._BASE_URL}/{url_type}/{display_id}'
+        webpage = self._download_webpage(series_url, display_id)
+
+        entries = OnDemandPagedList(
+            functools.partial(self._entries, f'{series_url}/episodes', display_id),
+            self._PAGE_SIZE)
+        return self.playlist_result(
+            entries, display_id, self._hidden_inputs(webpage).get('title'))
+
+
+class TarangPlusPlaylistIE(TarangPlusBaseIE):
+    IE_NAME = 'tarangplus:playlist'
+    _VALID_URL = r'https?://(?:www\.)?tarangplus\.in/(?P<id>[^#?/]+)/all/?(?:$|[?#])'
+    _TESTS = [{
+        'url': 'https://tarangplus.in/chhota-jaga/all',
+        'info_dict': {
+            'id': 'chhota-jaga',
+            'title': 'Chhota Jaga',
+        },
+        'playlist_mincount': 33,
+    }, {
+        'url': 'https://tarangplus.in/kids-yali-show/all',
+        'info_dict': {
+            'id': 'kids-yali-show',
+            'title': 'Yali',
+        },
+        'playlist_mincount': 10,
+    }, {
+        'url': 'https://tarangplus.in/trailer/all',
+        'info_dict': {
+            'id': 'trailer',
+            'title': 'Trailer',
+        },
+        'playlist_mincount': 57,
+    }, {
+        'url': 'https://tarangplus.in/latest-songs/all',
+        'info_dict': {
+            'id': 'latest-songs',
+            'title': 'Latest Songs',
+        },
+        'playlist_mincount': 46,
+    }, {
+        'url': 'https://tarangplus.in/premium-serials-episodes/all',
+        'info_dict': {
+            'id': 'premium-serials-episodes',
+            'title': 'Primetime Latest Episodes',
+        },
+        'playlist_mincount': 100,
+    }]
+
+    def _entries(self, webpage):
+        for url_path in traverse_obj(webpage, (
+            {find_elements(cls='item')}, ...,
+            {find_elements(tag='a', attr='href', value='/.+', html=True, regex=True)},
+            ..., {extract_attributes}, 'href',
+        )):
+            yield self.url_result(urljoin(self._BASE_URL, url_path), TarangPlusVideoIE)
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+        webpage = self._download_webpage(url, display_id)
+
+        return self.playlist_result(
+            self._entries(webpage), display_id,
+            traverse_obj(webpage, ({find_element(id='al_title')}, {clean_html})))
diff --git a/plugins/youtube_download/yt_dlp/extractor/telecinco.py b/plugins/youtube_download/yt_dlp/extractor/telecinco.py
index bdcae3b..6846191 100644
--- a/plugins/youtube_download/yt_dlp/extractor/telecinco.py
+++ b/plugins/youtube_download/yt_dlp/extractor/telecinco.py
@@ -6,20 +6,21 @@ from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     clean_html,
+    extract_attributes,
     int_or_none,
     join_nonempty,
     str_or_none,
-    traverse_obj,
     update_url,
     url_or_none,
 )
+from ..utils.traversal import traverse_obj


 class TelecincoBaseIE(InfoExtractor):
     def _parse_content(self, content, url):
-        video_id = content['dataMediaId']
+        video_id = content['dataMediaId'][1]
         config = self._download_json(
-            content['dataConfig'], video_id, 'Downloading config JSON')
+            content['dataConfig'][1], video_id, 'Downloading config JSON')
         services = config['services']
         caronte = self._download_json(services['caronte'], video_id)
         if traverse_obj(caronte, ('dls', 0, 'drm', {bool})):
@@ -57,9 +58,9 @@ class TelecincoBaseIE(InfoExtractor):
             'id': video_id,
             'title': traverse_obj(config, ('info', 'title', {str})),
             'formats': formats,
-            'thumbnail': (traverse_obj(content, ('dataPoster', {url_or_none}))
+            'thumbnail': (traverse_obj(content, ('dataPoster', 1, {url_or_none}))
                           or traverse_obj(config, 'poster', 'imageUrl', expected_type=url_or_none)),
-            'duration': traverse_obj(content, ('dataDuration', {int_or_none})),
+            'duration': traverse_obj(content, ('dataDuration', 1, {int_or_none})),
             'http_headers': headers,
         }
@@ -137,30 +138,45 @@ class TelecincoIE(TelecincoBaseIE):
         'url': 'http://www.cuatro.com/chesterinlove/a-carta/chester-chester_in_love-chester_edu_2_2331030022.html',
         'only_matching': True,
     }]
+    _ASTRO_ISLAND_RE = re.compile(r'<astro-island[^>]+>')

     def _real_extract(self, url):
         display_id = self._match_id(url)
         webpage = self._download_webpage(url, display_id, impersonate=True)
-        article = self._search_json(
-            r'window\.\$REACTBASE_STATE\.article(?:_multisite)?\s*=',
-            webpage, 'article', display_id)['article']
-        description = traverse_obj(article, ('leadParagraph', {clean_html}, filter))
-        if article.get('editorialType') != 'VID':
+        props_list = traverse_obj(webpage, (
+            {self._ASTRO_ISLAND_RE.findall}, ...,
+            {extract_attributes}, 'props', {json.loads}))
+
+        description = traverse_obj(props_list, (..., 'leadParagraph', 1, {clean_html}, any, filter))
+        main_content = traverse_obj(props_list, (..., ('content', ('articleData', 1, 'opening')), 1, {dict}, any))
+
+        if traverse_obj(props_list, (..., 'editorialType', 1, {str}, any)) != 'VID':  # e.g. 'ART'
             entries = []
-            for p in traverse_obj(article, ((('opening', all), 'body'), lambda _, v: v['content'])):
-                content = p['content']
-                type_ = p.get('type')
-                if type_ == 'paragraph' and isinstance(content, str):
+            for p in traverse_obj(props_list, (..., 'articleData', 1, ('opening', ('body', 1, ...)), 1, {dict})):
+                type_ = traverse_obj(p, ('type', 1, {str}))
+                content = traverse_obj(p, ('content', 1, {str} if type_ == 'paragraph' else {dict}))
+                if not content:
+                    continue
+                if type_ == 'paragraph':
                     description = join_nonempty(description, content, delim='')
-                elif type_ == 'video' and isinstance(content, dict):
+                elif type_ == 'video':
                     entries.append(self._parse_content(content, url))
+                else:
+                    self.report_warning(
+                        f'Skipping unsupported content type "{type_}"', display_id, only_once=True)

             return self.playlist_result(
-                entries, str_or_none(article.get('id')),
-                traverse_obj(article, ('title', {str})), clean_html(description))
+                entries,
+                traverse_obj(props_list, (..., 'id', 1, {int}, {str_or_none}, any)) or display_id,
+                traverse_obj(main_content, ('dataTitle', 1, {str})),
+                clean_html(description))

-        info = self._parse_content(article['opening']['content'], url)
+        if not main_content:
+            raise ExtractorError('Unable to extract main content from webpage')
+
+        info = self._parse_content(main_content, url)
         info['description'] = description
+
         return info
diff --git a/plugins/youtube_download/yt_dlp/extractor/thisoldhouse.py b/plugins/youtube_download/yt_dlp/extractor/thisoldhouse.py
index fbc12d5..b9d1154 100644
--- a/plugins/youtube_download/yt_dlp/extractor/thisoldhouse.py
+++ b/plugins/youtube_download/yt_dlp/extractor/thisoldhouse.py
@@ -1,18 +1,17 @@
-import json
+import urllib.parse

 from .brightcove import BrightcoveNewIE
 from .common import InfoExtractor
 from .zype import ZypeIE
 from ..networking import HEADRequest
-from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     filter_dict,
     parse_qs,
     smuggle_url,
-    try_call,
     urlencode_postdata,
 )
+from ..utils.traversal import traverse_obj


 class ThisOldHouseIE(InfoExtractor):
@@ -77,46 +76,43 @@ class ThisOldHouseIE(InfoExtractor):
         'only_matching': True,
     }]
-    _LOGIN_URL = 'https://login.thisoldhouse.com/usernamepassword/login'
-
     def _perform_login(self, username, password):
-        self._request_webpage(
-            HEADRequest('https://www.thisoldhouse.com/insider'), None, 'Requesting session cookies')
-        urlh = self._request_webpage(
-            'https://www.thisoldhouse.com/wp-login.php', None, 'Requesting login info',
-            errnote='Unable to login', query={'redirect_to': 'https://www.thisoldhouse.com/insider'})
+        login_page = self._download_webpage(
+            'https://www.thisoldhouse.com/insider-login', None, 'Downloading login page')
+        hidden_inputs = self._hidden_inputs(login_page)
+        response = self._download_json(
+            'https://www.thisoldhouse.com/wp-admin/admin-ajax.php', None, 'Logging in',
+            headers={
+                'Accept': 'application/json',
+                'X-Requested-With': 'XMLHttpRequest',
+            }, data=urlencode_postdata(filter_dict({
+                'action': 'onebill_subscriber_login',
+                'email': username,
+                'password': password,
+                'pricingPlanTerm': hidden_inputs['pricing_plan_term'],
+                'utm_parameters': hidden_inputs.get('utm_parameters'),
+                'nonce': hidden_inputs['mdcr_onebill_login_nonce'],
+            })))

-        try:
-            auth_form = self._download_webpage(
-                self._LOGIN_URL, None, 'Submitting credentials', headers={
-                    'Content-Type': 'application/json',
-                    'Referer': urlh.url,
-                }, data=json.dumps(filter_dict({
-                    **{('client_id' if k == 'client' else k): v[0] for k, v in parse_qs(urlh.url).items()},
-                    'tenant': 'thisoldhouse',
-                    'username': username,
-                    'password': password,
-                    'popup_options': {},
-                    'sso': True,
-                    '_csrf': try_call(lambda: self._get_cookies(self._LOGIN_URL)['_csrf'].value),
-                    '_intstate': 'deprecated',
-                }), separators=(',', ':')).encode())
-        except ExtractorError as e:
-            if isinstance(e.cause, HTTPError) and e.cause.status == 401:
+        message = traverse_obj(response, ('data', 'message', {str}))
+        if not response['success']:
+            if message and 'Something went wrong' in message:
                 raise ExtractorError('Invalid username or password', expected=True)
-            raise
-
-        self._request_webpage(
-            'https://login.thisoldhouse.com/login/callback', None, 'Completing login',
-            data=urlencode_postdata(self._hidden_inputs(auth_form)))
+            raise ExtractorError(message or 'Login was unsuccessful')
+        if message and 'Your subscription is not active' in message:
+            self.report_warning(
+                f'{self.IE_NAME} said your subscription is not active. '
+                f'If your subscription is active, this could be caused by too many sign-ins, '
+                f'and you should instead try using {self._login_hint(method="cookies")[4:]}')
+        else:
+            self.write_debug(f'{self.IE_NAME} said: {message}')

     def _real_extract(self, url):
         display_id = self._match_id(url)
-        webpage = self._download_webpage(url, display_id)
-        if 'To Unlock This content' in webpage:
-            self.raise_login_required(
-                'This video is only available for subscribers. '
-                'Note that --cookies-from-browser may not work due to this site using session cookies')
+        webpage, urlh = self._download_webpage_handle(url, display_id)
+        # If login response says inactive subscription, site redirects to frontpage for Insider content
+        if 'To Unlock This content' in webpage or urllib.parse.urlparse(urlh.url).path in ('', '/'):
+            self.raise_login_required('This video is only available for subscribers')

         video_url, video_id = self._search_regex(
             r'<iframe[^>]+src=[\'"]((?:https?:)?//(?:www\.)?thisoldhouse\.(?:chorus\.build|com)/videos/zype/([0-9a-f]{24})[^\'"]*)[\'"]',
diff --git a/plugins/youtube_download/yt_dlp/extractor/tiktok.py b/plugins/youtube_download/yt_dlp/extractor/tiktok.py
index b7e058e..02ec2b2 100644
--- a/plugins/youtube_download/yt_dlp/extractor/tiktok.py
+++ b/plugins/youtube_download/yt_dlp/extractor/tiktok.py
@@ -454,6 +454,7 @@ class TikTokBaseIE(InfoExtractor):
                 'like_count': 'digg_count',
                 'repost_count': 'share_count',
                 'comment_count': 'comment_count',
+                'save_count': 'collect_count',
             }, expected_type=int_or_none),
             **author_info,
             'channel_url': format_field(author_info, 'channel_id', self._UPLOADER_URL_FORMAT, default=None),
@@ -607,6 +608,7 @@ class TikTokBaseIE(InfoExtractor):
                 'like_count': 'diggCount',
                 'repost_count': 'shareCount',
                 'comment_count': 'commentCount',
+                'save_count': 'collectCount',
             }), expected_type=int_or_none),
             'thumbnails': [
                 {
@@ -646,6 +648,7 @@ class TikTokIE(TikTokBaseIE):
             'like_count': int,
             'repost_count': int,
             'comment_count': int,
+            'save_count': int,
             'artist': 'Ysrbeats',
             'album': 'Lehanga',
             'track': 'Lehanga',
         },
@@ -675,6 +678,7 @@ class TikTokIE(TikTokBaseIE):
             'like_count': int,
             'repost_count': int,
             'comment_count': int,
+            'save_count': int,
             'artists': ['Evan Todd', 'Jessica Keenan Wynn', 'Alice Lee', 'Barrett Wilbert Weed', 'Jon Eidson'],
             'track': 'Big Fun',
         },
@@ -702,6 +706,7 @@ class TikTokIE(TikTokBaseIE):
             'like_count': int,
             'repost_count': int,
             'comment_count': int,
+            'save_count': int,
         },
     }, {
         # Sponsored video, only available with feed workaround
@@ -725,6 +730,7 @@ class TikTokIE(TikTokBaseIE):
             'like_count': int,
             'repost_count': int,
             'comment_count': int,
+            'save_count': int,
         },
         'skip': 'This video is unavailable',
     }, {
@@ -751,6 +757,7 @@ class TikTokIE(TikTokBaseIE):
             'like_count': int,
             'repost_count': int,
             'comment_count': int,
+            'save_count': int,
         },
     }, {
         # hydration JSON is sent in a