From 3ad9e1c7bbae456dcf8090f63b2f034f718dd0ac Mon Sep 17 00:00:00 2001
From: itdominator <1itdominator@gmail.com>
Date: Mon, 20 Feb 2023 19:18:45 -0600
Subject: [PATCH] Plugin cleanup and tweaks

---
 plugins/archiver/plugin.py | 36 +-
 plugins/disk_usage/plugin.py | 16 +-
 plugins/favorites/plugin.py | 16 +-
 plugins/file_properties/file_properties.glade | 3 +-
 plugins/file_properties/plugin.py | 13 +-
 plugins/git_clone/plugin.py | 13 +-
 plugins/movie_tv_info/plugin.py | 18 +-
 plugins/searcher/plugin.py | 33 +-
 plugins/template/plugin.py | 10 +-
 plugins/translate/__init__.py | 3 +
 plugins/translate/__main__.py | 3 +
 plugins/translate/manifest.json | 12 +
 plugins/translate/plugin.py | 134 +
 plugins/translate/translate.glade | 210 +
 plugins/trasher/plugin.py | 16 -
 plugins/vod_thumbnailer/plugin.py | 22 +-
 plugins/youtube_download/plugin.py | 11 +-
 .../yt_dlp-2022.2.4.dist-info/AUTHORS | 0
 .../yt_dlp-2022.2.4.dist-info/INSTALLER | 1 -
 .../yt_dlp-2022.2.4.dist-info/LICENSE | 24 -
 .../yt_dlp-2022.2.4.dist-info/METADATA | 1992 ----
 .../yt_dlp-2022.2.4.dist-info/RECORD | 1939 ----
 .../yt_dlp-2022.2.4.dist-info/REQUESTED | 0
 .../yt_dlp-2022.2.4.dist-info/WHEEL | 6 -
 .../entry_points.txt | 3 -
 .../yt_dlp-2022.2.4.dist-info/top_level.txt | 1 -
 plugins/youtube_download/yt_dlp/YoutubeDL.py | 2178 ++--
 plugins/youtube_download/yt_dlp/__init__.py | 1222 ++-
 plugins/youtube_download/yt_dlp/__main__.py | 6 +-
 .../yt_dlp/__pyinstaller/__init__.py | 5 +
 .../yt_dlp/__pyinstaller/hook-yt_dlp.py | 57 +
 plugins/youtube_download/yt_dlp/aes.py | 103 +-
 plugins/youtube_download/yt_dlp/cache.py | 54 +-
 plugins/youtube_download/yt_dlp/compat.py | 311 -
 .../yt_dlp/compat/__init__.py | 72 +
 .../yt_dlp/compat/_deprecated.py | 16 +
 .../youtube_download/yt_dlp/compat/_legacy.py | 97 +
 .../yt_dlp/compat/compat_utils.py | 83 +
 .../yt_dlp/compat/functools.py | 26 +
 .../youtube_download/yt_dlp/compat/imghdr.py | 16 +
 .../youtube_download/yt_dlp/compat/shutil.py | 30 +
 plugins/youtube_download/yt_dlp/cookies.py | 528 +-
 .../yt_dlp/dependencies/Cryptodome.py | 30 +
 .../yt_dlp/dependencies/__init__.py | 83 +
 .../yt_dlp/downloader/__init__.py | 35 +-
 .../yt_dlp/downloader/common.py | 339 +-
 .../yt_dlp/downloader/dash.py | 23 +-
 .../yt_dlp/downloader/external.py | 372 +-
 .../youtube_download/yt_dlp/downloader/f4m.py | 54 +-
 .../youtube_download/yt_dlp/downloader/fc2.py | 46 +
 .../yt_dlp/downloader/fragment.py | 274 +-
 .../youtube_download/yt_dlp/downloader/hls.py | 103 +-
 .../yt_dlp/downloader/http.py | 211 +-
 .../youtube_download/yt_dlp/downloader/ism.py | 68 +-
 .../yt_dlp/downloader/mhtml.py | 39 +-
 .../yt_dlp/downloader/niconico.py | 13 +-
 .../yt_dlp/downloader/rtmp.py | 16 +-
 .../yt_dlp/downloader/rtsp.py | 9 +-
 .../yt_dlp/downloader/websocket.py | 19 +-
 .../yt_dlp/downloader/youtube_live_chat.py | 62 +-
 .../yt_dlp/extractor/__init__.py | 52 +-
 .../yt_dlp/extractor/_extractors.py | 2404 +++++
 .../youtube_download/yt_dlp/extractor/abc.py | 7 +-
 .../yt_dlp/extractor/abcnews.py | 4 -
 .../yt_dlp/extractor/abcotvs.py | 6 -
 .../yt_dlp/extractor/abematv.py | 522 +
 .../yt_dlp/extractor/academicearth.py | 2 -
 .../yt_dlp/extractor/acast.py | 4 -
 .../yt_dlp/extractor/acfun.py | 199 +
 .../youtube_download/yt_dlp/extractor/adn.py | 57 +-
 .../yt_dlp/extractor/adobeconnect.py | 5 +-
 .../yt_dlp/extractor/adobepass.py | 159 +-
 .../yt_dlp/extractor/adobetv.py | 5 +-
 .../yt_dlp/extractor/adultswim.py | 4 -
 .../yt_dlp/extractor/aenetworks.py | 17 +-
 .../yt_dlp/extractor/aeonco.py | 40 +
 .../yt_dlp/extractor/afreecatv.py | 106 +-
.../yt_dlp/extractor/agora.py | 251 + .../yt_dlp/extractor/airmozilla.py | 3 - .../yt_dlp/extractor/airtv.py | 96 + .../yt_dlp/extractor/aitube.py | 60 + .../yt_dlp/extractor/aliexpress.py | 5 +- .../yt_dlp/extractor/aljazeera.py | 3 - .../yt_dlp/extractor/allocine.py | 11 +- .../yt_dlp/extractor/alphaporno.py | 2 - .../yt_dlp/extractor/alsace20tv.py | 83 + .../yt_dlp/extractor/alura.py | 16 +- .../yt_dlp/extractor/amara.py | 3 - .../yt_dlp/extractor/amazon.py | 135 +- .../yt_dlp/extractor/amazonminitv.py | 290 + .../yt_dlp/extractor/amcnetworks.py | 6 +- .../yt_dlp/extractor/americastestkitchen.py | 57 +- .../youtube_download/yt_dlp/extractor/amp.py | 7 +- .../yt_dlp/extractor/angel.py | 56 + .../yt_dlp/extractor/animelab.py | 285 - .../yt_dlp/extractor/animeondemand.py | 291 - .../yt_dlp/extractor/ant1newsgr.py | 128 + .../yt_dlp/extractor/anvato.py | 233 +- .../anvato_token_generator/__init__.py | 7 - .../anvato_token_generator/common.py | 6 - .../extractor/anvato_token_generator/nfl.py | 30 - .../youtube_download/yt_dlp/extractor/aol.py | 6 +- .../youtube_download/yt_dlp/extractor/apa.py | 15 +- .../yt_dlp/extractor/aparat.py | 5 +- .../yt_dlp/extractor/appleconnect.py | 3 - .../yt_dlp/extractor/applepodcasts.py | 51 +- .../yt_dlp/extractor/appletrailers.py | 5 - .../yt_dlp/extractor/archiveorg.py | 682 +- .../yt_dlp/extractor/arcpublishing.py | 11 +- .../youtube_download/yt_dlp/extractor/ard.py | 45 +- .../yt_dlp/extractor/arkena.py | 17 +- .../yt_dlp/extractor/arnes.py | 6 +- .../youtube_download/yt_dlp/extractor/arte.py | 405 +- .../yt_dlp/extractor/asiancrush.py | 6 +- .../yt_dlp/extractor/atresplayer.py | 14 +- .../yt_dlp/extractor/atscaleconf.py | 34 + .../yt_dlp/extractor/atttechchannel.py | 2 - .../yt_dlp/extractor/atvat.py | 10 +- .../yt_dlp/extractor/audimedia.py | 4 - .../yt_dlp/extractor/audioboom.py | 76 +- .../yt_dlp/extractor/audiodraft.py | 93 + .../yt_dlp/extractor/audiomack.py | 4 +- .../yt_dlp/extractor/audius.py | 11 +- .../yt_dlp/extractor/awaan.py | 5 +- .../youtube_download/yt_dlp/extractor/aws.py | 5 +- .../yt_dlp/extractor/azmedien.py | 13 +- .../yt_dlp/extractor/baidu.py | 4 - .../yt_dlp/extractor/banbye.py | 148 + .../yt_dlp/extractor/bandaichannel.py | 7 +- .../yt_dlp/extractor/bandcamp.py | 138 +- .../yt_dlp/extractor/bannedvideo.py | 3 - .../youtube_download/yt_dlp/extractor/bbc.py | 119 +- .../yt_dlp/extractor/beatbump.py | 101 + .../yt_dlp/extractor/beatport.py | 4 - .../youtube_download/yt_dlp/extractor/beeg.py | 125 +- .../yt_dlp/extractor/behindkink.py | 4 - .../yt_dlp/extractor/bellmedia.py | 14 +- .../yt_dlp/extractor/berufetv.py | 70 + .../youtube_download/yt_dlp/extractor/bet.py | 2 - .../youtube_download/yt_dlp/extractor/bfi.py | 3 - .../yt_dlp/extractor/bfmtv.py | 24 +- .../yt_dlp/extractor/bibeltv.py | 3 - .../yt_dlp/extractor/bigflix.py | 5 - .../youtube_download/yt_dlp/extractor/bigo.py | 56 + .../youtube_download/yt_dlp/extractor/bild.py | 3 - .../yt_dlp/extractor/bilibili.py | 1247 ++- .../yt_dlp/extractor/biobiochiletv.py | 3 - .../yt_dlp/extractor/biqle.py | 97 +- .../yt_dlp/extractor/bitchute.py | 279 +- .../yt_dlp/extractor/bitwave.py | 3 - .../yt_dlp/extractor/blackboardcollaborate.py | 4 - .../yt_dlp/extractor/bleacherreport.py | 3 - .../yt_dlp/extractor/blinkx.py | 86 - .../yt_dlp/extractor/blogger.py | 11 +- .../yt_dlp/extractor/bloomberg.py | 14 +- .../yt_dlp/extractor/bokecc.py | 6 - .../yt_dlp/extractor/bongacams.py | 21 +- .../yt_dlp/extractor/booyah.py | 86 + .../yt_dlp/extractor/bostonglobe.py | 3 - 
.../youtube_download/yt_dlp/extractor/box.py | 5 - .../youtube_download/yt_dlp/extractor/bpb.py | 6 - .../youtube_download/yt_dlp/extractor/br.py | 10 +- .../yt_dlp/extractor/bravotv.py | 3 - .../yt_dlp/extractor/breakcom.py | 4 - .../yt_dlp/extractor/breitbart.py | 7 +- .../yt_dlp/extractor/brightcove.py | 542 +- .../yt_dlp/extractor/bundesliga.py | 34 + .../yt_dlp/extractor/businessinsider.py | 3 - .../yt_dlp/extractor/buzzfeed.py | 5 +- .../yt_dlp/extractor/byutv.py | 4 - .../youtube_download/yt_dlp/extractor/c56.py | 5 - .../yt_dlp/extractor/cableav.py | 2 - .../yt_dlp/extractor/callin.py | 6 +- .../yt_dlp/extractor/caltrans.py | 37 + .../youtube_download/yt_dlp/extractor/cam4.py | 4 - .../yt_dlp/extractor/camdemy.py | 3 - .../yt_dlp/extractor/cammodels.py | 4 - .../yt_dlp/extractor/camsoda.py | 57 + .../yt_dlp/extractor/camtasia.py | 71 + .../yt_dlp/extractor/camwithher.py | 2 - .../yt_dlp/extractor/canalalpha.py | 4 - .../yt_dlp/extractor/canalc2.py | 5 - .../yt_dlp/extractor/canalplus.py | 5 - .../yt_dlp/extractor/canvas.py | 19 +- .../yt_dlp/extractor/carambatv.py | 4 - .../yt_dlp/extractor/cartoonnetwork.py | 3 - .../youtube_download/yt_dlp/extractor/cbc.py | 15 +- .../youtube_download/yt_dlp/extractor/cbs.py | 33 +- .../yt_dlp/extractor/cbsinteractive.py | 6 +- .../yt_dlp/extractor/cbslocal.py | 7 +- .../yt_dlp/extractor/cbsnews.py | 8 +- .../yt_dlp/extractor/cbssports.py | 4 - .../youtube_download/yt_dlp/extractor/ccc.py | 5 +- .../youtube_download/yt_dlp/extractor/ccma.py | 17 +- .../youtube_download/yt_dlp/extractor/cctv.py | 5 - .../youtube_download/yt_dlp/extractor/cda.py | 136 +- .../yt_dlp/extractor/cellebrite.py | 63 + .../yt_dlp/extractor/ceskatelevize.py | 77 +- .../youtube_download/yt_dlp/extractor/cgtn.py | 3 - .../yt_dlp/extractor/channel9.py | 10 +- .../yt_dlp/extractor/charlierose.py | 4 - .../yt_dlp/extractor/chaturbate.py | 3 - .../yt_dlp/extractor/chilloutzone.py | 2 - .../yt_dlp/extractor/chingari.py | 18 +- .../yt_dlp/extractor/chirbit.py | 3 - .../yt_dlp/extractor/cinchcast.py | 6 +- .../yt_dlp/extractor/cinemax.py | 4 - .../yt_dlp/extractor/cinetecamilano.py | 61 + .../yt_dlp/extractor/ciscolive.py | 3 - .../yt_dlp/extractor/ciscowebex.py | 34 +- .../youtube_download/yt_dlp/extractor/cjsw.py | 4 - .../yt_dlp/extractor/cliphunter.py | 3 - .../yt_dlp/extractor/clippit.py | 4 - .../yt_dlp/extractor/cliprs.py | 3 - .../yt_dlp/extractor/clipsyndicate.py | 2 - .../yt_dlp/extractor/closertotruth.py | 6 +- .../yt_dlp/extractor/cloudflarestream.py | 16 +- .../yt_dlp/extractor/cloudy.py | 3 - .../yt_dlp/extractor/clubic.py | 4 - .../youtube_download/yt_dlp/extractor/clyp.py | 46 +- .../youtube_download/yt_dlp/extractor/cmt.py | 4 +- .../youtube_download/yt_dlp/extractor/cnbc.py | 4 - .../youtube_download/yt_dlp/extractor/cnn.py | 60 +- .../yt_dlp/extractor/comedycentral.py | 2 - .../yt_dlp/extractor/common.py | 1657 +-- .../yt_dlp/extractor/commonmistakes.py | 12 +- .../yt_dlp/extractor/commonprotocols.py | 8 +- .../yt_dlp/extractor/condenast.py | 9 +- .../yt_dlp/extractor/contv.py | 5 - .../yt_dlp/extractor/corus.py | 7 +- .../youtube_download/yt_dlp/extractor/coub.py | 5 - .../yt_dlp/extractor/cozytv.py | 3 - .../youtube_download/yt_dlp/extractor/cpac.py | 136 + .../yt_dlp/extractor/cracked.py | 2 - .../yt_dlp/extractor/crackle.py | 4 - .../yt_dlp/extractor/craftsy.py | 68 + .../yt_dlp/extractor/crooksandliars.py | 5 +- .../yt_dlp/extractor/crowdbunker.py | 4 - .../yt_dlp/extractor/crunchyroll.py | 971 +- .../yt_dlp/extractor/cspan.py | 54 +- 
.../yt_dlp/extractor/ctsnews.py | 3 - .../youtube_download/yt_dlp/extractor/ctv.py | 3 - .../yt_dlp/extractor/ctvnews.py | 3 - .../yt_dlp/extractor/cultureunplugged.py | 2 - .../yt_dlp/extractor/curiositystream.py | 32 +- .../youtube_download/yt_dlp/extractor/cwtv.py | 4 +- .../yt_dlp/extractor/cybrary.py | 144 + .../yt_dlp/extractor/daftsex.py | 102 +- .../yt_dlp/extractor/dailymail.py | 13 +- .../yt_dlp/extractor/dailymotion.py | 63 +- .../yt_dlp/extractor/dailywire.py | 113 + .../yt_dlp/extractor/damtomo.py | 4 - .../youtube_download/yt_dlp/extractor/daum.py | 6 +- .../yt_dlp/extractor/daystar.py | 47 + .../youtube_download/yt_dlp/extractor/dbtv.py | 12 +- .../youtube_download/yt_dlp/extractor/dctp.py | 3 - .../yt_dlp/extractor/deezer.py | 4 - .../yt_dlp/extractor/defense.py | 2 - .../yt_dlp/extractor/democracynow.py | 5 - .../yt_dlp/extractor/detik.py | 159 + .../yt_dlp/extractor/deuxm.py | 76 + .../youtube_download/yt_dlp/extractor/dfb.py | 4 - .../youtube_download/yt_dlp/extractor/dhm.py | 2 - .../youtube_download/yt_dlp/extractor/digg.py | 2 - .../yt_dlp/extractor/digitalconcerthall.py | 14 +- .../yt_dlp/extractor/digiteka.py | 16 +- .../yt_dlp/extractor/discovery.py | 4 +- .../yt_dlp/extractor/discoverygo.py | 3 - .../yt_dlp/extractor/discoveryvr.py | 59 - .../yt_dlp/extractor/disney.py | 4 - .../yt_dlp/extractor/dispeak.py | 3 - .../yt_dlp/extractor/dlive.py | 4 - .../yt_dlp/extractor/doodstream.py | 76 - .../yt_dlp/extractor/dotsub.py | 2 - .../yt_dlp/extractor/douyutv.py | 58 +- .../yt_dlp/extractor/dplay.py | 83 +- .../yt_dlp/extractor/drbonanza.py | 4 - .../yt_dlp/extractor/dreisat.py | 4 +- .../yt_dlp/extractor/drooble.py | 3 - .../yt_dlp/extractor/dropbox.py | 8 +- .../yt_dlp/extractor/dropout.py | 36 +- .../yt_dlp/extractor/drtuber.py | 10 +- .../youtube_download/yt_dlp/extractor/drtv.py | 161 +- .../yt_dlp/extractor/dtube.py | 3 - .../yt_dlp/extractor/duboku.py | 53 +- .../yt_dlp/extractor/dumpert.py | 4 - .../youtube_download/yt_dlp/extractor/dvtv.py | 4 - .../youtube_download/yt_dlp/extractor/dw.py | 4 - .../yt_dlp/extractor/eagleplatform.py | 39 +- .../yt_dlp/extractor/ebaumsworld.py | 2 - .../yt_dlp/extractor/echomsk.py | 3 - .../yt_dlp/extractor/egghead.py | 4 - .../youtube_download/yt_dlp/extractor/ehow.py | 2 - .../yt_dlp/extractor/eighttracks.py | 3 - .../yt_dlp/extractor/einthusan.py | 5 - .../youtube_download/yt_dlp/extractor/eitb.py | 5 - .../yt_dlp/extractor/ellentube.py | 7 +- .../yt_dlp/extractor/elonet.py | 87 +- .../yt_dlp/extractor/elpais.py | 3 - .../yt_dlp/extractor/embedly.py | 74 +- .../yt_dlp/extractor/engadget.py | 12 - .../yt_dlp/extractor/epicon.py | 4 - .../yt_dlp/extractor/epoch.py | 55 + .../yt_dlp/extractor/eporner.py | 5 - .../yt_dlp/extractor/eroprofile.py | 11 +- .../yt_dlp/extractor/ertgr.py | 26 +- .../yt_dlp/extractor/escapist.py | 3 - .../youtube_download/yt_dlp/extractor/espn.py | 165 +- .../youtube_download/yt_dlp/extractor/esri.py | 4 - .../yt_dlp/extractor/europa.py | 88 +- .../yt_dlp/extractor/europeantour.py | 3 - .../yt_dlp/extractor/eurosport.py | 97 + .../yt_dlp/extractor/euscreen.py | 4 - .../yt_dlp/extractor/everyonesmixtape.py | 76 - .../yt_dlp/extractor/expotv.py | 3 - .../yt_dlp/extractor/expressen.py | 21 +- .../yt_dlp/extractor/extractors.py | 2092 +--- .../yt_dlp/extractor/extremetube.py | 4 +- .../yt_dlp/extractor/eyedotv.py | 3 - .../yt_dlp/extractor/facebook.py | 97 +- .../yt_dlp/extractor/fancode.py | 43 +- .../youtube_download/yt_dlp/extractor/faz.py | 4 - .../youtube_download/yt_dlp/extractor/fc2.py | 154 +- 
.../yt_dlp/extractor/fczenit.py | 5 - .../youtube_download/yt_dlp/extractor/fifa.py | 83 + .../yt_dlp/extractor/filmmodu.py | 5 - .../yt_dlp/extractor/filmon.py | 5 - .../yt_dlp/extractor/filmweb.py | 3 - .../yt_dlp/extractor/firsttv.py | 4 - .../yt_dlp/extractor/fivemin.py | 54 - .../yt_dlp/extractor/fivetv.py | 7 +- .../yt_dlp/extractor/flickr.py | 5 +- .../yt_dlp/extractor/folketinget.py | 4 - .../yt_dlp/extractor/footyroom.py | 3 - .../yt_dlp/extractor/formula1.py | 3 - .../yt_dlp/extractor/fourtube.py | 3 - .../yt_dlp/extractor/fourzerostudio.py | 106 + .../youtube_download/yt_dlp/extractor/fox.py | 10 +- .../youtube_download/yt_dlp/extractor/fox9.py | 3 - .../yt_dlp/extractor/foxgay.py | 9 +- .../yt_dlp/extractor/foxnews.py | 43 +- .../yt_dlp/extractor/foxsports.py | 58 +- .../yt_dlp/extractor/fptplay.py | 117 + .../yt_dlp/extractor/franceculture.py | 73 - .../yt_dlp/extractor/franceinter.py | 3 - .../yt_dlp/extractor/francetv.py | 12 +- .../yt_dlp/extractor/freesound.py | 4 +- .../yt_dlp/extractor/freespeech.py | 2 - .../yt_dlp/extractor/freetv.py | 139 + .../yt_dlp/extractor/freshlive.py | 80 - .../yt_dlp/extractor/frontendmasters.py | 17 +- .../yt_dlp/extractor/fujitv.py | 31 +- .../yt_dlp/extractor/funimation.py | 31 +- .../youtube_download/yt_dlp/extractor/funk.py | 4 - .../yt_dlp/extractor/fusion.py | 3 - .../yt_dlp/extractor/fuyintv.py | 30 + .../yt_dlp/extractor/fxnetworks.py | 77 - .../youtube_download/yt_dlp/extractor/gab.py | 6 - .../youtube_download/yt_dlp/extractor/gaia.py | 35 +- .../yt_dlp/extractor/gameinformer.py | 3 - .../yt_dlp/extractor/gamejolt.py | 3 +- .../yt_dlp/extractor/gamespot.py | 4 - .../yt_dlp/extractor/gamestar.py | 4 - .../yt_dlp/extractor/gaskrank.py | 4 - .../yt_dlp/extractor/gazeta.py | 4 - .../yt_dlp/extractor/gdcvault.py | 2 - .../yt_dlp/extractor/gedidigital.py | 36 +- .../yt_dlp/extractor/generic.py | 2269 +---- .../yt_dlp/extractor/genericembeds.py | 114 + .../yt_dlp/extractor/genius.py | 127 + .../yt_dlp/extractor/gettr.py | 162 +- .../yt_dlp/extractor/gfycat.py | 17 +- .../yt_dlp/extractor/giantbomb.py | 4 - .../youtube_download/yt_dlp/extractor/giga.py | 13 +- .../yt_dlp/extractor/gigya.py | 2 - .../yt_dlp/extractor/glide.py | 7 +- .../yt_dlp/extractor/globo.py | 37 +- .../yt_dlp/extractor/glomex.py | 16 +- .../youtube_download/yt_dlp/extractor/go.py | 67 +- .../yt_dlp/extractor/godtube.py | 3 - .../yt_dlp/extractor/gofile.py | 53 +- .../yt_dlp/extractor/golem.py | 4 - .../yt_dlp/extractor/goodgame.py | 57 + .../yt_dlp/extractor/googledrive.py | 68 +- .../yt_dlp/extractor/googlepodcasts.py | 3 - .../yt_dlp/extractor/googlesearch.py | 2 - .../yt_dlp/extractor/goplay.py | 394 + .../yt_dlp/extractor/gopro.py | 5 - .../yt_dlp/extractor/goshgay.py | 3 - .../yt_dlp/extractor/gotostage.py | 3 - .../yt_dlp/extractor/gputechconf.py | 3 - .../yt_dlp/extractor/gronkh.py | 76 +- .../yt_dlp/extractor/groupon.py | 2 - .../yt_dlp/extractor/harpodeon.py | 70 + .../youtube_download/yt_dlp/extractor/hbo.py | 4 - .../yt_dlp/extractor/hearthisat.py | 5 - .../yt_dlp/extractor/heise.py | 71 +- .../yt_dlp/extractor/hellporno.py | 6 +- .../yt_dlp/extractor/helsinki.py | 5 - .../yt_dlp/extractor/hentaistigma.py | 2 - .../youtube_download/yt_dlp/extractor/hgtv.py | 3 - .../yt_dlp/extractor/hidive.py | 62 +- .../yt_dlp/extractor/historicfilms.py | 2 - .../yt_dlp/extractor/hitbox.py | 13 +- .../yt_dlp/extractor/hitrecord.py | 2 - .../yt_dlp/extractor/hketv.py | 4 - .../yt_dlp/extractor/holodex.py | 100 + .../yt_dlp/extractor/hornbunny.py | 49 - 
.../yt_dlp/extractor/hotnewhiphop.py | 2 - .../yt_dlp/extractor/hotstar.py | 331 +- .../yt_dlp/extractor/howcast.py | 2 - .../yt_dlp/extractor/howstuffworks.py | 4 - .../yt_dlp/extractor/hrfensehen.py | 58 +- .../youtube_download/yt_dlp/extractor/hrti.py | 19 +- .../youtube_download/yt_dlp/extractor/hse.py | 2 - .../yt_dlp/extractor/huajiao.py | 3 - .../yt_dlp/extractor/huffpost.py | 8 +- .../yt_dlp/extractor/hungama.py | 48 +- .../youtube_download/yt_dlp/extractor/huya.py | 134 + .../yt_dlp/extractor/hypem.py | 2 - .../yt_dlp/extractor/hytale.py | 58 + .../yt_dlp/extractor/icareus.py | 179 + .../yt_dlp/extractor/ichinanalive.py | 7 - .../youtube_download/yt_dlp/extractor/ign.py | 4 - .../yt_dlp/extractor/iheart.py | 3 - .../yt_dlp/extractor/iltalehti.py | 51 + .../youtube_download/yt_dlp/extractor/imdb.py | 67 +- .../yt_dlp/extractor/imggaming.py | 19 +- .../yt_dlp/extractor/imgur.py | 6 +- .../youtube_download/yt_dlp/extractor/ina.py | 110 +- .../youtube_download/yt_dlp/extractor/inc.py | 2 - .../yt_dlp/extractor/indavideo.py | 28 +- .../yt_dlp/extractor/infoq.py | 17 +- .../yt_dlp/extractor/instagram.py | 362 +- .../yt_dlp/extractor/internazionale.py | 4 - .../yt_dlp/extractor/internetvideoarchive.py | 3 - .../yt_dlp/extractor/iprima.py | 24 +- .../yt_dlp/extractor/iqiyi.py | 63 +- .../yt_dlp/extractor/ir90tv.py | 42 - .../yt_dlp/extractor/islamchannel.py | 81 + .../yt_dlp/extractor/israelnationalnews.py | 50 + .../yt_dlp/extractor/itprotv.py | 139 + .../youtube_download/yt_dlp/extractor/itv.py | 4 - .../youtube_download/yt_dlp/extractor/ivi.py | 31 +- .../yt_dlp/extractor/ivideon.py | 5 - .../yt_dlp/extractor/iwara.py | 140 +- .../yt_dlp/extractor/ixigua.py | 83 + .../yt_dlp/extractor/izlesene.py | 4 - .../yt_dlp/extractor/jable.py | 103 + .../yt_dlp/extractor/jamendo.py | 41 +- .../yt_dlp/extractor/japandiet.py | 274 + .../yt_dlp/extractor/jeuxvideo.py | 5 - .../yt_dlp/extractor/jixie.py | 47 + .../youtube_download/yt_dlp/extractor/joj.py | 43 +- .../youtube_download/yt_dlp/extractor/jove.py | 3 - .../yt_dlp/extractor/jwplatform.py | 46 +- .../yt_dlp/extractor/kakao.py | 6 +- .../yt_dlp/extractor/kaltura.py | 273 +- .../yt_dlp/extractor/kanal2.py | 66 + .../yt_dlp/extractor/kanalplay.py | 96 - .../yt_dlp/extractor/kankan.py | 48 - .../yt_dlp/extractor/kankanews.py | 48 + .../yt_dlp/extractor/karaoketv.py | 3 - .../yt_dlp/extractor/karrierevideos.py | 3 - .../yt_dlp/extractor/keezmovies.py | 11 +- .../yt_dlp/extractor/kelbyone.py | 4 - .../yt_dlp/extractor/ketnet.py | 2 - .../yt_dlp/extractor/khanacademy.py | 19 +- .../youtube_download/yt_dlp/extractor/kick.py | 127 + .../yt_dlp/extractor/kicker.py | 55 + .../yt_dlp/extractor/kickstarter.py | 3 - .../yt_dlp/extractor/kinja.py | 17 +- .../yt_dlp/extractor/kinopoisk.py | 4 - .../yt_dlp/extractor/kompas.py | 26 + .../yt_dlp/extractor/konserthusetplay.py | 5 - .../youtube_download/yt_dlp/extractor/koo.py | 3 - .../yt_dlp/extractor/krasview.py | 3 - .../youtube_download/yt_dlp/extractor/kth.py | 28 + .../youtube_download/yt_dlp/extractor/ku6.py | 2 - .../youtube_download/yt_dlp/extractor/kusi.py | 10 +- .../youtube_download/yt_dlp/extractor/kuwo.py | 6 - .../youtube_download/yt_dlp/extractor/la7.py | 62 +- .../yt_dlp/extractor/laola1tv.py | 6 +- .../yt_dlp/extractor/lastfm.py | 126 + .../yt_dlp/extractor/lazy_extractors.py | 8845 ----------------- .../youtube_download/yt_dlp/extractor/lbry.py | 89 +- .../youtube_download/yt_dlp/extractor/lci.py | 32 +- .../youtube_download/yt_dlp/extractor/lcp.py | 5 +- .../yt_dlp/extractor/lecture2go.py | 
5 - .../yt_dlp/extractor/lecturio.py | 13 +- .../yt_dlp/extractor/leeco.py | 6 +- .../youtube_download/yt_dlp/extractor/lego.py | 4 - .../yt_dlp/extractor/lemonde.py | 2 - .../yt_dlp/extractor/lenta.py | 3 - .../yt_dlp/extractor/libraryofcongress.py | 5 - .../yt_dlp/extractor/libsyn.py | 5 +- .../yt_dlp/extractor/lifenews.py | 5 - .../yt_dlp/extractor/likee.py | 192 + .../yt_dlp/extractor/limelight.py | 11 +- .../youtube_download/yt_dlp/extractor/line.py | 7 +- .../yt_dlp/extractor/linkedin.py | 22 +- .../yt_dlp/extractor/linuxacademy.py | 17 +- .../yt_dlp/extractor/liputan6.py | 64 + .../yt_dlp/extractor/listennotes.py | 86 + .../youtube_download/yt_dlp/extractor/litv.py | 3 - .../yt_dlp/extractor/livejournal.py | 3 - .../yt_dlp/extractor/livestream.py | 7 +- .../yt_dlp/extractor/livestreamfails.py | 37 + .../yt_dlp/extractor/lnkgo.py | 8 +- .../yt_dlp/extractor/localnews8.py | 4 - .../yt_dlp/extractor/lovehomeporn.py | 3 - .../youtube_download/yt_dlp/extractor/lrt.py | 58 +- .../yt_dlp/extractor/lynda.py | 15 +- .../youtube_download/yt_dlp/extractor/m6.py | 3 - .../yt_dlp/extractor/magentamusik360.py | 3 - .../yt_dlp/extractor/mailru.py | 4 - .../yt_dlp/extractor/mainstreaming.py | 11 +- .../yt_dlp/extractor/malltv.py | 37 +- .../yt_dlp/extractor/mangomolo.py | 31 +- .../yt_dlp/extractor/manoto.py | 5 - .../yt_dlp/extractor/manyvids.py | 121 +- .../yt_dlp/extractor/maoritv.py | 3 - .../yt_dlp/extractor/markiza.py | 3 - .../yt_dlp/extractor/massengeschmacktv.py | 4 - .../yt_dlp/extractor/masters.py | 38 + .../yt_dlp/extractor/matchtv.py | 4 - .../youtube_download/yt_dlp/extractor/mdr.py | 5 - .../yt_dlp/extractor/medaltv.py | 77 +- .../yt_dlp/extractor/mediaite.py | 3 - .../yt_dlp/extractor/mediaklikk.py | 4 - .../yt_dlp/extractor/medialaan.py | 7 +- .../yt_dlp/extractor/mediaset.py | 208 +- .../yt_dlp/extractor/mediasite.py | 30 +- .../yt_dlp/extractor/mediastream.py | 155 + .../yt_dlp/extractor/mediaworksnz.py | 103 + .../yt_dlp/extractor/medici.py | 3 - .../yt_dlp/extractor/megaphone.py | 11 +- .../yt_dlp/extractor/megatvcom.py | 11 +- .../yt_dlp/extractor/meipai.py | 7 +- .../yt_dlp/extractor/melonvod.py | 4 - .../youtube_download/yt_dlp/extractor/meta.py | 3 - .../yt_dlp/extractor/metacafe.py | 16 +- .../yt_dlp/extractor/metacritic.py | 3 - .../yt_dlp/extractor/mgoon.py | 5 - .../youtube_download/yt_dlp/extractor/mgtv.py | 64 +- .../yt_dlp/extractor/miaopai.py | 6 +- .../yt_dlp/extractor/microsoftembed.py | 65 + .../yt_dlp/extractor/microsoftstream.py | 4 - .../extractor/microsoftvirtualacademy.py | 12 +- .../yt_dlp/extractor/mildom.py | 300 +- .../yt_dlp/extractor/minds.py | 8 +- .../yt_dlp/extractor/ministrygrid.py | 2 - .../yt_dlp/extractor/minoto.py | 5 - .../yt_dlp/extractor/miomio.py | 3 - .../yt_dlp/extractor/mirrativ.py | 86 +- .../yt_dlp/extractor/mirrorcouk.py | 98 + .../youtube_download/yt_dlp/extractor/mit.py | 2 - .../yt_dlp/extractor/mitele.py | 5 +- .../yt_dlp/extractor/mixch.py | 12 +- .../yt_dlp/extractor/mixcloud.py | 11 +- .../youtube_download/yt_dlp/extractor/mlb.py | 120 +- .../yt_dlp/extractor/mlssoccer.py | 3 - .../youtube_download/yt_dlp/extractor/mnet.py | 4 - .../yt_dlp/extractor/mocha.py | 64 + .../yt_dlp/extractor/moevideo.py | 4 - .../yt_dlp/extractor/mofosex.py | 13 +- .../yt_dlp/extractor/mojvideo.py | 7 +- .../yt_dlp/extractor/morningstar.py | 4 - .../yt_dlp/extractor/motherless.py | 31 +- .../yt_dlp/extractor/motorsport.py | 12 +- .../yt_dlp/extractor/movieclips.py | 3 - .../yt_dlp/extractor/moviepilot.py | 97 + .../yt_dlp/extractor/moview.py | 43 + 
.../yt_dlp/extractor/moviezine.py | 6 - .../yt_dlp/extractor/movingimage.py | 2 - .../youtube_download/yt_dlp/extractor/msn.py | 4 - .../youtube_download/yt_dlp/extractor/mtv.py | 26 +- .../yt_dlp/extractor/muenchentv.py | 4 - .../yt_dlp/extractor/murrtube.py | 162 + .../yt_dlp/extractor/musescore.py | 3 - .../yt_dlp/extractor/musicdex.py | 5 +- .../yt_dlp/extractor/mwave.py | 3 - .../yt_dlp/extractor/mxplayer.py | 153 +- .../yt_dlp/extractor/mychannels.py | 4 - .../yt_dlp/extractor/myspace.py | 5 - .../yt_dlp/extractor/myspass.py | 3 - .../youtube_download/yt_dlp/extractor/myvi.py | 13 +- .../yt_dlp/extractor/myvideoge.py | 3 - .../yt_dlp/extractor/myvidster.py | 2 - .../youtube_download/yt_dlp/extractor/n1.py | 5 - .../youtube_download/yt_dlp/extractor/nate.py | 4 - .../yt_dlp/extractor/nationalgeographic.py | 4 +- .../yt_dlp/extractor/naver.py | 193 +- .../youtube_download/yt_dlp/extractor/nba.py | 4 - .../youtube_download/yt_dlp/extractor/nbc.py | 263 +- .../youtube_download/yt_dlp/extractor/ndr.py | 252 +- .../youtube_download/yt_dlp/extractor/ndtv.py | 15 +- .../yt_dlp/extractor/nebula.py | 117 +- .../yt_dlp/extractor/nerdcubed.py | 3 - .../yt_dlp/extractor/neteasemusic.py | 176 +- .../yt_dlp/extractor/netverse.py | 281 + .../yt_dlp/extractor/netzkino.py | 5 - .../yt_dlp/extractor/newgrounds.py | 10 +- .../yt_dlp/extractor/newspicks.py | 53 + .../yt_dlp/extractor/newstube.py | 4 - .../yt_dlp/extractor/newsy.py | 4 - .../yt_dlp/extractor/nextmedia.py | 7 +- .../youtube_download/yt_dlp/extractor/nexx.py | 25 +- .../youtube_download/yt_dlp/extractor/nfb.py | 58 + .../yt_dlp/extractor/nfhsnetwork.py | 7 +- .../youtube_download/yt_dlp/extractor/nfl.py | 163 +- .../youtube_download/yt_dlp/extractor/nhk.py | 172 +- .../youtube_download/yt_dlp/extractor/nhl.py | 4 - .../youtube_download/yt_dlp/extractor/nick.py | 6 +- .../yt_dlp/extractor/niconico.py | 806 +- .../yt_dlp/extractor/ninecninemedia.py | 4 - .../yt_dlp/extractor/ninegag.py | 48 +- .../yt_dlp/extractor/ninenow.py | 3 - .../yt_dlp/extractor/nintendo.py | 3 - .../yt_dlp/extractor/nitter.py | 218 +- .../yt_dlp/extractor/njpwworld.py | 15 +- .../yt_dlp/extractor/nobelprize.py | 4 - .../youtube_download/yt_dlp/extractor/noco.py | 235 - .../yt_dlp/extractor/noice.py | 116 + .../yt_dlp/extractor/nonktube.py | 2 - .../yt_dlp/extractor/noodlemagazine.py | 5 - .../yt_dlp/extractor/noovo.py | 3 - .../yt_dlp/extractor/normalboots.py | 3 - .../yt_dlp/extractor/nosnl.py | 115 + .../yt_dlp/extractor/nosvideo.py | 3 - .../youtube_download/yt_dlp/extractor/nova.py | 5 - .../yt_dlp/extractor/novaplay.py | 54 +- .../yt_dlp/extractor/nowness.py | 3 - .../youtube_download/yt_dlp/extractor/noz.py | 11 +- .../youtube_download/yt_dlp/extractor/npo.py | 324 +- .../youtube_download/yt_dlp/extractor/npr.py | 23 +- .../youtube_download/yt_dlp/extractor/nrk.py | 29 +- .../youtube_download/yt_dlp/extractor/nrl.py | 3 - .../yt_dlp/extractor/ntvcojp.py | 3 - .../yt_dlp/extractor/ntvde.py | 4 - .../yt_dlp/extractor/ntvru.py | 4 - .../yt_dlp/extractor/nuevo.py | 3 - .../yt_dlp/extractor/nuvid.py | 52 +- .../yt_dlp/extractor/nytimes.py | 5 +- .../yt_dlp/extractor/nzherald.py | 49 +- .../yt_dlp/extractor/nzonscreen.py | 93 + .../youtube_download/yt_dlp/extractor/nzz.py | 3 - .../yt_dlp/extractor/odatv.py | 3 - .../yt_dlp/extractor/odnoklassniki.py | 140 +- .../youtube_download/yt_dlp/extractor/oftv.py | 54 + .../yt_dlp/extractor/oktoberfesttv.py | 3 - .../yt_dlp/extractor/olympics.py | 6 +- .../youtube_download/yt_dlp/extractor/on24.py | 4 - 
.../youtube_download/yt_dlp/extractor/once.py | 5 +- .../yt_dlp/extractor/ondemandkorea.py | 25 +- .../yt_dlp/extractor/onefootball.py | 4 - .../yt_dlp/extractor/onenewsnz.py | 111 + .../yt_dlp/extractor/oneplace.py | 43 + .../youtube_download/yt_dlp/extractor/onet.py | 4 - .../yt_dlp/extractor/onionstudios.py | 13 +- .../yt_dlp/extractor/ooyala.py | 27 +- .../yt_dlp/extractor/opencast.py | 5 - .../yt_dlp/extractor/openload.py | 109 +- .../yt_dlp/extractor/openrec.py | 103 +- .../youtube_download/yt_dlp/extractor/ora.py | 4 - .../youtube_download/yt_dlp/extractor/orf.py | 291 +- .../yt_dlp/extractor/outsidetv.py | 3 - .../yt_dlp/extractor/packtpub.py | 7 +- .../yt_dlp/extractor/palcomp3.py | 4 - .../yt_dlp/extractor/pandoratv.py | 5 - .../yt_dlp/extractor/panopto.py | 600 ++ .../yt_dlp/extractor/paramountplus.py | 98 +- .../yt_dlp/extractor/parler.py | 111 + .../yt_dlp/extractor/parliamentliveuk.py | 80 - .../yt_dlp/extractor/parlview.py | 4 - .../yt_dlp/extractor/patreon.py | 406 +- .../youtube_download/yt_dlp/extractor/pbs.py | 4 - .../yt_dlp/extractor/pearvideo.py | 13 +- .../yt_dlp/extractor/peekvids.py | 191 + .../yt_dlp/extractor/peertube.py | 25 +- .../yt_dlp/extractor/peertv.py | 5 - .../yt_dlp/extractor/peloton.py | 16 +- .../yt_dlp/extractor/people.py | 3 - .../yt_dlp/extractor/performgroup.py | 5 - .../yt_dlp/extractor/periscope.py | 16 +- .../yt_dlp/extractor/philharmoniedeparis.py | 43 +- .../yt_dlp/extractor/phoenix.py | 3 - .../yt_dlp/extractor/photobucket.py | 2 - .../yt_dlp/extractor/piapro.py | 105 + .../yt_dlp/extractor/picarto.py | 5 - .../yt_dlp/extractor/piksel.py | 15 +- .../yt_dlp/extractor/pinkbike.py | 4 - .../yt_dlp/extractor/pinterest.py | 157 +- .../yt_dlp/extractor/pixivsketch.py | 4 - .../yt_dlp/extractor/pladform.py | 15 +- .../yt_dlp/extractor/planetmarathi.py | 4 - .../yt_dlp/extractor/platzi.py | 13 +- .../yt_dlp/extractor/playfm.py | 4 - .../yt_dlp/extractor/playplustv.py | 16 +- .../yt_dlp/extractor/plays.py | 4 - .../yt_dlp/extractor/playstuff.py | 2 - .../yt_dlp/extractor/playsuisse.py | 147 + .../yt_dlp/extractor/playtvak.py | 4 - .../yt_dlp/extractor/playvid.py | 19 +- .../yt_dlp/extractor/playwire.py | 6 +- .../yt_dlp/extractor/pluralsight.py | 13 +- .../yt_dlp/extractor/plutotv.py | 17 +- .../yt_dlp/extractor/podbayfm.py | 75 + .../yt_dlp/extractor/podchaser.py | 97 + .../yt_dlp/extractor/podomatic.py | 2 - .../yt_dlp/extractor/pokemon.py | 4 - .../yt_dlp/extractor/pokergo.py | 13 +- .../yt_dlp/extractor/polsatgo.py | 4 - .../yt_dlp/extractor/polskieradio.py | 218 +- .../yt_dlp/extractor/popcorntimes.py | 11 +- .../yt_dlp/extractor/popcorntv.py | 3 - .../yt_dlp/extractor/porn91.py | 92 +- .../yt_dlp/extractor/porncom.py | 4 - .../yt_dlp/extractor/pornez.py | 7 +- .../yt_dlp/extractor/pornflip.py | 4 - .../yt_dlp/extractor/pornhd.py | 4 - .../yt_dlp/extractor/pornhub.py | 47 +- .../yt_dlp/extractor/pornotube.py | 2 - .../yt_dlp/extractor/pornovoisines.py | 5 - .../yt_dlp/extractor/pornoxo.py | 3 - .../yt_dlp/extractor/prankcast.py | 66 + .../yt_dlp/extractor/premiershiprugby.py | 39 + .../yt_dlp/extractor/presstv.py | 4 - .../yt_dlp/extractor/projectveritas.py | 4 - .../yt_dlp/extractor/prosiebensat1.py | 4 - .../youtube_download/yt_dlp/extractor/prx.py | 3 - .../yt_dlp/extractor/puhutv.py | 4 - .../yt_dlp/extractor/puls4.py | 10 +- .../yt_dlp/extractor/pyvideo.py | 2 - .../yt_dlp/extractor/qingting.py | 47 + .../yt_dlp/extractor/qqmusic.py | 4 - .../youtube_download/yt_dlp/extractor/r7.py | 4 - .../yt_dlp/extractor/radiko.py | 101 +- 
.../yt_dlp/extractor/radiobremen.py | 4 - .../yt_dlp/extractor/radiocanada.py | 5 - .../yt_dlp/extractor/radiode.py | 3 - .../yt_dlp/extractor/radiofrance.py | 53 +- .../yt_dlp/extractor/radiojavan.py | 3 - .../yt_dlp/extractor/radiokapital.py | 2 - .../yt_dlp/extractor/radiozet.py | 1 - .../yt_dlp/extractor/radlive.py | 7 +- .../youtube_download/yt_dlp/extractor/rai.py | 254 +- .../yt_dlp/extractor/raywenderlich.py | 2 - .../yt_dlp/extractor/rbmaradio.py | 3 - .../youtube_download/yt_dlp/extractor/rcs.py | 465 +- .../youtube_download/yt_dlp/extractor/rcti.py | 5 - .../youtube_download/yt_dlp/extractor/rds.py | 3 - .../yt_dlp/extractor/redbee.py | 379 + .../yt_dlp/extractor/redbulltv.py | 7 +- .../yt_dlp/extractor/reddit.py | 144 +- .../yt_dlp/extractor/redgifs.py | 40 +- .../yt_dlp/extractor/redtube.py | 12 +- .../yt_dlp/extractor/regiotv.py | 3 - .../yt_dlp/extractor/rentv.py | 4 - .../yt_dlp/extractor/restudy.py | 4 - .../yt_dlp/extractor/reuters.py | 4 - .../yt_dlp/extractor/reverbnation.py | 2 - .../youtube_download/yt_dlp/extractor/rice.py | 4 - .../yt_dlp/extractor/rmcdecouverte.py | 4 - .../yt_dlp/extractor/ro220.py | 43 - .../yt_dlp/extractor/rockstargames.py | 5 - .../yt_dlp/extractor/rokfin.py | 409 + .../yt_dlp/extractor/roosterteeth.py | 10 +- .../yt_dlp/extractor/rottentomatoes.py | 2 - .../yt_dlp/extractor/roxwel.py | 52 - .../yt_dlp/extractor/rozhlas.py | 143 +- .../youtube_download/yt_dlp/extractor/rtbf.py | 159 - .../youtube_download/yt_dlp/extractor/rte.py | 5 - .../youtube_download/yt_dlp/extractor/rtl2.py | 6 - .../yt_dlp/extractor/rtlnl.py | 156 +- .../yt_dlp/extractor/rtnews.py | 3 - .../youtube_download/yt_dlp/extractor/rtp.py | 3 - .../yt_dlp/extractor/rtrfm.py | 2 - .../youtube_download/yt_dlp/extractor/rts.py | 6 +- .../youtube_download/yt_dlp/extractor/rtve.py | 31 +- .../yt_dlp/extractor/rtvnh.py | 4 - .../youtube_download/yt_dlp/extractor/rtvs.py | 74 +- .../yt_dlp/extractor/rtvslo.py | 150 + .../youtube_download/yt_dlp/extractor/ruhd.py | 3 - .../yt_dlp/extractor/rule34video.py | 6 +- .../yt_dlp/extractor/rumble.py | 296 +- .../yt_dlp/extractor/rutube.py | 44 +- .../youtube_download/yt_dlp/extractor/rutv.py | 44 +- .../yt_dlp/extractor/ruutu.py | 53 +- .../youtube_download/yt_dlp/extractor/ruv.py | 91 +- .../yt_dlp/extractor/safari.py | 12 +- .../yt_dlp/extractor/saitosan.py | 4 - .../yt_dlp/extractor/samplefocus.py | 3 - .../youtube_download/yt_dlp/extractor/sapo.py | 5 - .../yt_dlp/extractor/savefrom.py | 3 - .../youtube_download/yt_dlp/extractor/sbs.py | 16 +- .../yt_dlp/extractor/screen9.py | 62 + .../yt_dlp/extractor/screencast.py | 14 +- .../yt_dlp/extractor/screencastify.py | 52 + .../yt_dlp/extractor/screencastomatic.py | 27 +- .../yt_dlp/extractor/scrippsnetworks.py | 3 - .../yt_dlp/extractor/scrolller.py | 102 + .../youtube_download/yt_dlp/extractor/scte.py | 11 +- .../yt_dlp/extractor/seeker.py | 3 - .../yt_dlp/extractor/senategov.py | 17 +- .../yt_dlp/extractor/sendtonews.py | 13 +- .../yt_dlp/extractor/servus.py | 173 +- .../yt_dlp/extractor/sevenplus.py | 7 +- .../youtube_download/yt_dlp/extractor/sexu.py | 3 - .../yt_dlp/extractor/seznamzpravy.py | 16 +- .../yt_dlp/extractor/shahid.py | 12 +- .../yt_dlp/extractor/shared.py | 13 +- .../yt_dlp/extractor/sharevideos.py | 6 + .../yt_dlp/extractor/shemaroome.py | 4 - .../yt_dlp/extractor/showroomlive.py | 4 - .../yt_dlp/extractor/sibnet.py | 17 + .../yt_dlp/extractor/simplecast.py | 19 +- .../youtube_download/yt_dlp/extractor/sina.py | 5 - .../yt_dlp/extractor/sixplay.py | 5 - 
.../youtube_download/yt_dlp/extractor/skeb.py | 3 - .../youtube_download/yt_dlp/extractor/sky.py | 3 - .../yt_dlp/extractor/skyit.py | 99 +- .../yt_dlp/extractor/skylinewebcams.py | 3 - .../yt_dlp/extractor/skynewsarabia.py | 3 - .../yt_dlp/extractor/skynewsau.py | 3 - .../yt_dlp/extractor/slideshare.py | 2 - .../yt_dlp/extractor/slideslive.py | 577 +- .../yt_dlp/extractor/slutload.py | 2 - .../yt_dlp/extractor/smotrim.py | 65 + .../yt_dlp/extractor/snotr.py | 4 - .../youtube_download/yt_dlp/extractor/sohu.py | 4 - .../yt_dlp/extractor/sonyliv.py | 50 +- .../yt_dlp/extractor/soundcloud.py | 439 +- .../yt_dlp/extractor/soundgasm.py | 3 - .../yt_dlp/extractor/southpark.py | 54 +- .../yt_dlp/extractor/sovietscloset.py | 18 +- .../yt_dlp/extractor/spankbang.py | 11 +- .../yt_dlp/extractor/spankwire.py | 10 +- .../yt_dlp/extractor/spiegel.py | 5 +- .../yt_dlp/extractor/spiegeltv.py | 17 - .../yt_dlp/extractor/spike.py | 2 - .../yt_dlp/extractor/sport5.py | 5 - .../yt_dlp/extractor/sportbox.py | 13 +- .../yt_dlp/extractor/sportdeutschland.py | 3 - .../yt_dlp/extractor/spotify.py | 53 +- .../yt_dlp/extractor/spreaker.py | 3 - .../yt_dlp/extractor/springboardplatform.py | 14 +- .../yt_dlp/extractor/sprout.py | 3 - .../yt_dlp/extractor/srgssr.py | 5 - .../yt_dlp/extractor/srmediathek.py | 3 - .../yt_dlp/extractor/stanfordoc.py | 2 - .../yt_dlp/extractor/startrek.py | 75 + .../yt_dlp/extractor/startv.py | 3 - .../yt_dlp/extractor/steam.py | 49 +- .../yt_dlp/extractor/stitcher.py | 2 - .../yt_dlp/extractor/storyfire.py | 5 +- .../yt_dlp/extractor/streamable.py | 15 +- .../yt_dlp/extractor/streamanity.py | 4 - .../yt_dlp/extractor/streamcloud.py | 3 - .../yt_dlp/extractor/streamcz.py | 24 +- .../yt_dlp/extractor/streamff.py | 1 - .../yt_dlp/extractor/streetvoice.py | 3 - .../yt_dlp/extractor/stretchinternet.py | 2 - .../yt_dlp/extractor/stripchat.py | 49 +- .../youtube_download/yt_dlp/extractor/stv.py | 8 +- .../yt_dlp/extractor/substack.py | 100 + .../yt_dlp/extractor/sunporno.py | 6 +- .../yt_dlp/extractor/sverigesradio.py | 4 - .../youtube_download/yt_dlp/extractor/svt.py | 37 +- .../yt_dlp/extractor/swearnet.py | 73 + .../yt_dlp/extractor/swrmediathek.py | 4 - .../youtube_download/yt_dlp/extractor/syfy.py | 2 - .../yt_dlp/extractor/syvdk.py | 33 + .../yt_dlp/extractor/sztvhu.py | 3 - .../yt_dlp/extractor/tagesschau.py | 5 - .../youtube_download/yt_dlp/extractor/tass.py | 4 - .../yt_dlp/extractor/tastytrade.py | 43 - .../youtube_download/yt_dlp/extractor/tbs.py | 3 - .../yt_dlp/extractor/tdslifeway.py | 2 - .../yt_dlp/extractor/teachable.py | 19 +- .../yt_dlp/extractor/teachertube.py | 5 - .../yt_dlp/extractor/teachingchannel.py | 2 - .../yt_dlp/extractor/teamcoco.py | 4 - .../yt_dlp/extractor/teamtreehouse.py | 10 +- .../yt_dlp/extractor/techtalks.py | 2 - .../youtube_download/yt_dlp/extractor/ted.py | 8 +- .../yt_dlp/extractor/tele13.py | 4 - .../yt_dlp/extractor/tele5.py | 90 +- .../yt_dlp/extractor/telebruxelles.py | 4 - .../yt_dlp/extractor/telecinco.py | 4 - .../yt_dlp/extractor/telegraaf.py | 9 +- .../yt_dlp/extractor/telegram.py | 136 + .../yt_dlp/extractor/telemb.py | 4 - .../yt_dlp/extractor/telemundo.py | 4 - .../yt_dlp/extractor/telequebec.py | 3 - .../yt_dlp/extractor/teletask.py | 2 - .../yt_dlp/extractor/telewebion.py | 3 - .../yt_dlp/extractor/tempo.py | 53 + .../yt_dlp/extractor/tencent.py | 452 + .../yt_dlp/extractor/tennistv.py | 177 +- .../yt_dlp/extractor/tenplay.py | 46 +- .../yt_dlp/extractor/testurl.py | 56 +- .../youtube_download/yt_dlp/extractor/tf1.py | 3 - 
.../youtube_download/yt_dlp/extractor/tfo.py | 3 - .../yt_dlp/extractor/theholetv.py | 35 + .../yt_dlp/extractor/theintercept.py | 3 - .../yt_dlp/extractor/theplatform.py | 30 +- .../yt_dlp/extractor/thescene.py | 44 - .../yt_dlp/extractor/thestar.py | 3 - .../yt_dlp/extractor/thesun.py | 2 - .../yt_dlp/extractor/theta.py | 5 - .../yt_dlp/extractor/theweatherchannel.py | 6 +- .../yt_dlp/extractor/thisamericanlife.py | 2 - .../yt_dlp/extractor/thisav.py | 8 +- .../yt_dlp/extractor/thisoldhouse.py | 3 - .../yt_dlp/extractor/thisvid.py | 226 + .../yt_dlp/extractor/threeqsdn.py | 29 +- .../yt_dlp/extractor/threespeak.py | 4 - .../yt_dlp/extractor/tiktok.py | 405 +- .../yt_dlp/extractor/tinypic.py | 2 - .../youtube_download/yt_dlp/extractor/tmz.py | 62 +- .../yt_dlp/extractor/tnaflix.py | 202 +- .../yt_dlp/extractor/toggle.py | 4 - .../yt_dlp/extractor/toggo.py | 11 +- .../yt_dlp/extractor/tokentube.py | 5 - .../yt_dlp/extractor/tonline.py | 3 - .../yt_dlp/extractor/toongoggles.py | 4 - .../yt_dlp/extractor/toutv.py | 12 +- .../yt_dlp/extractor/toypics.py | 3 - .../yt_dlp/extractor/traileraddict.py | 5 +- .../yt_dlp/extractor/triller.py | 294 + .../yt_dlp/extractor/trilulilu.py | 3 - .../yt_dlp/extractor/trovo.py | 308 +- .../yt_dlp/extractor/trtcocuk.py | 48 + .../yt_dlp/extractor/trueid.py | 3 - .../yt_dlp/extractor/trunews.py | 2 - .../yt_dlp/extractor/truth.py | 69 + .../yt_dlp/extractor/trutv.py | 4 - .../yt_dlp/extractor/tube8.py | 11 +- .../yt_dlp/extractor/tubetugraz.py | 233 + .../yt_dlp/extractor/tubitv.py | 77 +- .../yt_dlp/extractor/tudou.py | 49 - .../yt_dlp/extractor/tumblr.py | 353 +- .../yt_dlp/extractor/tunein.py | 11 +- .../yt_dlp/extractor/tunepk.py | 3 - .../yt_dlp/extractor/turbo.py | 4 - .../yt_dlp/extractor/turner.py | 7 +- .../youtube_download/yt_dlp/extractor/tv2.py | 45 +- .../yt_dlp/extractor/tv24ua.py | 78 + .../yt_dlp/extractor/tv2dk.py | 21 +- .../yt_dlp/extractor/tv2hu.py | 3 - .../youtube_download/yt_dlp/extractor/tv4.py | 5 - .../yt_dlp/extractor/tv5mondeplus.py | 4 - .../yt_dlp/extractor/tv5unis.py | 4 - .../youtube_download/yt_dlp/extractor/tva.py | 3 - .../yt_dlp/extractor/tvanouvelles.py | 3 - .../youtube_download/yt_dlp/extractor/tvc.py | 14 +- .../youtube_download/yt_dlp/extractor/tver.py | 121 +- .../yt_dlp/extractor/tvigle.py | 6 +- .../yt_dlp/extractor/tviplayer.py | 78 + .../yt_dlp/extractor/tvland.py | 3 - .../yt_dlp/extractor/tvn24.py | 4 - .../yt_dlp/extractor/tvnet.py | 8 +- .../yt_dlp/extractor/tvnoe.py | 3 - .../yt_dlp/extractor/tvnow.py | 7 +- .../yt_dlp/extractor/tvopengr.py | 33 +- .../youtube_download/yt_dlp/extractor/tvp.py | 280 +- .../yt_dlp/extractor/tvplay.py | 7 - .../yt_dlp/extractor/tvplayer.py | 4 - .../yt_dlp/extractor/tweakers.py | 3 - .../yt_dlp/extractor/twentyfourvideo.py | 4 - .../yt_dlp/extractor/twentymin.py | 13 +- .../yt_dlp/extractor/twentythreevideo.py | 3 - .../yt_dlp/extractor/twitcasting.py | 98 +- .../yt_dlp/extractor/twitch.py | 166 +- .../yt_dlp/extractor/twitter.py | 929 +- .../youtube_download/yt_dlp/extractor/txxx.py | 418 + .../yt_dlp/extractor/udemy.py | 50 +- .../youtube_download/yt_dlp/extractor/udn.py | 6 +- .../yt_dlp/extractor/ufctv.py | 3 - .../yt_dlp/extractor/ukcolumn.py | 2 - .../yt_dlp/extractor/uktvplay.py | 8 +- .../youtube_download/yt_dlp/extractor/umg.py | 4 - .../yt_dlp/extractor/unistra.py | 3 - .../yt_dlp/extractor/unity.py | 2 - .../yt_dlp/extractor/unscripted.py | 53 + .../yt_dlp/extractor/unsupported.py | 143 + .../youtube_download/yt_dlp/extractor/uol.py | 5 - .../yt_dlp/extractor/uplynk.py 
| 85 +- .../yt_dlp/extractor/urort.py | 13 +- .../yt_dlp/extractor/urplay.py | 85 +- .../yt_dlp/extractor/usanetwork.py | 5 +- .../yt_dlp/extractor/usatoday.py | 3 - .../yt_dlp/extractor/ustream.py | 12 +- .../yt_dlp/extractor/ustudio.py | 5 - .../yt_dlp/extractor/utreon.py | 4 - .../yt_dlp/extractor/varzesh3.py | 6 +- .../yt_dlp/extractor/vbox7.py | 14 +- .../yt_dlp/extractor/veehd.py | 2 - .../youtube_download/yt_dlp/extractor/veo.py | 52 +- .../youtube_download/yt_dlp/extractor/veoh.py | 67 +- .../yt_dlp/extractor/vesti.py | 3 - .../youtube_download/yt_dlp/extractor/vevo.py | 123 +- .../youtube_download/yt_dlp/extractor/vgtv.py | 16 +- .../youtube_download/yt_dlp/extractor/vh1.py | 3 - .../youtube_download/yt_dlp/extractor/vice.py | 17 +- .../yt_dlp/extractor/vidbit.py | 2 - .../yt_dlp/extractor/viddler.py | 6 +- .../yt_dlp/extractor/videa.py | 32 +- .../yt_dlp/extractor/videocampus_sachsen.py | 253 + .../yt_dlp/extractor/videodetective.py | 2 - .../yt_dlp/extractor/videofyme.py | 2 - .../yt_dlp/extractor/videoken.py | 336 + .../yt_dlp/extractor/videomore.py | 25 +- .../yt_dlp/extractor/videopress.py | 13 +- .../yt_dlp/extractor/vidio.py | 51 +- .../yt_dlp/extractor/vidlii.py | 6 +- .../yt_dlp/extractor/vidzi.py | 68 - .../youtube_download/yt_dlp/extractor/vier.py | 264 - .../yt_dlp/extractor/viewlift.py | 16 +- .../yt_dlp/extractor/viidea.py | 3 - .../youtube_download/yt_dlp/extractor/viki.py | 14 +- .../yt_dlp/extractor/vimeo.py | 398 +- .../youtube_download/yt_dlp/extractor/vimm.py | 3 - .../yt_dlp/extractor/vimple.py | 3 - .../youtube_download/yt_dlp/extractor/vine.py | 8 +- .../yt_dlp/extractor/viqeo.py | 15 +- .../youtube_download/yt_dlp/extractor/viu.py | 267 +- .../youtube_download/yt_dlp/extractor/vk.py | 329 +- .../yt_dlp/extractor/vlive.py | 367 - .../yt_dlp/extractor/vodlocker.py | 3 - .../yt_dlp/extractor/vodpl.py | 3 - .../yt_dlp/extractor/vodplatform.py | 5 +- .../yt_dlp/extractor/voicerepublic.py | 3 - .../yt_dlp/extractor/voicy.py | 8 +- .../yt_dlp/extractor/volejtv.py | 40 + .../youtube_download/yt_dlp/extractor/voot.py | 9 +- .../yt_dlp/extractor/voxmedia.py | 6 +- .../youtube_download/yt_dlp/extractor/vrak.py | 3 - .../youtube_download/yt_dlp/extractor/vrt.py | 4 - .../youtube_download/yt_dlp/extractor/vrv.py | 83 +- .../yt_dlp/extractor/vshare.py | 25 +- .../youtube_download/yt_dlp/extractor/vtm.py | 3 - .../youtube_download/yt_dlp/extractor/vube.py | 170 - .../yt_dlp/extractor/vuclip.py | 2 - .../yt_dlp/extractor/vupload.py | 5 +- .../yt_dlp/extractor/vvvvid.py | 21 +- .../yt_dlp/extractor/vyborymos.py | 3 - .../yt_dlp/extractor/vzaar.py | 16 +- .../yt_dlp/extractor/wakanim.py | 3 - .../yt_dlp/extractor/walla.py | 4 - .../yt_dlp/extractor/wasdtv.py | 159 + .../yt_dlp/extractor/washingtonpost.py | 31 +- .../youtube_download/yt_dlp/extractor/wat.py | 7 +- .../yt_dlp/extractor/watchbox.py | 5 - .../yt_dlp/extractor/watchindianporn.py | 3 - .../youtube_download/yt_dlp/extractor/wdr.py | 62 +- .../yt_dlp/extractor/webcamerapl.py | 44 + .../yt_dlp/extractor/webcaster.py | 20 +- .../yt_dlp/extractor/webofstories.py | 5 - .../yt_dlp/extractor/weibo.py | 8 +- .../yt_dlp/extractor/weiqitv.py | 3 - .../yt_dlp/extractor/whowatch.py | 13 +- .../yt_dlp/extractor/wikimedia.py | 55 + .../yt_dlp/extractor/willow.py | 2 - .../yt_dlp/extractor/wimtv.py | 19 +- .../yt_dlp/extractor/wistia.py | 267 +- .../yt_dlp/extractor/wordpress.py | 154 + .../yt_dlp/extractor/worldstarhiphop.py | 2 - .../yt_dlp/extractor/wppilot.py | 10 +- .../yt_dlp/extractor/wrestleuniverse.py | 233 + 
.../youtube_download/yt_dlp/extractor/wsj.py | 7 +- .../youtube_download/yt_dlp/extractor/wwe.py | 2 - .../yt_dlp/extractor/xanimu.py | 51 + .../youtube_download/yt_dlp/extractor/xbef.py | 2 - .../yt_dlp/extractor/xboxclips.py | 3 - .../yt_dlp/extractor/xfileshare.py | 31 +- .../yt_dlp/extractor/xhamster.py | 27 +- .../yt_dlp/extractor/xiami.py | 201 - .../yt_dlp/extractor/ximalaya.py | 162 +- .../yt_dlp/extractor/xinpianchang.py | 90 + .../yt_dlp/extractor/xminus.py | 3 - .../youtube_download/yt_dlp/extractor/xnxx.py | 9 +- .../yt_dlp/extractor/xstream.py | 4 - .../yt_dlp/extractor/xtube.py | 3 - .../yt_dlp/extractor/xuite.py | 4 - .../yt_dlp/extractor/xvideos.py | 4 - .../yt_dlp/extractor/xxxymovies.py | 3 - .../yt_dlp/extractor/yahoo.py | 139 +- .../yt_dlp/extractor/yandexdisk.py | 4 - .../yt_dlp/extractor/yandexmusic.py | 6 +- .../yt_dlp/extractor/yandexvideo.py | 189 +- .../yt_dlp/extractor/yapfiles.py | 14 +- .../yt_dlp/extractor/yesjapan.py | 3 - .../yt_dlp/extractor/yinyuetai.py | 4 - .../yt_dlp/extractor/yle_areena.py | 113 + .../youtube_download/yt_dlp/extractor/ynet.py | 10 +- .../yt_dlp/extractor/youjizz.py | 6 +- .../yt_dlp/extractor/youku.py | 40 +- .../yt_dlp/extractor/younow.py | 5 +- .../yt_dlp/extractor/youporn.py | 39 +- .../yt_dlp/extractor/yourporn.py | 2 - .../yt_dlp/extractor/yourupload.py | 3 - .../yt_dlp/extractor/youtube.py | 3749 ++++--- .../yt_dlp/extractor/zapiks.py | 5 +- .../youtube_download/yt_dlp/extractor/zaq1.py | 101 - .../yt_dlp/extractor/zattoo.py | 724 +- .../youtube_download/yt_dlp/extractor/zdf.py | 207 +- .../youtube_download/yt_dlp/extractor/zee5.py | 98 +- .../yt_dlp/extractor/zeenews.py | 57 + .../yt_dlp/extractor/zhihu.py | 6 +- .../yt_dlp/extractor/zingmp3.py | 409 +- .../youtube_download/yt_dlp/extractor/zoom.py | 42 +- .../youtube_download/yt_dlp/extractor/zype.py | 13 +- plugins/youtube_download/yt_dlp/jsinterp.py | 791 +- plugins/youtube_download/yt_dlp/minicurses.py | 7 +- plugins/youtube_download/yt_dlp/options.py | 772 +- plugins/youtube_download/yt_dlp/plugins.py | 173 + .../yt_dlp/postprocessor/__init__.py | 18 +- .../yt_dlp/postprocessor/common.py | 71 +- .../yt_dlp/postprocessor/embedthumbnail.py | 79 +- .../yt_dlp/postprocessor/exec.py | 8 +- .../yt_dlp/postprocessor/ffmpeg.py | 555 +- .../yt_dlp/postprocessor/metadataparser.py | 34 +- .../yt_dlp/postprocessor/modify_chapters.py | 39 +- .../postprocessor/movefilesafterdownload.py | 7 +- .../yt_dlp/postprocessor/sponskrub.py | 25 +- .../yt_dlp/postprocessor/sponsorblock.py | 57 +- .../yt_dlp/postprocessor/xattrpp.py | 85 +- plugins/youtube_download/yt_dlp/socks.py | 70 +- plugins/youtube_download/yt_dlp/update.py | 534 +- plugins/youtube_download/yt_dlp/utils.py | 3443 ++++--- plugins/youtube_download/yt_dlp/version.py | 8 +- plugins/youtube_download/yt_dlp/webvtt.py | 61 +- .../solarfm/core/mixins/ui/tab_mixin.py | 2 +- .../SolarFM/solarfm/plugins/plugin_base.py | 14 + .../solarfm/shellfm/windows/tabs/tab.py | 5 +- .../shellfm/windows/tabs/utils/settings.py | 12 +- 1138 files changed, 48878 insertions(+), 40445 deletions(-) create mode 100644 plugins/translate/__init__.py create mode 100644 plugins/translate/__main__.py create mode 100644 plugins/translate/manifest.json create mode 100644 plugins/translate/plugin.py create mode 100644 plugins/translate/translate.glade delete mode 100644 plugins/youtube_download/yt_dlp-2022.2.4.dist-info/AUTHORS delete mode 100644 plugins/youtube_download/yt_dlp-2022.2.4.dist-info/INSTALLER delete mode 100644 
plugins/youtube_download/yt_dlp-2022.2.4.dist-info/LICENSE delete mode 100644 plugins/youtube_download/yt_dlp-2022.2.4.dist-info/METADATA delete mode 100644 plugins/youtube_download/yt_dlp-2022.2.4.dist-info/RECORD delete mode 100644 plugins/youtube_download/yt_dlp-2022.2.4.dist-info/REQUESTED delete mode 100644 plugins/youtube_download/yt_dlp-2022.2.4.dist-info/WHEEL delete mode 100644 plugins/youtube_download/yt_dlp-2022.2.4.dist-info/entry_points.txt delete mode 100644 plugins/youtube_download/yt_dlp-2022.2.4.dist-info/top_level.txt create mode 100644 plugins/youtube_download/yt_dlp/__pyinstaller/__init__.py create mode 100644 plugins/youtube_download/yt_dlp/__pyinstaller/hook-yt_dlp.py delete mode 100644 plugins/youtube_download/yt_dlp/compat.py create mode 100644 plugins/youtube_download/yt_dlp/compat/__init__.py create mode 100644 plugins/youtube_download/yt_dlp/compat/_deprecated.py create mode 100644 plugins/youtube_download/yt_dlp/compat/_legacy.py create mode 100644 plugins/youtube_download/yt_dlp/compat/compat_utils.py create mode 100644 plugins/youtube_download/yt_dlp/compat/functools.py create mode 100644 plugins/youtube_download/yt_dlp/compat/imghdr.py create mode 100644 plugins/youtube_download/yt_dlp/compat/shutil.py create mode 100644 plugins/youtube_download/yt_dlp/dependencies/Cryptodome.py create mode 100644 plugins/youtube_download/yt_dlp/dependencies/__init__.py create mode 100644 plugins/youtube_download/yt_dlp/downloader/fc2.py create mode 100644 plugins/youtube_download/yt_dlp/extractor/_extractors.py create mode 100644 plugins/youtube_download/yt_dlp/extractor/abematv.py create mode 100644 plugins/youtube_download/yt_dlp/extractor/acfun.py create mode 100644 plugins/youtube_download/yt_dlp/extractor/aeonco.py create mode 100644 plugins/youtube_download/yt_dlp/extractor/agora.py create mode 100644 plugins/youtube_download/yt_dlp/extractor/airtv.py create mode 100644 plugins/youtube_download/yt_dlp/extractor/aitube.py create mode 100644 plugins/youtube_download/yt_dlp/extractor/alsace20tv.py create mode 100644 plugins/youtube_download/yt_dlp/extractor/amazonminitv.py create mode 100644 plugins/youtube_download/yt_dlp/extractor/angel.py delete mode 100644 plugins/youtube_download/yt_dlp/extractor/animelab.py delete mode 100644 plugins/youtube_download/yt_dlp/extractor/animeondemand.py create mode 100644 plugins/youtube_download/yt_dlp/extractor/ant1newsgr.py delete mode 100644 plugins/youtube_download/yt_dlp/extractor/anvato_token_generator/__init__.py delete mode 100644 plugins/youtube_download/yt_dlp/extractor/anvato_token_generator/common.py delete mode 100644 plugins/youtube_download/yt_dlp/extractor/anvato_token_generator/nfl.py create mode 100644 plugins/youtube_download/yt_dlp/extractor/atscaleconf.py create mode 100644 plugins/youtube_download/yt_dlp/extractor/audiodraft.py create mode 100644 plugins/youtube_download/yt_dlp/extractor/banbye.py create mode 100644 plugins/youtube_download/yt_dlp/extractor/beatbump.py create mode 100644 plugins/youtube_download/yt_dlp/extractor/berufetv.py create mode 100644 plugins/youtube_download/yt_dlp/extractor/bigo.py delete mode 100644 plugins/youtube_download/yt_dlp/extractor/blinkx.py create mode 100644 plugins/youtube_download/yt_dlp/extractor/booyah.py create mode 100644 plugins/youtube_download/yt_dlp/extractor/bundesliga.py create mode 100644 plugins/youtube_download/yt_dlp/extractor/caltrans.py create mode 100644 plugins/youtube_download/yt_dlp/extractor/camsoda.py create mode 100644 
plugins/youtube_download/yt_dlp/extractor/camtasia.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/cellebrite.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/cinetecamilano.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/cpac.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/craftsy.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/cybrary.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/dailywire.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/daystar.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/detik.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/deuxm.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/discoveryvr.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/doodstream.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/epoch.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/eurosport.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/everyonesmixtape.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/fifa.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/fivemin.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/fourzerostudio.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/fptplay.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/franceculture.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/freetv.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/freshlive.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/fuyintv.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/fxnetworks.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/genericembeds.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/genius.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/goodgame.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/goplay.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/harpodeon.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/holodex.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/hornbunny.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/huya.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/hytale.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/icareus.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/iltalehti.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/ir90tv.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/islamchannel.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/israelnationalnews.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/itprotv.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/ixigua.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/jable.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/japandiet.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/jixie.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/kanal2.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/kanalplay.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/kankan.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/kankanews.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/kick.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/kicker.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/kompas.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/kth.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/lastfm.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/lazy_extractors.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/likee.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/liputan6.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/listennotes.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/livestreamfails.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/masters.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/mediastream.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/mediaworksnz.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/microsoftembed.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/mirrorcouk.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/mocha.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/moviepilot.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/moview.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/murrtube.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/netverse.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/newspicks.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/nfb.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/noco.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/noice.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/nosnl.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/nzonscreen.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/oftv.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/onenewsnz.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/oneplace.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/panopto.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/parler.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/parliamentliveuk.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/peekvids.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/piapro.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/playsuisse.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/podbayfm.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/podchaser.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/prankcast.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/premiershiprugby.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/qingting.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/redbee.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/ro220.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/rokfin.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/roxwel.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/rtbf.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/rtvslo.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/screen9.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/screencastify.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/scrolller.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/sharevideos.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/sibnet.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/smotrim.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/spiegeltv.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/startrek.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/substack.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/swearnet.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/syvdk.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/tastytrade.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/telegram.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/tempo.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/tencent.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/theholetv.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/thescene.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/thisvid.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/triller.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/trtcocuk.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/truth.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/tubetugraz.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/tudou.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/tv24ua.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/tviplayer.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/txxx.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/unscripted.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/unsupported.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/videocampus_sachsen.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/videoken.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/vidzi.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/vier.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/vlive.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/volejtv.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/vube.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/wasdtv.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/webcamerapl.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/wikimedia.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/wordpress.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/wrestleuniverse.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/xanimu.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/xiami.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/xinpianchang.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/yle_areena.py
 delete mode 100644 plugins/youtube_download/yt_dlp/extractor/zaq1.py
 create mode 100644 plugins/youtube_download/yt_dlp/extractor/zeenews.py
 create mode 100644 plugins/youtube_download/yt_dlp/plugins.py

diff --git a/plugins/archiver/plugin.py b/plugins/archiver/plugin.py
index 2ab92cd..a142ee8 100644
--- a/plugins/archiver/plugin.py
+++ b/plugins/archiver/plugin.py
@@ -1,8 +1,5 @@
 # Python imports
 import os
-import threading
-import subprocess
-import inspect
 import shlex
 
 # Lib imports
@@ -14,28 +11,16 @@ from gi.repository import Gtk
 from plugins.plugin_base import PluginBase
 
 
-# NOTE: Threads WILL NOT die with parent's destruction.
-def threaded(fn):
-    def wrapper(*args, **kwargs):
-        threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=False).start()
-    return wrapper
-
-# NOTE: Threads WILL die with parent's destruction.
-def daemon_threaded(fn):
-    def wrapper(*args, **kwargs):
-        threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=True).start()
-    return wrapper
-
-
 class Plugin(PluginBase):
     def __init__(self):
         super().__init__()
 
-        self.path = os.path.dirname(os.path.realpath(__file__))
-        self._GLADE_FILE = f"{self.path}/archiver.glade"
+        self.name = "Archiver"  # NOTE: Need to remove after establishing private bidirectional 1-1 message bus
+                                # where self.name should not be needed for message comms
+        self.path = os.path.dirname(os.path.realpath(__file__))
+        self._GLADE_FILE = f"{self.path}/archiver.glade"
 
         self._archiver_dialogue = None
         self._arc_command_buffer = None
@@ -67,20 +52,9 @@ class Plugin(PluginBase):
 
     def generate_reference_ui_element(self):
-        self._builder = Gtk.Builder()
+        self._builder = Gtk.Builder()
         self._builder.add_from_file(self._GLADE_FILE)
-
-        classes = [self]
-        handlers = {}
-        for c in classes:
-            methods = None
-            try:
-                methods = inspect.getmembers(c, predicate=inspect.ismethod)
-                handlers.update(methods)
-            except Exception as e:
-                print(repr(e))
-
-        self._builder.connect_signals(handlers)
+        self._connect_builder_signals(self, self._builder)
 
         self._archiver_dialogue = self._builder.get_object("archiver_dialogue")
         self._arc_command_buffer = self._builder.get_object("arc_command_buffer")
diff --git a/plugins/disk_usage/plugin.py b/plugins/disk_usage/plugin.py
index 65b9fa7..3811661 100644
--- a/plugins/disk_usage/plugin.py
+++ b/plugins/disk_usage/plugin.py
@@ -2,7 +2,6 @@
 import os
 import subprocess
 import time
-import inspect
 
 # Lib imports
 import gi
@@ -29,20 +28,9 @@ class Plugin(PluginBase):
 
     def run(self):
-        self._builder = Gtk.Builder()
+        self._builder = Gtk.Builder()
         self._builder.add_from_file(self._GLADE_FILE)
-
-        classes = [self]
-        handlers = {}
-        for c in classes:
-            methods = None
-            try:
-                methods = inspect.getmembers(c, predicate=inspect.ismethod)
-                handlers.update(methods)
-            except Exception as e:
-                print(repr(e))
-
-        self._builder.connect_signals(handlers)
+        self._connect_builder_signals(self, self._builder)
 
         self._du_dialog = self._builder.get_object("du_dialog")
         self._du_store = self._builder.get_object("du_store")
diff --git a/plugins/favorites/plugin.py b/plugins/favorites/plugin.py
index 585f8b9..a406147 100644
--- a/plugins/favorites/plugin.py
+++ b/plugins/favorites/plugin.py
@@ -1,6 +1,5 @@
 # Python imports
 import os
-import inspect
 import json
 
 # Lib imports
@@ -31,20 +30,9 @@ class Plugin(PluginBase):
 
     def run(self):
-        self._builder = Gtk.Builder()
+        self._builder = Gtk.Builder()
         self._builder.add_from_file(self._GLADE_FILE)
-
-        classes = [self]
-        handlers = {}
-        for c in classes:
-            methods = None
-            try:
-                methods = inspect.getmembers(c, predicate=inspect.ismethod)
-                handlers.update(methods)
-            except Exception as e:
-                print(repr(e))
-
-        self._builder.connect_signals(handlers)
+        self._connect_builder_signals(self, self._builder)
 
         self._favorites_dialog = self._builder.get_object("favorites_dialog")
         self._favorites_store = self._builder.get_object("favorites_store")
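The `_connect_builder_signals()` helper these plugins now call lives in `plugins/plugin_base.py`, which this patch does not touch. A minimal sketch of what it presumably looks like, reconstructed from the inline logic it replaces above (hypothetical, for reference only):

```
# Hypothetical reconstruction of PluginBase._connect_builder_signals,
# based on the inspect-based block removed from each plugin above.
import inspect


class PluginBase:
    def _connect_builder_signals(self, caller, builder):
        # Collect every bound method of the caller...
        handlers = {}
        try:
            methods = inspect.getmembers(caller, predicate=inspect.ismethod)
            handlers.update(methods)
        except Exception as e:
            print(repr(e))

        # ...and let Gtk.Builder wire them to the signal names declared in Glade.
        builder.connect_signals(handlers)
```

Centralizing this removes the same copy-pasted `inspect` loop from every plugin's `run()`.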
diff --git a/plugins/file_properties/file_properties.glade b/plugins/file_properties/file_properties.glade
index 7390e9e..24cdf8c 100644
--- a/plugins/file_properties/file_properties.glade
+++ b/plugins/file_properties/file_properties.glade
@@ -1,5 +1,5 @@
[first hunk: one line replaced; the Glade XML markup was stripped from this copy, so its content is not recoverable]
@@ -14,7 +14,6 @@
[second hunk: one "True" property line removed; surviving context values: True / True / center]
diff --git a/plugins/file_properties/plugin.py b/plugins/file_properties/plugin.py
index e29a3dc..1036fcf 100644
--- a/plugins/file_properties/plugin.py
+++ b/plugins/file_properties/plugin.py
@@ -24,12 +24,6 @@ def threaded(fn):
         threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=False).start()
     return wrapper
 
-# NOTE: Threads WILL die with parent's destruction.
-def daemon_threaded(fn):
-    def wrapper(*args, **kwargs):
-        threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=True).start()
-    return wrapper
-
 
 
@@ -51,10 +45,10 @@ class Plugin(PluginBase):
     def __init__(self):
         super().__init__()
 
-        self.path = os.path.dirname(os.path.realpath(__file__))
-        self._GLADE_FILE = f"{self.path}/file_properties.glade"
         self.name = "Properties"  # NOTE: Need to remove after establishing private bidirectional 1-1 message bus
                                   # where self.name should not be needed for message comms
+        self.path = os.path.dirname(os.path.realpath(__file__))
+        self._GLADE_FILE = f"{self.path}/file_properties.glade"
 
         self._properties_dialog = None
         self._file_name = None
@@ -91,8 +85,9 @@ class Plugin(PluginBase):
 
     def run(self):
-        self._builder = Gtk.Builder()
+        self._builder = Gtk.Builder()
         self._builder.add_from_file(self._GLADE_FILE)
+        self._connect_builder_signals(self, self._builder)
 
         self._properties_dialog = self._builder.get_object("file_properties_dialog")
         self._file_name = self._builder.get_object("file_name")
diff --git a/plugins/git_clone/plugin.py b/plugins/git_clone/plugin.py
index bfa2e80..7e870f8 100644
--- a/plugins/git_clone/plugin.py
+++ b/plugins/git_clone/plugin.py
@@ -1,5 +1,8 @@
 # Python imports
-import os, threading, subprocess, time
+import os
+import threading
+import subprocess
+import time
 
 # Lib imports
 import gi
@@ -16,12 +19,6 @@ def threaded(fn):
         threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=False).start()
     return wrapper
 
-# NOTE: Threads WILL die with parent's destruction.
-def daemon_threaded(fn):
-    def wrapper(*args, **kwargs):
-        threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=True).start()
-    return wrapper
-
 
 
@@ -29,9 +26,9 @@ class Plugin(PluginBase):
     def __init__(self):
         super().__init__()
 
-        self.path = os.path.dirname(os.path.realpath(__file__))
         self.name = "Git Clone"  # NOTE: Need to remove after establishing private bidirectional 1-1 message bus
                                  # where self.name should not be needed for message comms
+        self.path = os.path.dirname(os.path.realpath(__file__))
 
     def generate_reference_ui_element(self):
         button = Gtk.Button(label=self.name)
diff --git a/plugins/movie_tv_info/plugin.py b/plugins/movie_tv_info/plugin.py
index 6bd8c45..a772153 100644
--- a/plugins/movie_tv_info/plugin.py
+++ b/plugins/movie_tv_info/plugin.py
@@ -2,7 +2,6 @@
 import os
 import threading
 import subprocess
-import inspect
 import requests
 import shutil
 
@@ -38,9 +37,9 @@ class Plugin(PluginBase):
     def __init__(self):
         super().__init__()
 
-        self.path = os.path.dirname(os.path.realpath(__file__))
         self.name = "Movie/TV Info"  # NOTE: Need to remove after establishing private bidirectional 1-1 message bus
                                      # where self.name should not be needed for message comms
+        self.path = os.path.dirname(os.path.realpath(__file__))
         self._GLADE_FILE = f"{self.path}/movie_tv_info.glade"
 
         self._dialog = None
@@ -53,20 +52,9 @@ class Plugin(PluginBase):
 
     def run(self):
-        self._builder = Gtk.Builder()
+        self._builder = Gtk.Builder()
         self._builder.add_from_file(self._GLADE_FILE)
-
-        classes = [self]
-        handlers = {}
-        for c in classes:
-            methods = None
-            try:
-                methods = inspect.getmembers(c, predicate=inspect.ismethod)
-                handlers.update(methods)
-            except Exception as e:
-                print(repr(e))
-
-        self._builder.connect_signals(handlers)
+        self._connect_builder_signals(self, self._builder)
 
         self._thumbnailer_dialog = self._builder.get_object("info_dialog")
         self._overview = self._builder.get_object("textbuffer")
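A pattern worth calling out: the plugins above keep the non-daemon `threaded` decorator where it is actually used and drop their unused local copies of `daemon_threaded` (the translate plugin added below defines its own). For readers unfamiliar with the distinction the NOTE comments refer to, a small self-contained illustration, not part of the patch:

```
# Standalone illustration of the two decorator variants seen in this patch.
import threading
import time


def threaded(fn):
    # daemon=False: the interpreter waits for this thread before exiting.
    def wrapper(*args, **kwargs):
        threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=False).start()
    return wrapper


def daemon_threaded(fn):
    # daemon=True: the thread is terminated abruptly when the parent process exits.
    def wrapper(*args, **kwargs):
        threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=True).start()
    return wrapper


@daemon_threaded
def background_poll():
    while True:     # an infinite loop like this would block shutdown if non-daemon
        time.sleep(1)


background_poll()   # returns immediately; the thread dies with the process
```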
diff --git a/plugins/searcher/plugin.py b/plugins/searcher/plugin.py
index 391fe1b..4851166 100644
--- a/plugins/searcher/plugin.py
+++ b/plugins/searcher/plugin.py
@@ -1,7 +1,5 @@
 # Python imports
 import os
-import threading
-import inspect
 import time
 
 # Lib imports
@@ -18,28 +16,14 @@
 from .utils.ipc_server import IPCServer
 
 
-# NOTE: Threads WILL NOT die with parent's destruction.
-def threaded(fn):
-    def wrapper(*args, **kwargs):
-        threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=False).start()
-    return wrapper
-
-# NOTE: Threads WILL die with parent's destruction.
-def daemon_threaded(fn):
-    def wrapper(*args, **kwargs):
-        threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=True).start()
-    return wrapper
-
-
-
 class Plugin(IPCServer, FileSearchMixin, GrepSearchMixin, PluginBase):
     def __init__(self):
         super().__init__()
 
-        self.path = os.path.dirname(os.path.realpath(__file__))
         self.name = "Search"  # NOTE: Need to remove after establishing private bidirectional 1-1 message bus
                               # where self.name should not be needed for message comms
+        self.path = os.path.dirname(os.path.realpath(__file__))
         self._GLADE_FILE = f"{self.path}/search_dialog.glade"
 
         self.update_list_ui_buffer = ()
@@ -59,20 +43,9 @@ class Plugin(IPCServer, FileSearchMixin, GrepSearchMixin, PluginBase):
 
     def run(self):
-        self._builder = Gtk.Builder()
+        self._builder = Gtk.Builder()
         self._builder.add_from_file(self._GLADE_FILE)
-
-        classes = [self]
-        handlers = {}
-        for c in classes:
-            methods = None
-            try:
-                methods = inspect.getmembers(c, predicate=inspect.ismethod)
-                handlers.update(methods)
-            except Exception as e:
-                print(repr(e))
-
-        self._builder.connect_signals(handlers)
+        self._connect_builder_signals(self, self._builder)
 
         self._search_dialog = self._builder.get_object("search_dialog")
         self.fsearch = self._builder.get_object("fsearch")
diff --git a/plugins/template/plugin.py b/plugins/template/plugin.py
index 89822ef..021e2ac 100644
--- a/plugins/template/plugin.py
+++ b/plugins/template/plugin.py
@@ -2,7 +2,6 @@
 import os
 import threading
 import subprocess
-import time
 
 # Lib imports
 import gi
@@ -32,8 +31,10 @@ class Plugin(PluginBase):
     def __init__(self):
         super().__init__()
 
-        self.name = "Example Plugin"  # NOTE: Need to remove after establishing private bidirectional 1-1 message bus
-                                      # where self.name should not be needed for message comms
+        self.name = "Example Plugin"  # NOTE: Need to remove after establishing private bidirectional 1-1 message bus
+                                      # where self.name should not be needed for message comms
+        # self.path = os.path.dirname(os.path.realpath(__file__))
+        # self._GLADE_FILE = f"{self.path}/glade_file.glade"
 
     def generate_reference_ui_element(self):
@@ -42,6 +43,9 @@ class Plugin(PluginBase):
         return button
 
     def run(self):
+        # self._builder = Gtk.Builder()
+        # self._builder.add_from_file(self._GLADE_FILE)
+        # self._connect_builder_signals(self, self._builder)
         ...
 
     def send_message(self, widget=None, eve=None):
diff --git a/plugins/translate/__init__.py b/plugins/translate/__init__.py
new file mode 100644
index 0000000..d36fa8c
--- /dev/null
+++ b/plugins/translate/__init__.py
@@ -0,0 +1,3 @@
+"""
+    Plugin Module
+"""
diff --git a/plugins/translate/__main__.py b/plugins/translate/__main__.py
new file mode 100644
index 0000000..a576329
--- /dev/null
+++ b/plugins/translate/__main__.py
@@ -0,0 +1,3 @@
+"""
+    Plugin Package
+"""
diff --git a/plugins/translate/manifest.json b/plugins/translate/manifest.json
new file mode 100644
index 0000000..91caabe
--- /dev/null
+++ b/plugins/translate/manifest.json
@@ -0,0 +1,12 @@
+{
+    "manifest": {
+        "name": "Translate",
+        "author": "ITDominator",
+        "version": "0.0.1",
+        "support": "",
+        "requests": {
+            "ui_target": "plugin_control_list",
+            "pass_fm_events": "true"
+        }
+    }
+}
diff --git a/plugins/translate/plugin.py b/plugins/translate/plugin.py
new file mode 100644
index 0000000..823b012
--- /dev/null
+++ b/plugins/translate/plugin.py
@@ -0,0 +1,134 @@
+# Python imports
+import os
+import time
+import threading
+import requests
+
+# Lib imports
+import gi
+gi.require_version('Gtk', '3.0')
+from gi.repository import Gtk
+from gi.repository import GLib
+
+# Application imports
+from plugins.plugin_base import PluginBase
+
+
+# NOTE: Threads WILL die with parent's destruction.
+def daemon_threaded(fn):
+    def wrapper(*args, **kwargs):
+        threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=True).start()
+    return wrapper
+
+
+
+
+class Plugin(PluginBase):
+    def __init__(self):
+        super().__init__()
+
+        self.path = os.path.dirname(os.path.realpath(__file__))
+        self.name = "Translate"  # NOTE: Need to remove after establishing private bidirectional 1-1 message bus
+                                 # where self.name should not be needed for message comms
+        self._GLADE_FILE = f"{self.path}/translate.glade"
+
+        self._link = "https://duckduckgo.com/translation.js?vqd=4-79469202070473384659389009732578528471&query=translate&to=en"
+        self._headers = {
+            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:106.0) Gecko/20100101 Firefox/106.0',
+            'Accept': '*/*',
+            'Accept-Language': 'en-US,en;q=0.5',
+            'Accept-Encoding': 'gzip, deflate, br',
+            'Referer': 'https://duckduckgo.com/',
+            'Content-Type': 'text/plain',
+            'X-Requested-With': 'XMLHttpRequest',
+            'Origin': 'https://duckduckgo.com',
+            'DNT': '1',
+            'Connection': 'keep-alive',
+            'Sec-Fetch-Dest': 'empty',
+            'Sec-Fetch-Mode': 'cors',
+            'Sec-Fetch-Site': 'same-origin',
+            'Pragma': 'no-cache',
+            'Cache-Control': 'no-cache'
+        }
+
+        self._queue_translate = False
+        self._watcher_running = False
+
+
+    def generate_reference_ui_element(self):
+        button = Gtk.Button(label=self.name)
+        button.connect("button-release-event", self._show_translate_page)
+        return button
+
+    def run(self):
+        self._builder = Gtk.Builder()
+        self._builder.add_from_file(self._GLADE_FILE)
+        self._connect_builder_signals(self, self._builder)
+
+        self._translate_dialog = self._builder.get_object("translate_dialog")
+        self._translate_from = self._builder.get_object("translate_from")
+        self._translate_to = self._builder.get_object("translate_to")
+        self._translate_from_buffer = self._builder.get_object("translate_from_buffer")
+        self._translate_to_buffer = self._builder.get_object("translate_to_buffer")
+        self._detected_language_lbl = self._builder.get_object("detected_language_lbl")
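For context, the request this plugin makes can be reproduced standalone. A minimal sketch follows; the `vqd` token is copied from the plugin, is tied to a browsing session, and the endpoint is unofficial, so treat this as illustrative rather than a stable API:

```
# Illustrative only: mirrors the plugin's use of DuckDuckGo's unofficial
# translation endpoint. The vqd value below comes straight from the plugin
# and may be rejected once the session it belongs to expires.
import requests

LINK = ("https://duckduckgo.com/translation.js"
        "?vqd=4-79469202070473384659389009732578528471&query=translate&to=en")
HEADERS = {"User-Agent": "Mozilla/5.0", "Content-Type": "text/plain"}

response = requests.post(LINK, headers=HEADERS, data="Hola mundo".encode("utf-8"))
if response.status_code == 200:
    data = response.json()
    print(data["translated"])             # e.g. "Hello world"
    print(data.get("detected_language"))  # e.g. "es"
```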
+
+
+    @threaded
+    def _show_translate_page(self, widget=None, eve=None):
+        event_system.emit("get_current_state")
+
+        state = self._fm_state
+        self._event_message = None
+
+        GLib.idle_add(self._show_ui, (state))
+
+    def _show_ui(self, state):
+        if state.uris and len(state.uris) == 1:
+            file_name = state.uris[0].split("/")[-1]
+            self._translate_from_buffer.set_text(file_name)
+
+        response = self._translate_dialog.run()
+        if response in [Gtk.ResponseType.CLOSE, Gtk.ResponseType.CANCEL, Gtk.ResponseType.DELETE_EVENT]:
+            self._translate_dialog.hide()
+
+        self._translate_dialog.hide()
+
+    def _pre_translate(self, widget=None, eve=None):
+        self._queue_translate = True
+
+        if not self._watcher_running:
+            self._watcher_running = True
+            self.run_translate_watcher()
+
+    @daemon_threaded
+    def run_translate_watcher(self):
+        while True:
+            if self._queue_translate:
+                self._queue_translate = False
+                time.sleep(1)
+
+                # NOTE: Hold call to translate if we're still typing/updating...
+                if self._queue_translate:
+                    continue
+
+            GLib.idle_add(self._translate)
+            self._watcher_running = False
+            break
+
+    def _translate(self):
+        start_itr, end_itr = self._translate_from_buffer.get_bounds()
+        from_translate = self._translate_from_buffer.get_text(start_itr, end_itr, True).encode('utf-8')
+
+        if from_translate in ("", None) or self._queue_translate:
+            return
+
+        response = requests.post(self._link, headers=self._headers, data=from_translate)
+        if response.status_code == 200:
+            data = response.json()
+            self._translate_to_buffer.set_text(data["translated"])
+
+            if "detected_language" in data.keys():
+                self._detected_language_lbl.set_label(f"Detected Language: {data['detected_language']}")
+        else:
+            msg = f"Could not translate... Response Code: {response.status_code}"
+            self._translate_to_buffer.set_text(msg)
+            self._detected_language_lbl.set_label(f"Detected Language:")
diff --git a/plugins/translate/translate.glade b/plugins/translate/translate.glade
new file mode 100644
index 0000000..c8e42e3
--- /dev/null
+++ b/plugins/translate/translate.glade
@@ -0,0 +1,210 @@
+[The Glade XML markup was stripped from this copy of the patch. The surviving values show the file defines a 620x320 "Translate" GtkDialog (center-on-parent) with cancel/close buttons, a "Detected Language:" label, and two word-wrapping "From:"/"To:" GtkTextViews bound to translate_from_buffer and translate_to_buffer.]
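One detail worth noting in the plugin above: the watcher runs on a daemon thread, but every widget update is funneled through `GLib.idle_add`, since GTK widgets must only be touched from the main loop. A stripped-down sketch of that debounce pattern (names here are illustrative, not from the patch):

```
# Stripped-down version of the plugin's debounce pattern: a daemon thread
# sleeps while keystrokes keep arriving, then schedules the real work on
# the GTK main loop via GLib.idle_add.
import threading
import time

from gi.repository import GLib

queued = False
running = False


def on_text_changed(*_):
    global queued, running
    queued = True
    if not running:
        running = True
        threading.Thread(target=watcher, daemon=True).start()


def watcher():
    global queued, running
    while queued:
        queued = False
        time.sleep(1)            # still typing? a new event re-queues us
    GLib.idle_add(do_translate)  # touch widgets only on the main thread
    running = False


def do_translate():
    ...  # safe to update a Gtk.TextBuffer here
```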
diff --git a/plugins/trasher/plugin.py b/plugins/trasher/plugin.py
index 2c6e8cd..ba9ac51 100644
--- a/plugins/trasher/plugin.py
+++ b/plugins/trasher/plugin.py
@@ -1,8 +1,5 @@
 # Python imports
 import os
-import threading
-import subprocess
-import inspect
 
 # Lib imports
 import gi
@@ -16,19 +13,6 @@ from plugins.plugin_base import PluginBase
 from .xdgtrash import XDGTrash
 
 
-# NOTE: Threads WILL NOT die with parent's destruction.
-def threaded(fn):
-    def wrapper(*args, **kwargs):
-        threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=False).start()
-    return wrapper
-
-# NOTE: Threads WILL die with parent's destruction.
-def daemon_threaded(fn):
-    def wrapper(*args, **kwargs):
-        threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=True).start()
-    return wrapper
-
-
 class Plugin(PluginBase):
diff --git a/plugins/vod_thumbnailer/plugin.py b/plugins/vod_thumbnailer/plugin.py
index e2b0af7..9fd51bc 100644
--- a/plugins/vod_thumbnailer/plugin.py
+++ b/plugins/vod_thumbnailer/plugin.py
@@ -3,7 +3,6 @@
 import os
 import threading
 import subprocess
 import time
-import inspect
 import hashlib
 from datetime import datetime
@@ -26,12 +25,6 @@ def threaded(fn):
         threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=False).start()
     return wrapper
 
-# NOTE: Threads WILL die with parent's destruction.
-def daemon_threaded(fn):
-    def wrapper(*args, **kwargs):
-        threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=True).start()
-    return wrapper
-
 
 
@@ -53,20 +46,9 @@ class Plugin(PluginBase):
 
     def run(self):
-        self._builder = Gtk.Builder()
+        self._builder = Gtk.Builder()
         self._builder.add_from_file(self._GLADE_FILE)
-
-        classes = [self]
-        handlers = {}
-        for c in classes:
-            methods = None
-            try:
-                methods = inspect.getmembers(c, predicate=inspect.ismethod)
-                handlers.update(methods)
-            except Exception as e:
-                print(repr(e))
-
-        self._builder.connect_signals(handlers)
+        self._connect_builder_signals(self, self._builder)
 
         self._thumbnailer_dialog = self._builder.get_object("thumbnailer_dialog")
         self._scrub_step = self._builder.get_object("scrub_step")
diff --git a/plugins/youtube_download/plugin.py b/plugins/youtube_download/plugin.py
index 1b4ce50..58df3b3 100644
--- a/plugins/youtube_download/plugin.py
+++ b/plugins/youtube_download/plugin.py
@@ -1,5 +1,8 @@
 # Python imports
-import os, threading, subprocess, time
+import os
+import threading
+import subprocess
+import time
 
 # Lib imports
 import gi
@@ -16,12 +19,6 @@ def threaded(fn):
         threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=False).start()
     return wrapper
 
-# NOTE: Threads WILL die with parent's destruction.
-def daemon_threaded(fn):
-    def wrapper(*args, **kwargs):
-        threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=True).start()
-    return wrapper
-
 
 
diff --git a/plugins/youtube_download/yt_dlp-2022.2.4.dist-info/AUTHORS b/plugins/youtube_download/yt_dlp-2022.2.4.dist-info/AUTHORS
deleted file mode 100644
index e69de29..0000000
diff --git a/plugins/youtube_download/yt_dlp-2022.2.4.dist-info/INSTALLER b/plugins/youtube_download/yt_dlp-2022.2.4.dist-info/INSTALLER
deleted file mode 100644
index a1b589e..0000000
--- a/plugins/youtube_download/yt_dlp-2022.2.4.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/plugins/youtube_download/yt_dlp-2022.2.4.dist-info/LICENSE b/plugins/youtube_download/yt_dlp-2022.2.4.dist-info/LICENSE
deleted file mode 100644
index 68a49da..0000000
--- a/plugins/youtube_download/yt_dlp-2022.2.4.dist-info/LICENSE
+++ /dev/null
@@ -1,24 +0,0 @@
-This is free and unencumbered software released into the public domain.
-
-Anyone is free to copy, modify, publish, use, compile, sell, or
-distribute this software, either in source code form or as a compiled
-binary, for any purpose, commercial or non-commercial, and by any
-means.
-
-In jurisdictions that recognize copyright laws, the author or authors
-of this software dedicate any and all copyright interest in the
-software to the public domain.
We make this dedication for the benefit -of the public at large and to the detriment of our heirs and -successors. We intend this dedication to be an overt act of -relinquishment in perpetuity of all present and future rights to this -software under copyright law. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR -OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - -For more information, please refer to diff --git a/plugins/youtube_download/yt_dlp-2022.2.4.dist-info/METADATA b/plugins/youtube_download/yt_dlp-2022.2.4.dist-info/METADATA deleted file mode 100644 index 1feb95d..0000000 --- a/plugins/youtube_download/yt_dlp-2022.2.4.dist-info/METADATA +++ /dev/null @@ -1,1992 +0,0 @@ -Metadata-Version: 2.1 -Name: yt-dlp -Version: 2022.2.4 -Summary: A youtube-dl fork with additional features and patches -Home-page: https://github.com/yt-dlp/yt-dlp -Maintainer: pukkandan -Maintainer-email: pukkandan.ytdlp@gmail.com -License: UNKNOWN -Project-URL: Documentation, https://yt-dlp.readthedocs.io -Project-URL: Source, https://github.com/yt-dlp/yt-dlp -Project-URL: Tracker, https://github.com/yt-dlp/yt-dlp/issues -Project-URL: Funding, https://github.com/yt-dlp/yt-dlp/blob/master/Collaborators.md#collaborators -Platform: UNKNOWN -Classifier: Topic :: Multimedia :: Video -Classifier: Development Status :: 5 - Production/Stable -Classifier: Environment :: Console -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Programming Language :: Python :: 3.8 -Classifier: Programming Language :: Python :: Implementation -Classifier: Programming Language :: Python :: Implementation :: CPython -Classifier: Programming Language :: Python :: Implementation :: PyPy -Classifier: License :: Public Domain -Classifier: Operating System :: OS Independent -Requires-Python: >=3.6 -Description-Content-Type: text/markdown -Requires-Dist: mutagen -Requires-Dist: pycryptodomex -Requires-Dist: websockets - -Official repository: - -**PS**: Some links in this document will not work since this is a copy of the README.md from Github - - -
- -[![YT-DLP](https://raw.githubusercontent.com/yt-dlp/yt-dlp/master/.github/banner.svg)](#readme) - -[![Release version](https://img.shields.io/github/v/release/yt-dlp/yt-dlp?color=blue&label=Download&style=for-the-badge)](#release-files "Release") -[![License: Unlicense](https://img.shields.io/badge/-Unlicense-brightgreen.svg?style=for-the-badge)](LICENSE "License") -[![Donate](https://img.shields.io/badge/_-Donate-red.svg?logo=githubsponsors&labelColor=555555&style=for-the-badge)](Collaborators.md#collaborators "Donate") -[![Docs](https://img.shields.io/badge/-Docs-blue.svg?color=blue&style=for-the-badge)](https://readthedocs.org/projects/yt-dlp/ "Docs") -[![Supported Sites](https://img.shields.io/badge/-Supported_Sites-brightgreen.svg?style=for-the-badge)](supportedsites.md "Supported Sites") -[![PyPi](https://img.shields.io/badge/-PyPi-blue.svg?logo=pypi&labelColor=555555&style=for-the-badge)](https://pypi.org/project/yt-dlp "PyPi") -[![CI Status](https://img.shields.io/github/workflow/status/yt-dlp/yt-dlp/Core%20Tests/master?label=Tests&style=for-the-badge)](https://github.com/yt-dlp/yt-dlp/actions "CI Status") -[![Discord](https://img.shields.io/discord/807245652072857610?color=blue&labelColor=555555&label=&logo=discord&style=for-the-badge)](https://discord.gg/H5MNcFW63r "Discord") -[![Matrix](https://img.shields.io/matrix/yt-dlp:matrix.org?color=brightgreen&labelColor=555555&label=&logo=element&style=for-the-badge)](https://matrix.to/#/#yt-dlp:matrix.org "Matrix") -[![Commits](https://img.shields.io/github/commit-activity/m/yt-dlp/yt-dlp?label=commits&style=for-the-badge)](https://github.com/yt-dlp/yt-dlp/commits "Commit History") -[![Last Commit](https://img.shields.io/github/last-commit/yt-dlp/yt-dlp/master?label=&style=for-the-badge)](https://github.com/yt-dlp/yt-dlp/commits "Commit History") - -
- - -yt-dlp is a [youtube-dl](https://github.com/ytdl-org/youtube-dl) fork based on the now inactive [youtube-dlc](https://github.com/blackjack4494/yt-dlc). The main focus of this project is adding new features and patches while also keeping up to date with the original project - - - - -* [NEW FEATURES](#new-features) - * [Differences in default behavior](#differences-in-default-behavior) -* [INSTALLATION](#installation) - * [Update](#update) - * [Release Files](#release-files) - * [Dependencies](#dependencies) - * [Compile](#compile) -* [USAGE AND OPTIONS](#usage-and-options) - * [General Options](#general-options) - * [Network Options](#network-options) - * [Geo-restriction](#geo-restriction) - * [Video Selection](#video-selection) - * [Download Options](#download-options) - * [Filesystem Options](#filesystem-options) - * [Thumbnail Options](#thumbnail-options) - * [Internet Shortcut Options](#internet-shortcut-options) - * [Verbosity and Simulation Options](#verbosity-and-simulation-options) - * [Workarounds](#workarounds) - * [Video Format Options](#video-format-options) - * [Subtitle Options](#subtitle-options) - * [Authentication Options](#authentication-options) - * [Post-processing Options](#post-processing-options) - * [SponsorBlock Options](#sponsorblock-options) - * [Extractor Options](#extractor-options) -* [CONFIGURATION](#configuration) - * [Authentication with .netrc file](#authentication-with-netrc-file) -* [OUTPUT TEMPLATE](#output-template) - * [Output template and Windows batch files](#output-template-and-windows-batch-files) - * [Output template examples](#output-template-examples) -* [FORMAT SELECTION](#format-selection) - * [Filtering Formats](#filtering-formats) - * [Sorting Formats](#sorting-formats) - * [Format Selection examples](#format-selection-examples) -* [MODIFYING METADATA](#modifying-metadata) - * [Modifying metadata examples](#modifying-metadata-examples) -* [EXTRACTOR ARGUMENTS](#extractor-arguments) -* [PLUGINS](#plugins) -* [EMBEDDING YT-DLP](#embedding-yt-dlp) -* [DEPRECATED OPTIONS](#deprecated-options) -* [CONTRIBUTING](CONTRIBUTING.md#contributing-to-yt-dlp) - * [Opening an Issue](CONTRIBUTING.md#opening-an-issue) - * [Developer Instructions](CONTRIBUTING.md#developer-instructions) -* [MORE](#more) - - - -# NEW FEATURES - -* Based on **youtube-dl 2021.12.17 [commit/5add3f4](https://github.com/ytdl-org/youtube-dl/commit/5add3f4373287e6346ca3551239edab549284db3)** and **youtube-dlc 2020.11.11-3 [commit/f9401f2](https://github.com/blackjack4494/yt-dlc/commit/f9401f2a91987068139c5f757b12fc711d4c0cee)**: You get all the features and patches of [youtube-dlc](https://github.com/blackjack4494/yt-dlc) in addition to the latest [youtube-dl](https://github.com/ytdl-org/youtube-dl) - -* **[SponsorBlock Integration](#sponsorblock-options)**: You can mark/remove sponsor sections in youtube videos by utilizing the [SponsorBlock](https://sponsor.ajay.app) API - -* **[Format Sorting](#sorting-formats)**: The default format sorting options have been changed so that higher resolution and better codecs will be now preferred instead of simply using larger bitrate. Furthermore, you can now specify the sort order using `-S`. 
This allows for much easier format selection than what is possible by simply using `--format` ([examples](#format-selection-examples)) - -* **Merged with animelover1984/youtube-dl**: You get most of the features and improvements from [animelover1984/youtube-dl](https://github.com/animelover1984/youtube-dl) including `--write-comments`, `BiliBiliSearch`, `BilibiliChannel`, Embedding thumbnail in mp4/ogg/opus, playlist infojson etc. Note that the NicoNico improvements are not available. See [#31](https://github.com/yt-dlp/yt-dlp/pull/31) for details. - -* **Youtube improvements**: - * All Feeds (`:ytfav`, `:ytwatchlater`, `:ytsubs`, `:ythistory`, `:ytrec`) and private playlists supports downloading multiple pages of content - * Search (`ytsearch:`, `ytsearchdate:`), search URLs and in-channel search works - * Mixes supports downloading multiple pages of content - * Some (but not all) age-gated content can be downloaded without cookies - * Fix for [n-sig based throttling](https://github.com/ytdl-org/youtube-dl/issues/29326) - * Redirect channel's home URL automatically to `/video` to preserve the old behaviour - * `255kbps` audio is extracted (if available) from youtube music when premium cookies are given - * Youtube music Albums, channels etc can be downloaded ([except self-uploaded music](https://github.com/yt-dlp/yt-dlp/issues/723)) - * Download livestreams from the start using `--live-from-start` (experimental) - -* **Cookies from browser**: Cookies can be automatically extracted from all major web browsers using `--cookies-from-browser BROWSER[+KEYRING][:PROFILE]` - -* **Split video by chapters**: Videos can be split into multiple files based on chapters using `--split-chapters` - -* **Multi-threaded fragment downloads**: Download multiple fragments of m3u8/mpd videos in parallel. Use `--concurrent-fragments` (`-N`) option to set the number of threads used - -* **Aria2c with HLS/DASH**: You can use `aria2c` as the external downloader for DASH(mpd) and HLS(m3u8) formats - -* **New and fixed extractors**: Many new extractors have been added and a lot of existing ones have been fixed. See the [changelog](Changelog.md) or the [list of supported sites](supportedsites.md) - -* **New MSOs**: Philo, Spectrum, SlingTV, Cablevision, RCN - -* **Subtitle extraction from manifests**: Subtitles can be extracted from streaming media manifests. See [commit/be6202f](https://github.com/yt-dlp/yt-dlp/commit/be6202f12b97858b9d716e608394b51065d0419f) for details - -* **Multiple paths and output templates**: You can give different [output templates](#output-template) and download paths for different types of files. You can also set a temporary path where intermediary files are downloaded to using `--paths` (`-P`) - -* **Portable Configuration**: Configuration files are automatically loaded from the home and root directories. See [configuration](#configuration) for details - -* **Output template improvements**: Output templates can now have date-time formatting, numeric offsets, object traversal etc. See [output template](#output-template) for details. 
Even more advanced operations can also be done with the help of `--parse-metadata` and `--replace-in-metadata` - -* **Other new options**: Many new options have been added such as `--concat-playlist`, `--print`, `--wait-for-video`, `--sleep-requests`, `--convert-thumbnails`, `--write-link`, `--force-download-archive`, `--force-overwrites`, `--break-on-reject` etc - -* **Improvements**: Regex and other operators in `--match-filter`, multiple `--postprocessor-args` and `--downloader-args`, faster archive checking, more [format selection options](#format-selection), merge multi-video/audio, multiple `--config-locations`, `--exec` at different stages, etc - -* **Plugins**: Extractors and PostProcessors can be loaded from an external file. See [plugins](#plugins) for details - -* **Self-updater**: The releases can be updated using `yt-dlp -U` - -See [changelog](Changelog.md) or [commits](https://github.com/yt-dlp/yt-dlp/commits) for the full list of changes - -### Differences in default behavior - -Some of yt-dlp's default options are different from that of youtube-dl and youtube-dlc: - -* The options `--auto-number` (`-A`), `--title` (`-t`) and `--literal` (`-l`), no longer work. See [removed options](#Removed) for details -* `avconv` is not supported as an alternative to `ffmpeg` -* The default [output template](#output-template) is `%(title)s [%(id)s].%(ext)s`. There is no real reason for this change. This was changed before yt-dlp was ever made public and now there are no plans to change it back to `%(title)s-%(id)s.%(ext)s`. Instead, you may use `--compat-options filename` -* The default [format sorting](#sorting-formats) is different from youtube-dl and prefers higher resolution and better codecs rather than higher bitrates. You can use the `--format-sort` option to change this to any order you prefer, or use `--compat-options format-sort` to use youtube-dl's sorting order -* The default format selector is `bv*+ba/b`. This means that if a combined video + audio format that is better than the best video-only format is found, the former will be preferred. Use `-f bv+ba/b` or `--compat-options format-spec` to revert this -* Unlike youtube-dlc, yt-dlp does not allow merging multiple audio/video streams into one file by default (since this conflicts with the use of `-f bv*+ba`). If needed, this feature must be enabled using `--audio-multistreams` and `--video-multistreams`. You can also use `--compat-options multistreams` to enable both -* `--ignore-errors` is enabled by default. Use `--abort-on-error` or `--compat-options abort-on-error` to abort on errors instead -* When writing metadata files such as thumbnails, description or infojson, the same information (if available) is also written for playlists. Use `--no-write-playlist-metafiles` or `--compat-options no-playlist-metafiles` to not write these files -* `--add-metadata` attaches the `infojson` to `mkv` files in addition to writing the metadata when used with `--write-info-json`. Use `--no-embed-info-json` or `--compat-options no-attach-info-json` to revert this -* Some metadata are embedded into different fields when using `--add-metadata` as compared to youtube-dl. Most notably, `comment` field contains the `webpage_url` and `synopsis` contains the `description`. You can [use `--parse-metadata`](#modifying-metadata) to modify this to your liking or use `--compat-options embed-metadata` to revert this -* `playlist_index` behaves differently when used with options like `--playlist-reverse` and `--playlist-items`. 
See [#302](https://github.com/yt-dlp/yt-dlp/issues/302) for details. You can use `--compat-options playlist-index` if you want to keep the earlier behavior -* The output of `-F` is listed in a new format. Use `--compat-options list-formats` to revert this -* All *experiences* of a funimation episode are considered as a single video. This behavior breaks existing archives. Use `--compat-options seperate-video-versions` to extract information from only the default player -* Youtube live chat (if available) is considered as a subtitle. Use `--sub-langs all,-live_chat` to download all subtitles except live chat. You can also use `--compat-options no-live-chat` to prevent live chat from downloading -* Youtube channel URLs are automatically redirected to `/video`. Append a `/featured` to the URL to download only the videos in the home page. If the channel does not have a videos tab, we try to download the equivalent `UU` playlist instead. For all other tabs, if the channel does not show the requested tab, an error will be raised. Also, `/live` URLs raise an error if there are no live videos instead of silently downloading the entire channel. You may use `--compat-options no-youtube-channel-redirect` to revert all these redirections -* Unavailable videos are also listed for youtube playlists. Use `--compat-options no-youtube-unavailable-videos` to remove this -* If `ffmpeg` is used as the downloader, the downloading and merging of formats happen in a single step when possible. Use `--compat-options no-direct-merge` to revert this -* Thumbnail embedding in `mp4` is done with mutagen if possible. Use `--compat-options embed-thumbnail-atomicparsley` to force the use of AtomicParsley instead -* Some private fields such as filenames are removed by default from the infojson. Use `--no-clean-infojson` or `--compat-options no-clean-infojson` to revert this -* When `--embed-subs` and `--write-subs` are used together, the subtitles are written to disk and also embedded in the media file. You can use just `--embed-subs` to embed the subs and automatically delete the separate file. See [#630 (comment)](https://github.com/yt-dlp/yt-dlp/issues/630#issuecomment-893659460) for more info. 
`--compat-options no-keep-subs` can be used to revert this - -For ease of use, a few more compat options are available: -* `--compat-options all`: Use all compat options -* `--compat-options youtube-dl`: Same as `--compat-options all,-multistreams` -* `--compat-options youtube-dlc`: Same as `--compat-options all,-no-live-chat,-no-youtube-channel-redirect` - - -# INSTALLATION - -You can install yt-dlp using one of the following methods: - -### Using the release binary - -You can simply download the [correct binary file](#release-files) for your OS - - -[![Windows](https://img.shields.io/badge/-Windows_x64-blue.svg?style=for-the-badge&logo=windows)](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp.exe) -[![Linux](https://img.shields.io/badge/-Linux/MacOS/BSD-red.svg?style=for-the-badge&logo=linux)](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp) -[![Source Tarball](https://img.shields.io/badge/-Source_tar-green.svg?style=for-the-badge)](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp.tar.gz) -[![Other variants](https://img.shields.io/badge/-Other-grey.svg?style=for-the-badge)](#release-files) -[![ALl versions](https://img.shields.io/badge/-All_Versions-lightgrey.svg?style=for-the-badge)](https://github.com/yt-dlp/yt-dlp/releases) - - -Note: The manpages, shell completion files etc. are available in the [source tarball](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp.tar.gz) - - -In UNIX-like OSes (MacOS, Linux, BSD), you can also install the same in one of the following ways: - -``` -sudo curl -L https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp -o /usr/local/bin/yt-dlp -sudo chmod a+rx /usr/local/bin/yt-dlp -``` - -``` -sudo wget https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp -O /usr/local/bin/yt-dlp -sudo chmod a+rx /usr/local/bin/yt-dlp -``` - -``` -sudo aria2c https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp --dir /usr/local/bin -o yt-dlp -sudo chmod a+rx /usr/local/bin/yt-dlp -``` - - -### With [PIP](https://pypi.org/project/pip) - -You can install the [PyPI package](https://pypi.org/project/yt-dlp) with: -``` -python3 -m pip install -U yt-dlp -``` - -You can install without any of the optional dependencies using: -``` -python3 -m pip install --no-deps -U yt-dlp -``` - -If you want to be on the cutting edge, you can also install the master branch with: -``` -python3 -m pip install --force-reinstall https://github.com/yt-dlp/yt-dlp/archive/master.zip -``` - -Note that on some systems, you may need to use `py` or `python` instead of `python3` - - -### With [Homebrew](https://brew.sh) - -macOS or Linux users that are using Homebrew can also install it by: - -``` -brew install yt-dlp/taps/yt-dlp -``` - -## UPDATE -You can use `yt-dlp -U` to update if you are [using the provided release](#using-the-release-binary) - -If you [installed with pip](#with-pip), simply re-run the same command that was used to install the program - -If you [installed using Homebrew](#with-homebrew), run `brew upgrade yt-dlp/taps/yt-dlp` - - -## RELEASE FILES - -#### Recommended - -File|Description -:---|:--- -[yt-dlp](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp)|Platform-independant binary. 
Needs Python (recommended for **UNIX-like systems**) -[yt-dlp.exe](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp.exe)|Windows (Win7 SP1+) standalone x64 binary (recommended for **Windows**) - -#### Alternatives - -File|Description -:---|:--- -[yt-dlp_macos](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_macos)|MacOS (10.15+) standalone executable -[yt-dlp_x86.exe](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_x86.exe)|Windows (Vista SP2+) standalone x86 (32-bit) binary -[yt-dlp_min.exe](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_min.exe)|Windows (Win7 SP1+) standalone x64 binary built with `py2exe`.
Does not contain `pycryptodomex`, needs VC++14 -[yt-dlp_win.zip](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_win.zip)|Unpackaged Windows executable (no auto-update) -[yt-dlp_macos.zip](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp_macos.zip)|Unpackaged MacOS (10.15+) executable (no auto-update) - -#### Misc - -File|Description -:---|:--- -[yt-dlp.tar.gz](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp.tar.gz)|Source tarball. Also contains manpages, completions, etc -[SHA2-512SUMS](https://github.com/yt-dlp/yt-dlp/releases/latest/download/SHA2-512SUMS)|GNU-style SHA512 sums -[SHA2-256SUMS](https://github.com/yt-dlp/yt-dlp/releases/latest/download/SHA2-256SUMS)|GNU-style SHA256 sums - - -## DEPENDENCIES -Python versions 3.6+ (CPython and PyPy) are supported. Other versions and implementations may or may not work correctly. - - - -While all the other dependencies are optional, `ffmpeg` and `ffprobe` are highly recommended - -* [**ffmpeg** and **ffprobe**](https://www.ffmpeg.org) - Required for [merging separate video and audio files](#format-selection) as well as for various [post-processing](#post-processing-options) tasks. License [depends on the build](https://www.ffmpeg.org/legal.html) -* [**mutagen**](https://github.com/quodlibet/mutagen) - For embedding thumbnail in certain formats. Licensed under [GPLv2+](https://github.com/quodlibet/mutagen/blob/master/COPYING) -* [**pycryptodomex**](https://github.com/Legrandin/pycryptodome) - For decrypting AES-128 HLS streams and various other data. Licensed under [BSD2](https://github.com/Legrandin/pycryptodome/blob/master/LICENSE.rst) -* [**websockets**](https://github.com/aaugustin/websockets) - For downloading over websocket. Licensed under [BSD3](https://github.com/aaugustin/websockets/blob/main/LICENSE) -* [**secretstorage**](https://github.com/mitya57/secretstorage) - For accessing the Gnome keyring while decrypting cookies of Chromium-based browsers on Linux. Licensed under [BSD](https://github.com/mitya57/secretstorage/blob/master/LICENSE) -* [**AtomicParsley**](https://github.com/wez/atomicparsley) - For embedding thumbnail in mp4/m4a if mutagen is not present. Licensed under [GPLv2+](https://github.com/wez/atomicparsley/blob/master/COPYING) -* [**rtmpdump**](http://rtmpdump.mplayerhq.hu) - For downloading `rtmp` streams. ffmpeg will be used as a fallback. Licensed under [GPLv2+](http://rtmpdump.mplayerhq.hu) -* [**mplayer**](http://mplayerhq.hu/design7/info.html) or [**mpv**](https://mpv.io) - For downloading `rstp` streams. ffmpeg will be used as a fallback. Licensed under [GPLv2+](https://github.com/mpv-player/mpv/blob/master/Copyright) -* [**phantomjs**](https://github.com/ariya/phantomjs) - Used in extractors where javascript needs to be run. Licensed under [BSD3](https://github.com/ariya/phantomjs/blob/master/LICENSE.BSD) -* [**sponskrub**](https://github.com/faissaloo/SponSkrub) - For using the now **deprecated** [sponskrub options](#sponskrub-options). Licensed under [GPLv3+](https://github.com/faissaloo/SponSkrub/blob/master/LICENCE.md) -* Any external downloader that you want to use with `--downloader` - -To use or redistribute the dependencies, you must agree to their respective licensing terms. - -The Windows and MacOS standalone release binaries are already built with the python interpreter, mutagen, pycryptodomex and websockets included. - -**Note**: There are some regressions in newer ffmpeg versions that causes various issues when used alongside yt-dlp. 
Since ffmpeg is such an important dependency, we provide [custom builds](https://github.com/yt-dlp/FFmpeg-Builds#ffmpeg-static-auto-builds) with patches for these issues at [yt-dlp/FFmpeg-Builds](https://github.com/yt-dlp/FFmpeg-Builds). See [the readme](https://github.com/yt-dlp/FFmpeg-Builds#patches-applied) for details on the specific issues solved by these builds - - -## COMPILE - -**For Windows**: -To build the Windows executable, you must have pyinstaller (and optionally mutagen, pycryptodomex, websockets). Once you have all the necessary dependencies installed, (optionally) build lazy extractors using `devscripts/make_lazy_extractors.py`, and then just run `pyinst.py`. The executable will be built for the same architecture (32/64 bit) as the python used to build it. - - py -m pip install -U pyinstaller -r requirements.txt - py devscripts/make_lazy_extractors.py - py pyinst.py - -Note that pyinstaller [does not support](https://github.com/pyinstaller/pyinstaller#requirements-and-tested-platforms) Python installed from the Windows store without using a virtual environment - -**For Unix**: -You will need the required build tools: `python`, `make` (GNU), `pandoc`, `zip`, `pytest` -Then simply run `make`. You can also run `make yt-dlp` instead to compile only the binary without updating any of the additional files - -**Note**: In either platform, `devscripts/update-version.py` can be used to automatically update the version number - -You can also fork the project on github and run your fork's [build workflow](.github/workflows/build.yml) to automatically build a release - -# USAGE AND OPTIONS - - - yt-dlp [OPTIONS] [--] URL [URL...] - -`Ctrl+F` is your friend :D - - - -## General Options: - -h, --help Print this help text and exit - --version Print program version and exit - -U, --update Update this program to latest version. Make - sure that you have sufficient permissions - (run with sudo if needed) - -i, --ignore-errors Ignore download and postprocessing errors. - The download will be considered successful - even if the postprocessing fails - --no-abort-on-error Continue with next video on download - errors; e.g. to skip unavailable videos in - a playlist (default) - --abort-on-error Abort downloading of further videos if an - error occurs (Alias: --no-ignore-errors) - --dump-user-agent Display the current user-agent and exit - --list-extractors List all supported extractors and exit - --extractor-descriptions Output descriptions of all supported - extractors and exit - --force-generic-extractor Force extraction to use the generic - extractor - --default-search PREFIX Use this prefix for unqualified URLs. For - example "gvsearch2:" downloads two videos - from google videos for the search term - "large apple". Use the value "auto" to let - yt-dlp guess ("auto_warning" to emit a - warning when guessing). "error" just throws - an error. The default value "fixup_error" - repairs broken URLs, but emits an error if - this is not possible instead of searching - --ignore-config Don't load any more configuration files - except those given by --config-locations. - For backward compatibility, if this option - is found inside the system configuration - file, the user configuration is not loaded. - (Alias: --no-config) - --no-config-locations Do not load any custom configuration files - (default). 
When given inside a - configuration file, ignore all previous - --config-locations defined in the current - file - --config-locations PATH Location of the main configuration file; - either the path to the config or its - containing directory. Can be used multiple - times and inside other configuration files - --flat-playlist Do not extract the videos of a playlist, - only list them - --no-flat-playlist Extract the videos of a playlist - --live-from-start Download livestreams from the start. - Currently only supported for YouTube - (Experimental) - --no-live-from-start Download livestreams from the current time - (default) - --wait-for-video MIN[-MAX] Wait for scheduled streams to become - available. Pass the minimum number of - seconds (or range) to wait between retries - --no-wait-for-video Do not wait for scheduled streams (default) - --mark-watched Mark videos watched (even with --simulate). - Currently only supported for YouTube - --no-mark-watched Do not mark videos watched (default) - --no-colors Do not emit color codes in output - --compat-options OPTS Options that can help keep compatibility - with youtube-dl or youtube-dlc - configurations by reverting some of the - changes made in yt-dlp. See "Differences in - default behavior" for details - -## Network Options: - --proxy URL Use the specified HTTP/HTTPS/SOCKS proxy. - To enable SOCKS proxy, specify a proper - scheme. For example - socks5://user:pass@127.0.0.1:1080/. Pass in - an empty string (--proxy "") for direct - connection - --socket-timeout SECONDS Time to wait before giving up, in seconds - --source-address IP Client-side IP address to bind to - -4, --force-ipv4 Make all connections via IPv4 - -6, --force-ipv6 Make all connections via IPv6 - -## Geo-restriction: - --geo-verification-proxy URL Use this proxy to verify the IP address for - some geo-restricted sites. The default - proxy specified by --proxy (or none, if the - option is not present) is used for the - actual downloading - --geo-bypass Bypass geographic restriction via faking - X-Forwarded-For HTTP header (default) - --no-geo-bypass Do not bypass geographic restriction via - faking X-Forwarded-For HTTP header - --geo-bypass-country CODE Force bypass geographic restriction with - explicitly provided two-letter ISO 3166-2 - country code - --geo-bypass-ip-block IP_BLOCK Force bypass geographic restriction with - explicitly provided IP block in CIDR - notation - -## Video Selection: - --playlist-start NUMBER Playlist video to start at (default is 1) - --playlist-end NUMBER Playlist video to end at (default is last) - --playlist-items ITEM_SPEC Playlist video items to download. Specify - indices of the videos in the playlist - separated by commas like: "--playlist-items - 1,2,5,8" if you want to download videos - indexed 1, 2, 5, 8 in the playlist. You can - specify range: "--playlist-items - 1-3,7,10-13", it will download the videos - at index 1, 2, 3, 7, 10, 11, 12 and 13 - --min-filesize SIZE Do not download any videos smaller than - SIZE (e.g. 50k or 44.6m) - --max-filesize SIZE Do not download any videos larger than SIZE - (e.g. 50k or 44.6m) - --date DATE Download only videos uploaded on this date. - The date can be "YYYYMMDD" or in the format - "(now|today)[+-][0-9](day|week|month|year)( - s)?" - --datebefore DATE Download only videos uploaded on or before - this date. The date formats accepted is the - same as --date - --dateafter DATE Download only videos uploaded on or after - this date. 
The date formats accepted is the - same as --date - --match-filter FILTER Generic video filter. Any field (see - "OUTPUT TEMPLATE") can be compared with a - number or a string using the operators - defined in "Filtering formats". You can - also simply specify a field to match if the - field is present and "!field" to check if - the field is not present. In addition, - Python style regular expression matching - can be done using "~=", and multiple - filters can be checked with "&". Use a "\" - to escape "&" or quotes if needed. Eg: - --match-filter "!is_live & like_count>?100 - & description~='(?i)\bcats \& dogs\b'" - matches only videos that are not live, has - a like count more than 100 (or the like - field is not available), and also has a - description that contains the phrase "cats - & dogs" (ignoring case) - --no-match-filter Do not use generic video filter (default) - --no-playlist Download only the video, if the URL refers - to a video and a playlist - --yes-playlist Download the playlist, if the URL refers to - a video and a playlist - --age-limit YEARS Download only videos suitable for the given - age - --download-archive FILE Download only videos not listed in the - archive file. Record the IDs of all - downloaded videos in it - --no-download-archive Do not use archive file (default) - --max-downloads NUMBER Abort after downloading NUMBER files - --break-on-existing Stop the download process when encountering - a file that is in the archive - --break-on-reject Stop the download process when encountering - a file that has been filtered out - --break-per-input Make --break-on-existing and --break-on- - reject act only on the current input URL - --no-break-per-input --break-on-existing and --break-on-reject - terminates the entire download queue - --skip-playlist-after-errors N Number of allowed failures until the rest - of the playlist is skipped - -## Download Options: - -N, --concurrent-fragments N Number of fragments of a dash/hlsnative - video that should be downloaded - concurrently (default is 1) - -r, --limit-rate RATE Maximum download rate in bytes per second - (e.g. 50K or 4.2M) - --throttled-rate RATE Minimum download rate in bytes per second - below which throttling is assumed and the - video data is re-extracted (e.g. 100K) - -R, --retries RETRIES Number of retries (default is 10), or - "infinite" - --file-access-retries RETRIES Number of times to retry on file access - error (default is 10), or "infinite" - --fragment-retries RETRIES Number of retries for a fragment (default - is 10), or "infinite" (DASH, hlsnative and - ISM) - --skip-unavailable-fragments Skip unavailable fragments for DASH, - hlsnative and ISM (default) (Alias: --no- - abort-on-unavailable-fragment) - --abort-on-unavailable-fragment Abort downloading if a fragment is - unavailable (Alias: --no-skip-unavailable- - fragments) - --keep-fragments Keep downloaded fragments on disk after - downloading is finished - --no-keep-fragments Delete downloaded fragments after - downloading is finished (default) - --buffer-size SIZE Size of download buffer (e.g. 1024 or 16K) - (default is 1024) - --resize-buffer The buffer size is automatically resized - from an initial value of --buffer-size - (default) - --no-resize-buffer Do not automatically adjust the buffer size - --http-chunk-size SIZE Size of a chunk for chunk-based HTTP - downloading (e.g. 10485760 or 10M) (default - is disabled). 
May be useful for bypassing - bandwidth throttling imposed by a webserver - (experimental) - --playlist-reverse Download playlist videos in reverse order - --no-playlist-reverse Download playlist videos in default order - (default) - --playlist-random Download playlist videos in random order - --xattr-set-filesize Set file xattribute ytdl.filesize with - expected file size - --hls-use-mpegts Use the mpegts container for HLS videos; - allowing some players to play the video - while downloading, and reducing the chance - of file corruption if download is - interrupted. This is enabled by default for - live streams - --no-hls-use-mpegts Do not use the mpegts container for HLS - videos. This is the default when not - downloading live streams - --downloader [PROTO:]NAME Name or path of the external downloader to - use (optionally) prefixed by the protocols - (http, ftp, m3u8, dash, rtsp, rtmp, mms) to - use it for. Currently supports native, - aria2c, avconv, axel, curl, ffmpeg, httpie, - wget (Recommended: aria2c). You can use - this option multiple times to set different - downloaders for different protocols. For - example, --downloader aria2c --downloader - "dash,m3u8:native" will use aria2c for - http/ftp downloads, and the native - downloader for dash/m3u8 downloads (Alias: - --external-downloader) - --downloader-args NAME:ARGS Give these arguments to the external - downloader. Specify the downloader name and - the arguments separated by a colon ":". For - ffmpeg, arguments can be passed to - different positions using the same syntax - as --postprocessor-args. You can use this - option multiple times to give different - arguments to different downloaders (Alias: - --external-downloader-args) - -## Filesystem Options: - -a, --batch-file FILE File containing URLs to download ("-" for - stdin), one URL per line. Lines starting - with "#", ";" or "]" are considered as - comments and ignored - --no-batch-file Do not read URLs from batch file (default) - -P, --paths [TYPES:]PATH The paths where the files should be - downloaded. Specify the type of file and - the path separated by a colon ":". All the - same TYPES as --output are supported. - Additionally, you can also provide "home" - (default) and "temp" paths. All - intermediary files are first downloaded to - the temp path and then the final files are - moved over to the home path after download - is finished. This option is ignored if - --output is an absolute path - -o, --output [TYPES:]TEMPLATE Output filename template; see "OUTPUT - TEMPLATE" for details - --output-na-placeholder TEXT Placeholder value for unavailable meta - fields in output filename template - (default: "NA") - --restrict-filenames Restrict filenames to only ASCII - characters, and avoid "&" and spaces in - filenames - --no-restrict-filenames Allow Unicode characters, "&" and spaces in - filenames (default) - --windows-filenames Force filenames to be Windows-compatible - --no-windows-filenames Make filenames Windows-compatible only if - using Windows (default) - --trim-filenames LENGTH Limit the filename length (excluding - extension) to the specified number of - characters - -w, --no-overwrites Do not overwrite any files - --force-overwrites Overwrite all video and metadata files. - This option includes --no-continue - --no-force-overwrites Do not overwrite the video, but overwrite - related files (default) - -c, --continue Resume partially downloaded files/fragments - (default) - --no-continue Do not resume partially downloaded - fragments.
If the file is not fragmented, - restart download of the entire file - --part Use .part files instead of writing directly - into output file (default) - --no-part Do not use .part files - write directly - into output file - --mtime Use the Last-modified header to set the - file modification time (default) - --no-mtime Do not use the Last-modified header to set - the file modification time - --write-description Write video description to a .description - file - --no-write-description Do not write video description (default) - --write-info-json Write video metadata to a .info.json file - (this may contain personal information) - --no-write-info-json Do not write video metadata (default) - --write-playlist-metafiles Write playlist metadata in addition to the - video metadata when using --write-info- - json, --write-description etc. (default) - --no-write-playlist-metafiles Do not write playlist metadata when using - --write-info-json, --write-description etc. - --clean-infojson Remove some private fields such as - filenames from the infojson. Note that it - could still contain some personal - information (default) - --no-clean-infojson Write all fields to the infojson - --write-comments Retrieve video comments to be placed in the - infojson. The comments are fetched even - without this option if the extraction is - known to be quick (Alias: --get-comments) - --no-write-comments Do not retrieve video comments unless the - extraction is known to be quick (Alias: - --no-get-comments) - --load-info-json FILE JSON file containing the video information - (created with the "--write-info-json" - option) - --cookies FILE Netscape formatted file to read cookies - from and dump cookie jar in - --no-cookies Do not read/dump cookies from/to file - (default) - --cookies-from-browser BROWSER[+KEYRING][:PROFILE] - The name of the browser and (optionally) - the name/path of the profile to load - cookies from, separated by a ":". Currently - supported browsers are: brave, chrome, - chromium, edge, firefox, opera, safari, - vivaldi. By default, the most recently - accessed profile is used. The keyring used - for decrypting Chromium cookies on Linux - can be (optionally) specified after the - browser name separated by a "+". Currently - supported keyrings are: basictext, - gnomekeyring, kwallet - --no-cookies-from-browser Do not load cookies from browser (default) - --cache-dir DIR Location in the filesystem where youtube-dl - can store some downloaded information (such - as client ids and signatures) permanently. - By default $XDG_CACHE_HOME/yt-dlp or - ~/.cache/yt-dlp - --no-cache-dir Disable filesystem caching - --rm-cache-dir Delete all filesystem cache files - -## Thumbnail Options: - --write-thumbnail Write thumbnail image to disk - --no-write-thumbnail Do not write thumbnail image to disk - (default) - --write-all-thumbnails Write all thumbnail image formats to disk - --list-thumbnails List available thumbnails of each video. - Simulate unless --no-simulate is used - -## Internet Shortcut Options: - --write-link Write an internet shortcut file, depending - on the current platform (.url, .webloc or - .desktop). The URL may be cached by the OS - --write-url-link Write a .url Windows internet shortcut. The - OS caches the URL based on the file path - --write-webloc-link Write a .webloc macOS internet shortcut - --write-desktop-link Write a .desktop Linux internet shortcut - -## Verbosity and Simulation Options: - -q, --quiet Activate quiet mode. 
If used with - --verbose, print the log to stderr - --no-warnings Ignore warnings - -s, --simulate Do not download the video and do not write - anything to disk - --no-simulate Download the video even if printing/listing - options are used - --ignore-no-formats-error Ignore "No video formats" error. Useful for - extracting metadata even if the videos are - not actually available for download - (experimental) - --no-ignore-no-formats-error Throw error when no downloadable video - formats are found (default) - --skip-download Do not download the video but write all - related files (Alias: --no-download) - -O, --print [WHEN:]TEMPLATE Field name or output template to print to - screen, optionally prefixed with when to - print it, separated by a ":". Supported - values of "WHEN" are the same as that of - --use-postprocessor, and "video" (default). - Implies --quiet and --simulate (unless - --no-simulate is used). This option can be - used multiple times - --print-to-file [WHEN:]TEMPLATE FILE - Append given template to the file. The - values of WHEN and TEMPLATE are same as - that of --print. FILE uses the same syntax - as the output template. This option can be - used multiple times - -j, --dump-json Quiet, but print JSON information for each - video. Simulate unless --no-simulate is - used. See "OUTPUT TEMPLATE" for a - description of available keys - -J, --dump-single-json Quiet, but print JSON information for each - url or infojson passed. Simulate unless - --no-simulate is used. If the URL refers to - a playlist, the whole playlist information - is dumped in a single line - --force-write-archive Force download archive entries to be - written as far as no errors occur, even if - -s or another simulation option is used - (Alias: --force-download-archive) - --newline Output progress bar as new lines - --no-progress Do not print progress bar - --progress Show progress bar, even if in quiet mode - --console-title Display progress in console titlebar - --progress-template [TYPES:]TEMPLATE - Template for progress outputs, optionally - prefixed with one of "download:" (default), - "download-title:" (the console title), - "postprocess:", or "postprocess-title:". - The video's fields are accessible under the - "info" key and the progress attributes are - accessible under "progress" key. E.g.: - --console-title --progress-template - "download- - title:%(info.id)s-%(progress.eta)s" - -v, --verbose Print various debugging information - --dump-pages Print downloaded pages encoded using base64 - to debug problems (very verbose) - --write-pages Write downloaded intermediary pages to - files in the current directory to debug - problems - --print-traffic Display sent and read HTTP traffic - -## Workarounds: - --encoding ENCODING Force the specified encoding (experimental) - --legacy-server-connect Explicitly allow HTTPS connection to - servers that do not support RFC 5746 secure - renegotiation - --no-check-certificates Suppress HTTPS certificate validation - --prefer-insecure Use an unencrypted connection to retrieve - information about the video (Currently - supported only for YouTube) - --user-agent UA Specify a custom user agent - --referer URL Specify a custom referer, use if the video - access is restricted to one domain - --add-header FIELD:VALUE Specify a custom HTTP header and its value, - separated by a colon ":". You can use this - option multiple times - --bidi-workaround Work around terminals that lack - bidirectional text support. 
Requires bidiv - or fribidi executable in PATH - --sleep-requests SECONDS Number of seconds to sleep between requests - during data extraction - --sleep-interval SECONDS Number of seconds to sleep before each - download. This is the minimum time to sleep - when used along with --max-sleep-interval - (Alias: --min-sleep-interval) - --max-sleep-interval SECONDS Maximum number of seconds to sleep. Can - only be used along with --min-sleep- - interval - --sleep-subtitles SECONDS Number of seconds to sleep before each - subtitle download - -## Video Format Options: - -f, --format FORMAT Video format code, see "FORMAT SELECTION" - for more details - -S, --format-sort SORTORDER Sort the formats by the fields given, see - "Sorting Formats" for more details - --format-sort-force Force user specified sort order to have - precedence over all fields, see "Sorting - Formats" for more details - --no-format-sort-force Some fields have precedence over the user - specified sort order (default), see - "Sorting Formats" for more details - --video-multistreams Allow multiple video streams to be merged - into a single file - --no-video-multistreams Only one video stream is downloaded for - each output file (default) - --audio-multistreams Allow multiple audio streams to be merged - into a single file - --no-audio-multistreams Only one audio stream is downloaded for - each output file (default) - --prefer-free-formats Prefer video formats with free containers - over non-free ones of same quality. Use - with "-S ext" to strictly prefer free - containers irrespective of quality - --no-prefer-free-formats Don't give any special preference to free - containers (default) - --check-formats Check that the selected formats are - actually downloadable - --check-all-formats Check all formats for whether they are - actually downloadable - --no-check-formats Do not check that the formats are actually - downloadable - -F, --list-formats List available formats of each video. - Simulate unless --no-simulate is used - --merge-output-format FORMAT If a merge is required (e.g. - bestvideo+bestaudio), output to given - container format. One of mkv, mp4, ogg, - webm, flv. Ignored if no merge is required - -## Subtitle Options: - --write-subs Write subtitle file - --no-write-subs Do not write subtitle file (default) - --write-auto-subs Write automatically generated subtitle file - (Alias: --write-automatic-subs) - --no-write-auto-subs Do not write auto-generated subtitles - (default) (Alias: --no-write-automatic- - subs) - --list-subs List available subtitles of each video. - Simulate unless --no-simulate is used - --sub-format FORMAT Subtitle format, accepts formats - preference, for example: "srt" or - "ass/srt/best" - --sub-langs LANGS Languages of the subtitles to download (can - be regex) or "all" separated by commas. - (Eg: --sub-langs "en.*,ja") You can prefix - the language code with a "-" to exempt it - from the requested languages. (Eg: --sub- - langs all,-live_chat) Use --list-subs for a - list of available language tags - -## Authentication Options: - -u, --username USERNAME Login with this account ID - -p, --password PASSWORD Account password. If this option is left - out, yt-dlp will ask interactively - -2, --twofactor TWOFACTOR Two-factor authentication code - -n, --netrc Use .netrc authentication data - --netrc-location PATH Location of .netrc authentication data; - either the path or its containing - directory. 
Defaults to ~/.netrc - --video-password PASSWORD Video password (vimeo, youku) - --ap-mso MSO Adobe Pass multiple-system operator (TV - provider) identifier, use --ap-list-mso for - a list of available MSOs - --ap-username USERNAME Multiple-system operator account login - --ap-password PASSWORD Multiple-system operator account password. - If this option is left out, yt-dlp will ask - interactively - --ap-list-mso List all supported multiple-system - operators - -## Post-Processing Options: - -x, --extract-audio Convert video files to audio-only files - (requires ffmpeg and ffprobe) - --audio-format FORMAT Specify audio format to convert the audio - to when -x is used. Currently supported - formats are: best (default) or one of - best|aac|flac|mp3|m4a|opus|vorbis|wav|alac - --audio-quality QUALITY Specify ffmpeg audio quality, insert a - value between 0 (best) and 10 (worst) for - VBR or a specific bitrate like 128K - (default 5) - --remux-video FORMAT Remux the video into another container if - necessary (currently supported: mp4|mkv|flv - |webm|mov|avi|mp3|mka|m4a|ogg|opus). If - target container does not support the - video/audio codec, remuxing will fail. You - can specify multiple rules; Eg. - "aac>m4a/mov>mp4/mkv" will remux aac to - m4a, mov to mp4 and anything else to mkv. - --recode-video FORMAT Re-encode the video into another format if - re-encoding is necessary. The syntax and - supported formats are the same as --remux- - video - --postprocessor-args NAME:ARGS Give these arguments to the postprocessors. - Specify the postprocessor/executable name - and the arguments separated by a colon ":" - to give the argument to the specified - postprocessor/executable. Supported PP are: - Merger, ModifyChapters, SplitChapters, - ExtractAudio, VideoRemuxer, VideoConvertor, - Metadata, EmbedSubtitle, EmbedThumbnail, - SubtitlesConvertor, ThumbnailsConvertor, - FixupStretched, FixupM4a, FixupM3u8, - FixupTimestamp and FixupDuration. The - supported executables are: AtomicParsley, - FFmpeg and FFprobe. You can also specify - "PP+EXE:ARGS" to give the arguments to the - specified executable only when being used - by the specified postprocessor. - Additionally, for ffmpeg/ffprobe, "_i"/"_o" - can be appended to the prefix optionally - followed by a number to pass the argument - before the specified input/output file. Eg: - --ppa "Merger+ffmpeg_i1:-v quiet". You can - use this option multiple times to give - different arguments to different - postprocessors. (Alias: --ppa) - -k, --keep-video Keep the intermediate video file on disk - after post-processing - --no-keep-video Delete the intermediate video file after - post-processing (default) - --post-overwrites Overwrite post-processed files (default) - --no-post-overwrites Do not overwrite post-processed files - --embed-subs Embed subtitles in the video (only for mp4, - webm and mkv videos) - --no-embed-subs Do not embed subtitles (default) - --embed-thumbnail Embed thumbnail in the video as cover art - --no-embed-thumbnail Do not embed thumbnail (default) - --embed-metadata Embed metadata to the video file. 
Also - embeds chapters/infojson if present unless - --no-embed-chapters/--no-embed-info-json - are used (Alias: --add-metadata) - --no-embed-metadata Do not add metadata to file (default) - (Alias: --no-add-metadata) - --embed-chapters Add chapter markers to the video file - (Alias: --add-chapters) - --no-embed-chapters Do not add chapter markers (default) - (Alias: --no-add-chapters) - --embed-info-json Embed the infojson as an attachment to - mkv/mka video files - --no-embed-info-json Do not embed the infojson as an attachment - to the video file - --parse-metadata FROM:TO Parse additional metadata like title/artist - from other fields; see "MODIFYING METADATA" - for details - --replace-in-metadata FIELDS REGEX REPLACE - Replace text in a metadata field using the - given regex. This option can be used - multiple times - --xattrs Write metadata to the video file's xattrs - (using dublin core and xdg standards) - --concat-playlist POLICY Concatenate videos in a playlist. One of - "never", "always", or "multi_video" - (default; only when the videos form a - single show). All the video files must have - same codecs and number of streams to be - concatable. The "pl_video:" prefix can be - used with "--paths" and "--output" to set - the output filename for the split files. - See "OUTPUT TEMPLATE" for details - --fixup POLICY Automatically correct known faults of the - file. One of never (do nothing), warn (only - emit a warning), detect_or_warn (the - default; fix file if we can, warn - otherwise), force (try fixing even if file - already exists) - --ffmpeg-location PATH Location of the ffmpeg binary; either the - path to the binary or its containing - directory - --exec [WHEN:]CMD Execute a command, optionally prefixed with - when to execute it (after_move if - unspecified), separated by a ":". Supported - values of "WHEN" are the same as that of - --use-postprocessor. Same syntax as the - output template can be used to pass any - field as arguments to the command. After - download, an additional field "filepath" - that contains the final path of the - downloaded file is also available, and if - no fields are passed, %(filepath)q is - appended to the end of the command. This - option can be used multiple times - --no-exec Remove any previously defined --exec - --convert-subs FORMAT Convert the subtitles to another format - (currently supported: srt|vtt|ass|lrc) - (Alias: --convert-subtitles) - --convert-thumbnails FORMAT Convert the thumbnails to another format - (currently supported: jpg|png) - --split-chapters Split video into multiple files based on - internal chapters. The "chapter:" prefix - can be used with "--paths" and "--output" - to set the output filename for the split - files. See "OUTPUT TEMPLATE" for details - --no-split-chapters Do not split video based on chapters - (default) - --remove-chapters REGEX Remove chapters whose title matches the - given regular expression. Time ranges - prefixed by a "*" can also be used in place - of chapters to remove the specified range. - Eg: --remove-chapters "*10:15-15:00" - --remove-chapters "intro". This option can - be used multiple times - --no-remove-chapters Do not remove any chapters from the file - (default) - --force-keyframes-at-cuts Force keyframes around the chapters before - removing/splitting them. 
Requires a re- - encode and thus is very slow, but the - resulting video may have fewer artifacts - around the cuts - --no-force-keyframes-at-cuts Do not force keyframes around the chapters - when cutting/splitting (default) - --use-postprocessor NAME[:ARGS] The (case sensitive) name of plugin - postprocessors to be enabled, and - (optionally) arguments to be passed to it, - separated by a colon ":". ARGS are a - semicolon ";" delimited list of NAME=VALUE. - The "when" argument determines when the - postprocessor is invoked. It can be one of - "pre_process" (after extraction), - "before_dl" (before video download), - "post_process" (after video download; - default), "after_move" (after moving file - to their final locations), "after_video" - (after downloading and processing all - formats of a video), or "playlist" (end of - playlist). This option can be used multiple - times to add different postprocessors - -## SponsorBlock Options: - Make chapter entries for, or remove various segments (sponsor, - introductions, etc.) from downloaded YouTube videos using the - SponsorBlock API (https://sponsor.ajay.app) - - --sponsorblock-mark CATS SponsorBlock categories to create chapters - for, separated by commas. Available - categories are all, default(=all), sponsor, - intro, outro, selfpromo, preview, filler, - interaction, music_offtopic, poi_highlight. - You can prefix the category with a "-" to - exempt it. See [1] for description of the - categories. Eg: --sponsorblock-mark - all,-preview [1] https://wiki.sponsor.ajay. - app/w/Segment_Categories - --sponsorblock-remove CATS SponsorBlock categories to be removed from - the video file, separated by commas. If a - category is present in both mark and - remove, remove takes precedence. The syntax - and available categories are the same as - for --sponsorblock-mark except that - "default" refers to "all,-filler" and - poi_highlight is not available - --sponsorblock-chapter-title TEMPLATE - The title template for SponsorBlock - chapters created by --sponsorblock-mark. - The same syntax as the output template is - used, but the only available fields are - start_time, end_time, category, categories, - name, category_names. Defaults to - "[SponsorBlock]: %(category_names)l" - --no-sponsorblock Disable both --sponsorblock-mark and - --sponsorblock-remove - --sponsorblock-api URL SponsorBlock API location, defaults to - https://sponsor.ajay.app - -## Extractor Options: - --extractor-retries RETRIES Number of retries for known extractor - errors (default is 3), or "infinite" - --allow-dynamic-mpd Process dynamic DASH manifests (default) - (Alias: --no-ignore-dynamic-mpd) - --ignore-dynamic-mpd Do not process dynamic DASH manifests - (Alias: --no-allow-dynamic-mpd) - --hls-split-discontinuity Split HLS playlists to different formats at - discontinuities such as ad breaks - --no-hls-split-discontinuity Do not split HLS playlists to different - formats at discontinuities such as ad - breaks (default) - --extractor-args KEY:ARGS Pass these arguments to the extractor. See - "EXTRACTOR ARGUMENTS" for details. You can - use this option multiple times to give - arguments for different extractors - -# CONFIGURATION - -You can configure yt-dlp by placing any supported command line option to a configuration file. The configuration is loaded from the following locations: - -1. **Main Configuration**: The file given by `--config-location` -1. **Portable Configuration**: `yt-dlp.conf` in the same directory as the bundled binary. 
If you are running from source-code (`<root dir>/yt_dlp/__main__.py`), the root directory is used instead. -1. **Home Configuration**: `yt-dlp.conf` in the home path given by `-P`, or in the current directory if no such path is given -1. **User Configuration**: - * `%XDG_CONFIG_HOME%/yt-dlp/config` (recommended on Linux/macOS) - * `%XDG_CONFIG_HOME%/yt-dlp.conf` - * `%APPDATA%/yt-dlp/config` (recommended on Windows) - * `%APPDATA%/yt-dlp/config.txt` - * `~/yt-dlp.conf` - * `~/yt-dlp.conf.txt` - - `%XDG_CONFIG_HOME%` defaults to `~/.config` if undefined. On Windows, `%APPDATA%` generally points to `C:\Users\<user name>\AppData\Roaming` and `~` points to `%HOME%` if present, `%USERPROFILE%` (generally `C:\Users\<user name>`), or `%HOMEDRIVE%%HOMEPATH%` -1. **System Configuration**: `/etc/yt-dlp.conf` - -For example, with the following configuration file yt-dlp will always extract the audio, not copy the mtime, use a proxy and save all videos under the `YouTube` directory in your home directory: -``` -# Lines starting with # are comments - -# Always extract audio --x - -# Do not copy the mtime ---no-mtime - -# Use this proxy ---proxy 127.0.0.1:3128 - -# Save all videos under YouTube directory in your home directory --o ~/YouTube/%(title)s.%(ext)s -``` - -Note that options in a configuration file are just the same options aka switches used in regular command line calls; thus there **must be no whitespace** after `-` or `--`, e.g. `-o` or `--proxy` but not `- o` or `-- proxy`. - -You can use `--ignore-config` if you want to disable all configuration files for a particular yt-dlp run. If `--ignore-config` is found inside any configuration file, no further configuration will be loaded. For example, having the option in the portable configuration file prevents loading of home, user, and system configurations. Additionally, (for backward compatibility) if `--ignore-config` is found inside the system configuration file, the user configuration is not loaded. - -### Authentication with `.netrc` file - -You may also want to configure automatic credentials storage for extractors that support authentication (by providing login and password with `--username` and `--password`) in order not to pass credentials as command line arguments on every yt-dlp execution and prevent tracking plain text passwords in the shell command history. You can achieve this using a [`.netrc` file](https://stackoverflow.com/tags/.netrc/info) on a per extractor basis. For that you will need to create a `.netrc` file in `--netrc-location` and restrict permissions to read/write by only you: -``` -touch $HOME/.netrc -chmod a-rwx,u+rw $HOME/.netrc -``` -After that you can add credentials for an extractor in the following format, where *extractor* is the name of the extractor in lowercase: -``` -machine <extractor> login <username> password <password> -``` -For example: -``` -machine youtube login myaccount@gmail.com password my_youtube_password -machine twitch login my_twitch_account_name password my_twitch_password -``` -To activate authentication with the `.netrc` file you should pass `--netrc` to yt-dlp or place it in the [configuration file](#configuration). - -The default location of the .netrc file is `$HOME` (`~`) in UNIX. On Windows, it is `%HOME%` if present, `%USERPROFILE%` (generally `C:\Users\<user name>`) or `%HOMEDRIVE%%HOMEPATH%` - -# OUTPUT TEMPLATE - -The `-o` option is used to indicate a template for the output file names while the `-P` option is used to specify the path each type of file should be saved to. - - -**tl;dr:** [navigate me to examples](#output-template-examples).
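-
-As a minimal sketch of how `-o` and `-P` fit together (the URL below is only a placeholder): the template names the file, while the path decides where it is saved:
-
-```bash
-# hypothetical invocation: name the file from its title, save it under ~/Videos
-$ yt-dlp -P "~/Videos" -o "%(title)s.%(ext)s" "https://some/video"
-```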
- - -The simplest usage of `-o` is not to set any template arguments when downloading a single file, like in `yt-dlp -o funny_video.flv "https://some/video"` (hard-coding the file extension like this is _not_ recommended and could break some post-processing). - -It may however also contain special sequences that will be replaced when downloading each video. The special sequences may be formatted according to [Python string formatting operations](https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting). For example, `%(NAME)s` or `%(NAME)05d`. To clarify, that is a percent symbol followed by a name in parentheses, followed by formatting operations. - -The field names themselves (the part inside the parentheses) can also have some special formatting: -1. **Object traversal**: The dictionaries and lists available in metadata can be traversed by using a `.` (dot) separator. You can also do Python slicing using `:`. Eg: `%(tags.0)s`, `%(subtitles.en.-1.ext)s`, `%(id.3:7:-1)s`, `%(formats.:.format_id)s`. `%()s` refers to the entire infodict. Note that the fields that become available using this method are not listed below. Use `-j` to see such fields - -1. **Addition**: Addition and subtraction of numeric fields can be done using `+` and `-` respectively. Eg: `%(playlist_index+10)03d`, `%(n_entries+1-playlist_index)d` - -1. **Date/time Formatting**: Date/time fields can be formatted according to [strftime formatting](https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes) by specifying it separated from the field name using a `>`. Eg: `%(duration>%H-%M-%S)s`, `%(upload_date>%Y-%m-%d)s`, `%(epoch-3600>%H-%M-%S)s` - -1. **Alternatives**: Alternate fields can be specified separated with a `,`. Eg: `%(release_date>%Y,upload_date>%Y|Unknown)s` - -1. **Replacement**: A replacement value can be specified using a `&` separator. If the field is *not* empty, this replacement value will be used instead of the actual field content. This is done after alternate fields are considered; thus the replacement is used if *any* of the alternative fields is *not* empty. - -1. **Default**: A literal default value can be specified for when the field is empty using a `|` separator. This overrides `--output-na-placeholder`. Eg: `%(uploader|Unknown)s` - -1. **More Conversions**: In addition to the normal format types `diouxXeEfFgGcrs`, `B`, `j`, `l`, `q`, `D`, `S` can be used for converting to **B**ytes, **j**son (flag `#` for pretty-printing), a comma separated **l**ist (flag `#` for `\n` newline-separated), a string **q**uoted for the terminal (flag `#` to split a list into different arguments), to add **D**ecimal suffixes (Eg: 10M) (flag `#` to use 1024 as factor), and to **S**anitize as filename (flag `#` for restricted), respectively - -1. **Unicode normalization**: The format type `U` can be used for NFC [unicode normalization](https://docs.python.org/3/library/unicodedata.html#unicodedata.normalize). The alternate form flag (`#`) changes the normalization to NFD and the conversion flag `+` can be used for NFKC/NFKD compatibility equivalence normalization. Eg: `%(title)+.100U` is NFKC - -To summarize, the general syntax for a field is: -``` -%(name[.keys][addition][>strf][,alternate][&replacement][|default])[flags][width][.precision][length]type -``` - -Additionally, you can set different output templates for the various metadata files separately from the general output template by specifying the type of file followed by the template separated by a colon `:`.
The different file types supported are `subtitle`, `thumbnail`, `description`, `annotation` (deprecated), `infojson`, `link`, `pl_thumbnail`, `pl_description`, `pl_infojson`, `chapter`, `pl_video`. For example, `-o "%(title)s.%(ext)s" -o "thumbnail:%(title)s/%(title)s.%(ext)s"` will put the thumbnails in a folder with the same name as the video. If any of the templates is empty, that type of file will not be written. Eg: `--write-thumbnail -o "thumbnail:"` will write thumbnails only for playlists and not for videos. - -The available fields are: - - - `id` (string): Video identifier - - `title` (string): Video title - - `fulltitle` (string): Video title ignoring live timestamp and generic title - - `url` (string): Video URL - - `ext` (string): Video filename extension - - `alt_title` (string): A secondary title of the video - - `description` (string): The description of the video - - `display_id` (string): An alternative identifier for the video - - `uploader` (string): Full name of the video uploader - - `license` (string): License name the video is licensed under - - `creator` (string): The creator of the video - - `timestamp` (numeric): UNIX timestamp of the moment the video became available - - `upload_date` (string): Video upload date (YYYYMMDD) - - `release_timestamp` (numeric): UNIX timestamp of the moment the video was released - - `release_date` (string): The date (YYYYMMDD) when the video was released - - `modified_timestamp` (numeric): UNIX timestamp of the moment the video was last modified - - `modified_date` (string): The date (YYYYMMDD) when the video was last modified - - `uploader_id` (string): Nickname or id of the video uploader - - `channel` (string): Full name of the channel the video is uploaded on - - `channel_id` (string): Id of the channel - - `channel_follower_count` (numeric): Number of followers of the channel - - `location` (string): Physical location where the video was filmed - - `duration` (numeric): Length of the video in seconds - - `duration_string` (string): Length of the video (HH:mm:ss) - - `view_count` (numeric): How many users have watched the video on the platform - - `like_count` (numeric): Number of positive ratings of the video - - `dislike_count` (numeric): Number of negative ratings of the video - - `repost_count` (numeric): Number of reposts of the video - - `average_rating` (numeric): Average rating given by users; the scale used depends on the webpage - - `comment_count` (numeric): Number of comments on the video (For some extractors, comments are only downloaded at the end, and so this field cannot be used) - - `age_limit` (numeric): Age restriction for the video (years) - - `live_status` (string): One of "is_live", "was_live", "is_upcoming", "not_live" - - `is_live` (boolean): Whether this video is a live stream or a fixed-length video - - `was_live` (boolean): Whether this video was originally a live stream - - `playable_in_embed` (string): Whether this video is allowed to play in embedded players on other sites - - `availability` (string): Whether the video is "private", "premium_only", "subscriber_only", "needs_auth", "unlisted" or "public" - - `start_time` (numeric): Time in seconds where the reproduction should start, as specified in the URL - - `end_time` (numeric): Time in seconds where the reproduction should end, as specified in the URL - - `format` (string): A human-readable description of the format - - `format_id` (string): Format code specified by `--format` - - `format_note` (string): Additional info about the format - - `width`
(numeric): Width of the video - - `height` (numeric): Height of the video - - `resolution` (string): Textual description of width and height - - `tbr` (numeric): Average bitrate of audio and video in KBit/s - - `abr` (numeric): Average audio bitrate in KBit/s - - `acodec` (string): Name of the audio codec in use - - `asr` (numeric): Audio sampling rate in Hertz - - `vbr` (numeric): Average video bitrate in KBit/s - - `fps` (numeric): Frame rate - - `dynamic_range` (string): The dynamic range of the video - - `vcodec` (string): Name of the video codec in use - - `container` (string): Name of the container format - - `filesize` (numeric): The number of bytes, if known in advance - - `filesize_approx` (numeric): An estimate for the number of bytes - - `protocol` (string): The protocol that will be used for the actual download - - `extractor` (string): Name of the extractor - - `extractor_key` (string): Key name of the extractor - - `epoch` (numeric): Unix epoch of when the information extraction was completed - - `autonumber` (numeric): Number that will be increased with each download, starting at `--autonumber-start` - - `video_autonumber` (numeric): Number that will be increased with each video - - `n_entries` (numeric): Total number of extracted items in the playlist - - `playlist_id` (string): Identifier of the playlist that contains the video - - `playlist_title` (string): Name of the playlist that contains the video - - `playlist` (string): `playlist_id` or `playlist_title` - - `playlist_count` (numeric): Total number of items in the playlist. May not be known if the entire playlist is not extracted - - `playlist_index` (numeric): Index of the video in the playlist padded with leading zeros according to the final index - - `playlist_autonumber` (numeric): Position of the video in the playlist download queue padded with leading zeros according to the total length of the playlist - - `playlist_uploader` (string): Full name of the playlist uploader - - `playlist_uploader_id` (string): Nickname or id of the playlist uploader - - `webpage_url` (string): A URL to the video webpage which, if given to yt-dlp, should allow you to get the same result again - - `webpage_url_basename` (string): The basename of the webpage URL - - `webpage_url_domain` (string): The domain of the webpage URL - - `original_url` (string): The URL given by the user (or same as `webpage_url` for playlist entries) - -Available for the video that belongs to some logical chapter or section: - - - `chapter` (string): Name or title of the chapter the video belongs to - - `chapter_number` (numeric): Number of the chapter the video belongs to - - `chapter_id` (string): Id of the chapter the video belongs to - -Available for the video that is an episode of some series or programme: - - - `series` (string): Title of the series or programme the video episode belongs to - - `season` (string): Title of the season the video episode belongs to - - `season_number` (numeric): Number of the season the video episode belongs to - - `season_id` (string): Id of the season the video episode belongs to - - `episode` (string): Title of the video episode - - `episode_number` (numeric): Number of the video episode within a season - - `episode_id` (string): Id of the video episode - -Available for the media that is a track or a part of a music album: - - - `track` (string): Title of the track - - `track_number` (numeric): Number of the track within an album or a disc - - `track_id` (string): Id of the track - - `artist` (string): Artist(s) of the track - -
`genre` (string): Genre(s) of the track - - `album` (string): Title of the album the track belongs to - - `album_type` (string): Type of the album - - `album_artist` (string): List of all artists who appeared on the album - - `disc_number` (numeric): Number of the disc or other physical medium the track belongs to - - `release_year` (numeric): Year (YYYY) when the album was released - -Available for `chapter:` prefix when using `--split-chapters` for videos with internal chapters: - - - `section_title` (string): Title of the chapter - - `section_number` (numeric): Number of the chapter within the file - - `section_start` (numeric): Start time of the chapter in seconds - - `section_end` (numeric): End time of the chapter in seconds - -Available only when used in `--print`: - - - `urls` (string): The URLs of all requested formats, one in each line - - `filename` (string): Name of the video file. Note that the actual filename may be different due to post-processing. Use `--exec echo` to get the name after all postprocessing is complete - - `formats_table` (table): The video format table as printed by `--list-formats` - - `thumbnails_table` (table): The thumbnail format table as printed by `--list-thumbnails` - - `subtitles_table` (table): The subtitle format table as printed by `--list-subs` - - `automatic_captions_table` (table): The automatic subtitle format table as printed by `--list-subs` - - -Available only in `--sponsorblock-chapter-title`: - - - `start_time` (numeric): Start time of the chapter in seconds - - `end_time` (numeric): End time of the chapter in seconds - - `categories` (list): The SponsorBlock categories the chapter belongs to - - `category` (string): The smallest SponsorBlock category the chapter belongs to - - `category_names` (list): Friendly names of the categories - - `name` (string): Friendly name of the smallest category - -Each aforementioned sequence, when referenced in an output template, will be replaced by the actual value corresponding to the sequence name. For example, for `-o %(title)s-%(id)s.%(ext)s` and an mp4 video with title `yt-dlp test video` and id `BaW_jenozKc`, this will result in a `yt-dlp test video-BaW_jenozKc.mp4` file created in the current directory. - -Note that some of the sequences are not guaranteed to be present since they depend on the metadata obtained by a particular extractor. Such sequences will be replaced with the placeholder value provided with `--output-na-placeholder` (`NA` by default). - -**Tip**: Look at the `-j` output to identify which fields are available for the particular URL - -For numeric sequences you can use [numeric related formatting](https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting), for example, `%(view_count)05d` will result in a string with view count padded with zeros up to 5 characters, like in `00042`. - -Output templates can also contain an arbitrary hierarchical path, e.g. `-o "%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s"` which will result in downloading each video in a directory corresponding to this path template. Any missing directory will be automatically created for you. - -To use percent literals in an output template use `%%`. To output to stdout use `-o -`. - -The current default template is `%(title)s [%(id)s].%(ext)s`. - -In some cases, you don't want special characters such as 中, spaces, or &, such as when transferring the downloaded filename to a Windows system or the filename through an 8bit-unsafe channel.
In these cases, add the `--restrict-filenames` flag to get a shorter title. - - -#### Output template and Windows batch files - -If you are using an output template inside a Windows batch file then you must escape plain percent characters (`%`) by doubling, so that `-o "%(title)s-%(id)s.%(ext)s"` should become `-o "%%(title)s-%%(id)s.%%(ext)s"`. However, you should not touch `%`'s that are not plain characters, e.g. environment variables for expansion should stay intact: `-o "C:\%HOMEPATH%\Desktop\%%(title)s.%%(ext)s"`. - - -#### Output template examples - -```bash -$ yt-dlp --get-filename -o "test video.%(ext)s" BaW_jenozKc -test video.webm # Literal name with correct extension - -$ yt-dlp --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc -youtube-dl test video ''_ä↭𝕐.webm # All kinds of weird characters - -$ yt-dlp --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc --restrict-filenames -youtube-dl_test_video_.webm # Restricted file name - -# Download YouTube playlist videos in separate directory indexed by video order in a playlist -$ yt-dlp -o "%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s" "https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re" - -# Download YouTube playlist videos in separate directories according to their uploaded year -$ yt-dlp -o "%(upload_date>%Y)s/%(title)s.%(ext)s" "https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re" - -# Prefix playlist index with " - " separator, but only if it is available -$ yt-dlp -o '%(playlist_index|)s%(playlist_index& - |)s%(title)s.%(ext)s' BaW_jenozKc "https://www.youtube.com/user/TheLinuxFoundation/playlists" - -# Download all playlists of YouTube channel/user keeping each playlist in separate directory: -$ yt-dlp -o "%(uploader)s/%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s" "https://www.youtube.com/user/TheLinuxFoundation/playlists" - -# Download Udemy course keeping each chapter in separate directory under MyVideos directory in your home -$ yt-dlp -u user -p password -P "~/MyVideos" -o "%(playlist)s/%(chapter_number)s - %(chapter)s/%(title)s.%(ext)s" "https://www.udemy.com/java-tutorial" - -# Download entire series season keeping each series and each season in separate directory under C:/MyVideos -$ yt-dlp -P "C:/MyVideos" -o "%(series)s/%(season_number)s - %(season)s/%(episode_number)s - %(episode)s.%(ext)s" "https://videomore.ru/kino_v_detalayah/5_sezon/367617" - -# Download video as "C:\MyVideos\uploader\title.ext", subtitles as "C:\MyVideos\subs\uploader\title.ext" -# and put all temporary files in "C:\MyVideos\tmp" -$ yt-dlp -P "C:/MyVideos" -P "temp:tmp" -P "subtitle:subs" -o "%(uploader)s/%(title)s.%(ext)s" BaW_jenoz --write-subs - -# Download video as "C:\MyVideos\uploader\title.ext" and subtitles as "C:\MyVideos\uploader\subs\title.ext" -$ yt-dlp -P "C:/MyVideos" -o "%(uploader)s/%(title)s.%(ext)s" -o "subtitle:%(uploader)s/subs/%(title)s.%(ext)s" BaW_jenozKc --write-subs - -# Stream the video being downloaded to stdout -$ yt-dlp -o - BaW_jenozKc -``` - -# FORMAT SELECTION - -By default, yt-dlp tries to download the best available quality if you **don't** pass any options. -This is generally equivalent to using `-f bestvideo*+bestaudio/best`. However, if multiple audio streams are enabled (`--audio-multistreams`), the default format changes to `-f bestvideo+bestaudio/best`. Similarly, if ffmpeg is unavailable, or if you use yt-dlp to stream to `stdout` (`-o -`), the default becomes `-f best/bestvideo+bestaudio`.
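-
-To make these defaults concrete, a quick sketch (placeholder URL): under the stated defaults the two invocations below should be roughly equivalent, since `bv*+ba/b` is shorthand for `bestvideo*+bestaudio/best`:
-
-```bash
-# no options: yt-dlp applies the default format selector
-$ yt-dlp "https://some/video"
-# the same selection spelled out explicitly
-$ yt-dlp -f "bv*+ba/b" "https://some/video"
-```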
- -**Deprecation warning**: Latest versions of yt-dlp can stream multiple formats to stdout simultaneously using ffmpeg. So, in future versions, the default for this will be set to `-f bv*+ba/b` similar to normal downloads. If you want to preserve the `-f b/bv+ba` setting, it is recommended to explicitly specify it in the configuration options. - -The general syntax for format selection is `-f FORMAT` (or `--format FORMAT`) where `FORMAT` is a *selector expression*, i.e. an expression that describes the format or formats you would like to download. - - -**tl;dr:** [navigate me to examples](#format-selection-examples). - - -The simplest case is requesting a specific format, for example with `-f 22` you can download the format with format code equal to 22. You can get the list of available format codes for a particular video using `--list-formats` or `-F`. Note that these format codes are extractor specific. - -You can also use a file extension (currently `3gp`, `aac`, `flv`, `m4a`, `mp3`, `mp4`, `ogg`, `wav`, `webm` are supported) to download the best quality format of a particular file extension served as a single file, e.g. `-f webm` will download the best quality format with the `webm` extension served as a single file. - -You can use `-f -` to interactively provide the format selector *for each video*. - -You can also use special names to select particular edge case formats: - - - `all`: Select **all formats** separately - - `mergeall`: Select and **merge all formats** (Must be used with `--audio-multistreams`, `--video-multistreams` or both) - - `b*`, `best*`: Select the best quality format that **contains either** a video or an audio - - `b`, `best`: Select the best quality format that **contains both** video and audio. Equivalent to `best*[vcodec!=none][acodec!=none]` - - `bv`, `bestvideo`: Select the best quality **video-only** format. Equivalent to `best*[acodec=none]` - - `bv*`, `bestvideo*`: Select the best quality format that **contains video**. It may also contain audio. Equivalent to `best*[vcodec!=none]` - - `ba`, `bestaudio`: Select the best quality **audio-only** format. Equivalent to `best*[vcodec=none]` - - `ba*`, `bestaudio*`: Select the best quality format that **contains audio**. It may also contain video. Equivalent to `best*[acodec!=none]` - - `w*`, `worst*`: Select the worst quality format that contains either a video or an audio - - `w`, `worst`: Select the worst quality format that contains both video and audio. Equivalent to `worst*[vcodec!=none][acodec!=none]` - - `wv`, `worstvideo`: Select the worst quality video-only format. Equivalent to `worst*[acodec=none]` - - `wv*`, `worstvideo*`: Select the worst quality format that contains video. It may also contain audio. Equivalent to `worst*[vcodec!=none]` - - `wa`, `worstaudio`: Select the worst quality audio-only format. Equivalent to `worst*[vcodec=none]` - - `wa*`, `worstaudio*`: Select the worst quality format that contains audio. It may also contain video. Equivalent to `worst*[acodec!=none]` - -For example, to download the worst quality video-only format you can use `-f worstvideo`. It is however recommended not to use `worst` and related options. When your format selector is `worst`, the format which is worst in all respects is selected. Most of the time, what you actually want is the video with the smallest filesize instead. So it is generally better to use `-f best -S +size,+br,+res,+fps` instead of `-f worst`. See [sorting formats](#sorting-formats) for more details.
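-
-As a small illustration of the special names (placeholder URL), `ba` on its own grabs just the best audio-only format:
-
-```bash
-# "ba" = bestaudio: the best quality format with no video stream
-$ yt-dlp -f "ba" "https://some/video"
-```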
- -You can select the n'th best format of a type by using `best<type>.<n>`. For example, `best.2` will select the 2nd best combined format. Similarly, `bv*.3` will select the 3rd best format that contains a video stream. - -If you want to download multiple videos and they don't have the same formats available, you can specify the order of preference using slashes. Note that formats on the left hand side are preferred, for example `-f 22/17/18` will download format 22 if it's available, otherwise it will download format 17 if it's available, otherwise it will download format 18 if it's available, otherwise it will complain that no suitable formats are available for download. - -If you want to download several formats of the same video use a comma as a separator, e.g. `-f 22,17,18` will download all these three formats if they are available. Or a more sophisticated example combined with the precedence feature: `-f 136/137/mp4/bestvideo,140/m4a/bestaudio`. - -You can merge the video and audio of multiple formats into a single file using `-f <format1>+<format2>+...` (requires ffmpeg installed), for example `-f bestvideo+bestaudio` will download the best video-only format, the best audio-only format and mux them together with ffmpeg. - -**Deprecation warning**: Since the *below* described behavior is complex and counter-intuitive, this will be removed and multistreams will be enabled by default in the future. A new operator will instead be added to limit formats to a single audio/video stream - -Unless `--video-multistreams` is used, all formats with a video stream except the first one are ignored. Similarly, unless `--audio-multistreams` is used, all formats with an audio stream except the first one are ignored. For example, `-f bestvideo+best+bestaudio --video-multistreams --audio-multistreams` will download and merge all 3 given formats. The resulting file will have 2 video streams and 2 audio streams. But `-f bestvideo+best+bestaudio --no-video-multistreams` will download and merge only `bestvideo` and `bestaudio`. `best` is ignored since another format containing a video stream (`bestvideo`) has already been selected. The order of the formats is therefore important. `-f best+bestaudio --no-audio-multistreams` will download and merge both formats while `-f bestaudio+best --no-audio-multistreams` will ignore `best` and download only `bestaudio`.
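-
-Putting the preference and merge operators together, a plausible sketch (placeholder URL) that first tries to merge the best video and audio, then falls back through explicit format codes:
-
-```bash
-# try merging bestvideo+bestaudio; otherwise fall back to format 22, then 18
-$ yt-dlp -f "bestvideo+bestaudio/22/18" "https://some/video"
-```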
- -The following numeric meta fields can be used with comparisons `<`, `<=`, `>`, `>=`, `=` (equals), `!=` (not equals): - - - `filesize`: The number of bytes, if known in advance - - `width`: Width of the video, if known - - `height`: Height of the video, if known - - `tbr`: Average bitrate of audio and video in KBit/s - - `abr`: Average audio bitrate in KBit/s - - `vbr`: Average video bitrate in KBit/s - - `asr`: Audio sampling rate in Hertz - - `fps`: Frame rate - -Filtering also works with the comparisons `=` (equals), `^=` (starts with), `$=` (ends with), `*=` (contains) and the following string meta fields: - - - `ext`: File extension - - `acodec`: Name of the audio codec in use - - `vcodec`: Name of the video codec in use - - `container`: Name of the container format - - `protocol`: The protocol that will be used for the actual download, lower-case (`http`, `https`, `rtsp`, `rtmp`, `rtmpe`, `mms`, `f4m`, `ism`, `http_dash_segments`, `m3u8`, or `m3u8_native`) - - `format_id`: A short description of the format - - `language`: Language code - -Any string comparison may be prefixed with negation `!` in order to produce an opposite comparison, e.g. `!*=` (does not contain). - -Note that none of the aforementioned meta fields are guaranteed to be present since this solely depends on the metadata obtained by the particular extractor, i.e. the metadata offered by the website. Any other field made available by the extractor can also be used for filtering. - -Formats for which the value is not known are excluded unless you put a question mark (`?`) after the operator. You can combine format filters, so `-f "[height<=?720][tbr>500]"` selects up to 720p videos (or videos where the height is not known) with a bitrate of at least 500 KBit/s. You can also use the filters with `all` to download all formats that satisfy the filter. For example, `-f "all[vcodec=none]"` selects all audio-only formats. - -Format selectors can also be grouped using parentheses, for example if you want to download the best pre-merged mp4 and webm formats with a height lower than 480 you can use `-f "(mp4,webm)[height<480]"`. - -## Sorting Formats - -You can change the criteria for being considered the `best` by using `-S` (`--format-sort`). The general format for this is `--format-sort field1,field2...`. - -The available fields are: - - - `hasvid`: Gives priority to formats that have a video stream - - `hasaud`: Gives priority to formats that have an audio stream - - `ie_pref`: The format preference - - `lang`: The language preference - - `quality`: The quality of the format - - `source`: The preference of the source - - `proto`: Protocol used for download (`https`/`ftps` > `http`/`ftp` > `m3u8_native`/`m3u8` > `http_dash_segments` > `websocket_frag` > `mms`/`rtsp` > `f4f`/`f4m`) - - `vcodec`: Video Codec (`av01` > `vp9.2` > `vp9` > `h265` > `h264` > `vp8` > `h263` > `theora` > other) - - `acodec`: Audio Codec (`flac`/`alac` > `wav`/`aiff` > `opus` > `vorbis` > `aac` > `mp4a` > `mp3` > `eac3` > `ac3` > `dts` > other) - - `codec`: Equivalent to `vcodec,acodec` - - `vext`: Video Extension (`mp4` > `webm` > `flv` > other). If `--prefer-free-formats` is used, `webm` is preferred. - - `aext`: Audio Extension (`m4a` > `aac` > `mp3` > `ogg` > `opus` > `webm` > other). If `--prefer-free-formats` is used, the order changes to `opus` > `ogg` > `webm` > `m4a` > `mp3` > `aac`.
- `ext`: Equivalent to `vext,aext` - - `filesize`: Exact filesize, if known in advance - - `fs_approx`: Approximate filesize calculated from the manifests - - `size`: Exact filesize if available, otherwise approximate filesize - - `height`: Height of video - - `width`: Width of video - - `res`: Video resolution, calculated as the smallest dimension. - - `fps`: Framerate of video - - `hdr`: The dynamic range of the video (`DV` > `HDR12` > `HDR10+` > `HDR10` > `HLG` > `SDR`) - - `tbr`: Total average bitrate in KBit/s - - `vbr`: Average video bitrate in KBit/s - - `abr`: Average audio bitrate in KBit/s - - `br`: Equivalent to using `tbr,vbr,abr` - - `asr`: Audio sample rate in Hz - -**Deprecation warning**: Many of these fields have (currently undocumented) aliases that may be removed in a future version. It is recommended to use only the documented field names. - -All fields, unless specified otherwise, are sorted in descending order. To reverse this, prefix the field with a `+`. Eg: `+res` prefers the format with the smallest resolution. Additionally, you can suffix a preferred value for the fields, separated by a `:`. Eg: `res:720` prefers larger videos, but no larger than 720p and the smallest video if there are no videos less than 720p. For `codec` and `ext`, you can provide two preferred values, the first for video and the second for audio. Eg: `+codec:avc:m4a` (equivalent to `+vcodec:avc,+acodec:m4a`) sets the video codec preference to `h264` > `h265` > `vp9` > `vp9.2` > `av01` > `vp8` > `h263` > `theora` and audio codec preference to `mp4a` > `aac` > `vorbis` > `opus` > `mp3` > `ac3` > `dts`. You can also make the sorting prefer the value nearest to the one provided by using `~` as the delimiter. Eg: `filesize~1G` prefers the format with filesize closest to 1 GiB. - -The fields `hasvid` and `ie_pref` are always given the highest priority in sorting, irrespective of the user-defined order. This behaviour can be changed by using `--format-sort-force`. Apart from these, the default order used is: `lang,quality,res,fps,hdr:12,codec:vp9.2,size,br,asr,proto,ext,hasaud,source,id`. The extractors may override this default order, but they cannot override the user-provided order. - -Note that the default has `codec:vp9.2`; i.e. `av1` is not preferred. Similarly, the default for hdr is `hdr:12`; i.e. dolby vision is not preferred. These choices are made since DV and AV1 formats are not yet fully compatible with most devices. This may be changed in the future as more devices become capable of smoothly playing back these formats. - -If your format selector is `worst`, the last item is selected after sorting. This means it will select the format that is worst in all respects. Most of the time, what you actually want is the video with the smallest filesize instead. So it is generally better to use `-f best -S +size,+br,+res,+fps`. - -**Tip**: You can use `-v -F` to see how the formats have been sorted (worst to best).
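-
-A sketch of the precedence rule above (placeholder URL): since `hasvid` and `ie_pref` normally outrank user fields, sorting purely by smallest size requires `--format-sort-force`:
-
-```bash
-# without --format-sort-force, formats containing video would still outrank smaller files
-$ yt-dlp -S "+size" --format-sort-force "https://some/video"
-```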
-
-## Format Selection examples
-
-```bash
-# Download and merge the best video-only format and the best audio-only format,
-# or download the best combined format if video-only format is not available
-$ yt-dlp -f "bv+ba/b"
-
-# Download best format that contains video,
-# and if it doesn't already have an audio stream, merge it with best audio-only format
-$ yt-dlp -f "bv*+ba/b"
-
-# Same as above
-$ yt-dlp
-
-# Download the best video-only format and the best audio-only format without merging them
-# For this case, an output template should be used since
-# by default, bestvideo and bestaudio will have the same file name.
-$ yt-dlp -f "bv,ba" -o "%(title)s.f%(format_id)s.%(ext)s"
-
-# Download and merge the best format that has a video stream,
-# and all audio-only formats into one file
-$ yt-dlp -f "bv*+mergeall[vcodec=none]" --audio-multistreams
-
-# Download and merge the best format that has a video stream,
-# and the best 2 audio-only formats into one file
-$ yt-dlp -f "bv*+ba+ba.2" --audio-multistreams
-
-
-# The following examples show the old method (without -S) of format selection
-# and how to use -S to achieve a similar but (generally) better result
-
-# Download the worst video available (old method)
-$ yt-dlp -f "wv*+wa/w"
-
-# Download the best video available but with the smallest resolution
-$ yt-dlp -S "+res"
-
-# Download the smallest video available
-$ yt-dlp -S "+size,+br"
-
-
-
-# Download the best mp4 video available, or the best video if no mp4 available
-$ yt-dlp -f "bv*[ext=mp4]+ba[ext=m4a]/b[ext=mp4] / bv*+ba/b"
-
-# Download the best video with the best extension
-# (For video, mp4 > webm > flv. For audio, m4a > aac > mp3 ...)
-$ yt-dlp -S "ext"
-
-
-
-# Download the best video available but no better than 480p,
-# or the worst video if there is no video under 480p
-$ yt-dlp -f "bv*[height<=480]+ba/b[height<=480] / wv*+ba/w"
-
-# Download the best video available with the largest height but no better than 480p,
-# or the best video with the smallest resolution if there is no video under 480p
-$ yt-dlp -S "height:480"
-
-# Download the best video available with the largest resolution but no better than 480p,
-# or the best video with the smallest resolution if there is no video under 480p
-# Resolution is determined by using the smallest dimension.
-# So this works correctly for vertical videos as well
-$ yt-dlp -S "res:480"
-
-
-
-# Download the best video (that also has audio) but no bigger than 50 MB,
-# or the worst video (that also has audio) if there is no video under 50 MB
-$ yt-dlp -f "b[filesize<50M] / w"
-
-# Download largest video (that also has audio) but no bigger than 50 MB,
-# or the smallest video (that also has audio) if there is no video under 50 MB
-$ yt-dlp -f "b" -S "filesize:50M"
-
-# Download best video (that also has audio) that is closest in size to 50 MB
-$ yt-dlp -f "b" -S "filesize~50M"
-
-
-
-# Download best video available via direct link over HTTP/HTTPS protocol,
-# or the best video available via any protocol if there is no such video
-$ yt-dlp -f "(bv*+ba/b)[protocol^=http][protocol!*=dash] / (bv*+ba/b)"
-
-# Download best video available via the best protocol
-# (https/ftps > http/ftp > m3u8_native > m3u8 > http_dash_segments ...)
-$ yt-dlp -S "proto"
-
-
-
-# Download the best video with h264 codec, or the best video if there is no such video
-$ yt-dlp -f "(bv*[vcodec^=avc1]+ba) / (bv*+ba/b)"
-
-# Download the best video with best codec no better than h264,
-# or the best video with worst codec if there is no such video
-$ yt-dlp -S "codec:h264"
-
-# Download the best video with worst codec no worse than h264,
-# or the best video with best codec if there is no such video
-$ yt-dlp -S "+codec:h264"
-
-
-
-# More complex examples
-
-# Download the best video no better than 720p preferring framerate greater than 30,
-# or the worst video (still preferring framerate greater than 30) if there is no such video
-$ yt-dlp -f "((bv*[fps>30]/bv*)[height<=720]/(wv*[fps>30]/wv*)) + ba / (b[fps>30]/b)[height<=720]/(w[fps>30]/w)"
-
-# Download the video with the largest resolution no better than 720p,
-# or the video with the smallest resolution available if there is no such video,
-# preferring larger framerate for formats with the same resolution
-$ yt-dlp -S "res:720,fps"
-
-
-
-# Download the video with smallest resolution no worse than 480p,
-# or the video with the largest resolution available if there is no such video,
-# preferring better codec and then larger total bitrate for the same resolution
-$ yt-dlp -S "+res:480,codec,br"
-```
-
-# MODIFYING METADATA
-
-The metadata obtained by the extractors can be modified by using `--parse-metadata` and `--replace-in-metadata`.
-
-`--replace-in-metadata FIELDS REGEX REPLACE` is used to replace text in any metadata field using a [python regular expression](https://docs.python.org/3/library/re.html#regular-expression-syntax). [Backreferences](https://docs.python.org/3/library/re.html?highlight=backreferences#re.sub) can be used in the replace string for advanced use.
-
-The general syntax of `--parse-metadata FROM:TO` is to give the name of a field or an [output template](#output-template) to extract data from, and the format to interpret it as, separated by a colon `:`. Either a [python regular expression](https://docs.python.org/3/library/re.html#regular-expression-syntax) with named capture groups or a similar syntax to the [output template](#output-template) (only `%(field)s` formatting is supported) can be used for `TO`. The option can be used multiple times to parse and modify various fields. A quick way to test a `TO` pattern is sketched after the list below.
-
-Note that any field created by this can be used in the [output template](#output-template) and will also affect the media file's metadata added when using `--add-metadata`.
-
-This option also has a few special uses:
-* You can download an additional URL based on the metadata of the currently downloaded video. To do this, set the field `additional_urls` to the URL that you want to download. Eg: `--parse-metadata "description:(?P<additional_urls>https?://www\.vimeo\.com/\d+)"` will download the first vimeo video found in the description
-* You can use this to change the metadata that is embedded in the media file. To do this, set the value of the corresponding field with a `meta_` prefix. For example, any value you set to the `meta_description` field will be added to the `description` field in the file; you can use this to set a different "description" and "synopsis". To modify the metadata of individual streams, use the `meta<n>_` prefix (Eg: `meta1_language`). Any value set to a `meta_` field will overwrite all default values.
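-
-Since the `TO` pattern is a standard Python regular expression, you can sanity-check its named capture groups with the `re` module before passing it to `--parse-metadata`. A minimal sketch (the sample description string is made up for illustration):
-
-```python
-import re
-
-# A stand-in for the metadata field you would parse
-description = 'Some Artist - Some Title (Official Video)'
-
-# The TO part of --parse-metadata "description:TO"
-pattern = r'(?P<artist>.+?) - (?P<title>.+)'
-
-match = re.search(pattern, description)
-if match:
-    # Each named group becomes a field that yt-dlp creates
-    print(match.groupdict())
-    # {'artist': 'Some Artist', 'title': 'Some Title (Official Video)'}
-```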
-
-For reference, these are the fields yt-dlp adds by default to the file metadata:
-
-Metadata fields|From
-:---|:---
-`title`|`track` or `title`
-`date`|`upload_date`
-`description`, `synopsis`|`description`
-`purl`, `comment`|`webpage_url`
-`track`|`track_number`
-`artist`|`artist`, `creator`, `uploader` or `uploader_id`
-`genre`|`genre`
-`album`|`album`
-`album_artist`|`album_artist`
-`disc`|`disc_number`
-`show`|`series`
-`season_number`|`season_number`
-`episode_id`|`episode` or `episode_id`
-`episode_sort`|`episode_number`
-`language` of each stream|From the format's `language`
-
-**Note**: The file format may not support some of these fields
-
-
-## Modifying metadata examples
-
-```bash
-# Interpret the title as "Artist - Title"
-$ yt-dlp --parse-metadata "title:%(artist)s - %(title)s"
-
-# Regex example
-$ yt-dlp --parse-metadata "description:Artist - (?P<artist>.+)"
-
-# Set title as "Series name S01E05"
-$ yt-dlp --parse-metadata "%(series)s S%(season_number)02dE%(episode_number)02d:%(title)s"
-
-# Set "comment" field in video metadata using description instead of webpage_url
-$ yt-dlp --parse-metadata "description:(?s)(?P<meta_comment>.+)" --add-metadata
-
-# Remove "formats" field from the infojson by setting it to an empty string
-$ yt-dlp --parse-metadata ":(?P<formats>)" -j
-
-# Replace all spaces and "_" in title and uploader with a `-`
-$ yt-dlp --replace-in-metadata "title,uploader" "[ _]" "-"
-
-```
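-
-The same parsing is available when embedding yt-dlp (see [EMBEDDING YT-DLP](#embedding-yt-dlp) below) by attaching the `MetadataParser` postprocessor. A sketch, assuming the `MetadataParser` key and its `Actions.INTERPRET` action, which is how the CLI option is wired internally:
-
-```python
-import yt_dlp
-
-# Roughly equivalent to: --parse-metadata "title:%(artist)s - %(title)s"
-ydl_opts = {
-    'postprocessors': [{
-        'key': 'MetadataParser',
-        'when': 'pre_process',  # parse before the download starts
-        'actions': [(yt_dlp.postprocessor.MetadataParserPP.Actions.INTERPRET,
-                     'title', '%(artist)s - %(title)s')],
-    }],
-}
-
-with yt_dlp.YoutubeDL(ydl_opts) as ydl:
-    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
-```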
-
-# EXTRACTOR ARGUMENTS
-
-Some extractors accept additional arguments which can be passed using `--extractor-args KEY:ARGS`. `ARGS` is a `;` (semicolon) separated string of `ARG=VAL1,VAL2`. Eg: `--extractor-args "youtube:player-client=android_agegate,web;include_live_dash" --extractor-args "funimation:version=uncut"`. A sketch of the embedded equivalent follows the list of extractors below.
-
-The following extractors use this feature:
-
-#### youtube
-* `skip`: `hls` or `dash` (or both) to skip download of the respective manifests
-* `player_client`: Clients to extract video data from. The main clients are `web`, `android`, `ios`, `mweb`. These also have `_music`, `_embedded`, `_agegate`, and `_creator` variants (Eg: `web_embedded`) (`mweb` has only `_agegate`). By default, `android,web` is used, but the agegate and creator variants are added as required for age-gated videos. Similarly, the music variants are added for `music.youtube.com` URLs. You can also use `all` to use all the clients, and `default` for the default clients.
-* `player_skip`: Skip some network requests that are generally needed for robust extraction. One or more of `configs` (skip client configs), `webpage` (skip initial webpage), `js` (skip js player). While these options can help reduce the number of requests needed or avoid some rate-limiting, they could cause some issues. See [#860](https://github.com/yt-dlp/yt-dlp/pull/860) for more details
-* `include_live_dash`: Include live dash formats even without `--live-from-start` (These formats don't download properly)
-* `comment_sort`: `top` or `new` (default) - choose comment sorting mode (on YouTube's side)
-* `max_comments`: Limit the amount of comments to gather. Comma-separated list of integers representing `max-comments,max-parents,max-replies,max-replies-per-thread`. Default is `all,all,all,all`.
-    * E.g. `all,all,1000,10` will get a maximum of 1000 replies total, with up to 10 replies per thread. `1000,all,100` will get a maximum of 1000 comments, with a maximum of 100 replies total.
-* `max_comment_depth`: Maximum depth for nested comments. YouTube supports depths 1 or 2 (default)
-    * **Deprecated**: Set `max-replies` to `0` or `all` in `max_comments` instead (e.g. `max_comments=all,all,0` to get no replies)
-
-#### youtubetab (YouTube playlists, channels, feeds, etc.)
-* `skip`: One or more of `webpage` (skip initial webpage download), `authcheck` (allow the download of playlists requiring authentication when no initial webpage is downloaded. This may cause unwanted behavior; see [#1122](https://github.com/yt-dlp/yt-dlp/pull/1122) for more details)
-
-#### funimation
-* `language`: Languages to extract. Eg: `funimation:language=english,japanese`
-* `version`: The video version to extract - `uncut` or `simulcast`
-
-#### crunchyroll
-* `language`: Languages to extract. Eg: `crunchyroll:language=jaJp`
-* `hardsub`: Which hard-sub versions to extract. Eg: `crunchyroll:hardsub=None,enUS`
-
-#### crunchyroll:beta
-* `format`: Which stream type(s) to extract. Default is `adaptive_hls`. Eg: `crunchyrollbeta:format=vo_adaptive_hls`
-    * Potentially useful values include `adaptive_hls`, `adaptive_dash`, `vo_adaptive_hls`, `vo_adaptive_dash`, `download_hls`, `trailer_hls`, `trailer_dash`
-* `hardsub`: Preference order for which hardsub versions to extract. Default is `None` (no hardsubs). Eg: `crunchyrollbeta:hardsub=en-US,None`
-
-#### vikichannel
-* `video_types`: Types of videos to download - one or more of `episodes`, `movies`, `clips`, `trailers`
-
-#### youtubewebarchive
-* `check_all`: Try to check more at the cost of more requests. One or more of `thumbnails`, `captures`
-
-#### gamejolt
-* `comment_sort`: `hot` (default), `you` (cookies needed), `top`, `new` - choose comment sorting mode (on GameJolt's side)
-
-#### hotstar
-* `res`: resolution to ignore - one or more of `sd`, `hd`, `fhd`
-* `vcodec`: vcodec to ignore - one or more of `h264`, `h265`, `dvh265`
-* `dr`: dynamic range to ignore - one or more of `sdr`, `hdr10`, `dv`
-
-#### tiktok
-* `app_version`: App version to call mobile APIs with - should be set along with `manifest_app_version` (e.g. `20.2.1`)
-* `manifest_app_version`: Numeric app version to call mobile APIs with (e.g. `221`)
-
-NOTE: These options may be changed/removed in the future without concern for backward compatibility
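-
-When embedding (see [EMBEDDING YT-DLP](#embedding-yt-dlp) below), the same arguments can be supplied through the `extractor_args` option: a dictionary keyed by extractor name, with each argument's values given as a list of strings. A minimal sketch:
-
-```python
-from yt_dlp import YoutubeDL
-
-# Equivalent to: --extractor-args "youtube:skip=dash,hls"
-ydl_opts = {'extractor_args': {'youtube': {'skip': ['dash', 'hls']}}}
-
-with YoutubeDL(ydl_opts) as ydl:
-    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
-```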
-
-
-
-
-# PLUGINS
-
-Plugins are loaded from `<root-dir>/ytdlp_plugins/<type>/__init__.py`; where `<root-dir>` is the directory of the binary (`<root-dir>/yt-dlp`), or the root directory of the module if you are running directly from source-code (`<root-dir>/yt_dlp/__main__.py`). Plugins are currently not supported for the `pip` version
-
-Plugins can be of `<type>`s `extractor` or `postprocessor`. Extractor plugins do not need to be enabled from the CLI and are automatically invoked when the input URL is suitable for it. Postprocessor plugins can be invoked using `--use-postprocessor NAME`.
-
-See [ytdlp_plugins](ytdlp_plugins) for example plugins.
-
-Note that **all** plugins are imported even if not invoked, and that **there are no checks** performed on plugin code. Use plugins at your own risk and only if you trust the code
-
-If you are a plugin author, add [ytdlp-plugins](https://github.com/topics/ytdlp-plugins) as a topic to your repository for discoverability
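-
-For illustration, a minimal extractor plugin could look like the sketch below, modelled on the sample in [ytdlp_plugins](ytdlp_plugins); the class name and URL scheme are hypothetical:
-
-```python
-# Placed in <root-dir>/ytdlp_plugins/extractor/__init__.py (or a module it imports)
-from yt_dlp.extractor.common import InfoExtractor
-
-
-class SamplePluginIE(InfoExtractor):
-    _WORKING = False
-    IE_DESC = False
-    _VALID_URL = r'^sampleplugin:'
-
-    def _real_extract(self, url):
-        # A real plugin would return an info dict here
-        self.to_screen(f'URL "{url}" successfully captured')
-```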
-# Eg: "ydl.download", "ydl.download_with_info_file" -with yt_dlp.YoutubeDL(ydl_opts) as ydl: - ydl.add_post_processor(MyCustomPP()) - info = ydl.extract_info('https://www.youtube.com/watch?v=BaW_jenozKc') - - # ℹ️ ydl.sanitize_info makes the info json-serializable - print(json.dumps(ydl.sanitize_info(info))) -``` - -**Tip**: If you are porting your code from youtube-dl to yt-dlp, one important point to look out for is that we do not guarantee the return value of `YoutubeDL.extract_info` to be json serializable, or even be a dictionary. It will be dictionary-like, but if you want to ensure it is a serializable dictionary, pass it through `YoutubeDL.sanitize_info` as shown in the example above - - - - -# DEPRECATED OPTIONS - -These are all the deprecated options and the current alternative to achieve the same effect - -#### Almost redundant options -While these options are almost the same as their new counterparts, there are some differences that prevents them being redundant - - -j, --dump-json --print "%()j" - -F, --list-formats --print formats_table - --list-thumbnails --print thumbnails_table --print playlist:thumbnails_table - --list-subs --print automatic_captions_table --print subtitles_table - -#### Redundant options -While these options are redundant, they are still expected to be used due to their ease of use - - --get-description --print description - --get-duration --print duration_string - --get-filename --print filename - --get-format --print format - --get-id --print id - --get-thumbnail --print thumbnail - -e, --get-title --print title - -g, --get-url --print urls - --match-title REGEX --match-filter "title ~= (?i)REGEX" - --reject-title REGEX --match-filter "title !~= (?i)REGEX" - --min-views COUNT --match-filter "view_count >=? COUNT" - --max-views COUNT --match-filter "view_count <=? COUNT" - - -#### Not recommended -While these options still work, their use is not recommended since there are other alternatives to achieve the same - - --exec-before-download CMD --exec "before_dl:CMD" - --no-exec-before-download --no-exec - --all-formats -f all - --all-subs --sub-langs all --write-subs - --print-json -j --no-simulate - --autonumber-size NUMBER Use string formatting. 
-
-
-
-
-# DEPRECATED OPTIONS
-
-These are all the deprecated options and the current alternative to achieve the same effect
-
-#### Almost redundant options
-While these options are almost the same as their new counterparts, there are some differences that prevent them from being redundant
-
-    -j, --dump-json                  --print "%()j"
-    -F, --list-formats               --print formats_table
-    --list-thumbnails                --print thumbnails_table --print playlist:thumbnails_table
-    --list-subs                      --print automatic_captions_table --print subtitles_table
-
-#### Redundant options
-While these options are redundant, they are still expected to be used due to their ease of use
-
-    --get-description                --print description
-    --get-duration                   --print duration_string
-    --get-filename                   --print filename
-    --get-format                     --print format
-    --get-id                         --print id
-    --get-thumbnail                  --print thumbnail
-    -e, --get-title                  --print title
-    -g, --get-url                    --print urls
-    --match-title REGEX              --match-filter "title ~= (?i)REGEX"
-    --reject-title REGEX             --match-filter "title !~= (?i)REGEX"
-    --min-views COUNT                --match-filter "view_count >=? COUNT"
-    --max-views COUNT                --match-filter "view_count <=? COUNT"
-
-
-#### Not recommended
-While these options still work, their use is not recommended since there are other alternatives to achieve the same effect
-
-    --exec-before-download CMD       --exec "before_dl:CMD"
-    --no-exec-before-download        --no-exec
-    --all-formats                    -f all
-    --all-subs                       --sub-langs all --write-subs
-    --print-json                     -j --no-simulate
-    --autonumber-size NUMBER         Use string formatting. Eg: %(autonumber)03d
-    --autonumber-start NUMBER        Use internal field formatting like %(autonumber+NUMBER)s
-    --id                             -o "%(id)s.%(ext)s"
-    --metadata-from-title FORMAT     --parse-metadata "%(title)s:FORMAT"
-    --hls-prefer-native              --downloader "m3u8:native"
-    --hls-prefer-ffmpeg              --downloader "m3u8:ffmpeg"
-    --list-formats-old               --compat-options list-formats (Alias: --no-list-formats-as-table)
-    --list-formats-as-table          --compat-options -list-formats [Default] (Alias: --no-list-formats-old)
-    --youtube-skip-dash-manifest     --extractor-args "youtube:skip=dash" (Alias: --no-youtube-include-dash-manifest)
-    --youtube-skip-hls-manifest      --extractor-args "youtube:skip=hls" (Alias: --no-youtube-include-hls-manifest)
-    --youtube-include-dash-manifest  Default (Alias: --no-youtube-skip-dash-manifest)
-    --youtube-include-hls-manifest   Default (Alias: --no-youtube-skip-hls-manifest)
-
-
-#### Developer options
-These options are not intended to be used by the end-user
-
-    --test                           Download only part of video for testing extractors
-    --youtube-print-sig-code         For testing youtube signatures
-    --allow-unplayable-formats       List unplayable formats also
-    --no-allow-unplayable-formats    Default
-
-
-#### Old aliases
-These are aliases that are no longer documented for various reasons
-
-    --avconv-location                --ffmpeg-location
-    --cn-verification-proxy URL      --geo-verification-proxy URL
-    --dump-headers                   --print-traffic
-    --dump-intermediate-pages        --dump-pages
-    --force-write-download-archive   --force-write-archive
-    --load-info                      --load-info-json
-    --no-split-tracks                --no-split-chapters
-    --no-write-srt                   --no-write-subs
-    --prefer-unsecure                --prefer-insecure
-    --rate-limit RATE                --limit-rate RATE
-    --split-tracks                   --split-chapters
-    --srt-lang LANGS                 --sub-langs LANGS
-    --trim-file-names LENGTH         --trim-filenames LENGTH
-    --write-srt                      --write-subs
-    --yes-overwrites                 --force-overwrites
-
-#### Sponskrub Options
-Support for [SponSkrub](https://github.com/faissaloo/SponSkrub) has been deprecated in favor of the `--sponsorblock` options
-
-    --sponskrub                      --sponsorblock-mark all
-    --no-sponskrub                   --no-sponsorblock
-    --sponskrub-cut                  --sponsorblock-remove all
-    --no-sponskrub-cut               --sponsorblock-remove -all
-    --sponskrub-force                Not applicable
-    --no-sponskrub-force             Not applicable
-    --sponskrub-location             Not applicable
-    --sponskrub-args                 Not applicable
-
-#### No longer supported
-These options may no longer work as intended
-
-    --prefer-avconv                  avconv is not officially supported by yt-dlp (Alias: --no-prefer-ffmpeg)
-    --prefer-ffmpeg                  Default (Alias: --no-prefer-avconv)
-    -C, --call-home                  Not implemented
-    --no-call-home                   Default
-    --include-ads                    No longer supported
-    --no-include-ads                 Default
-    --write-annotations              No supported site has annotations now
-    --no-write-annotations           Default
-
-#### Removed
-These options had been deprecated since 2014 and have now been entirely removed
-
-    -A, --auto-number                -o "%(autonumber)s-%(id)s.%(ext)s"
-    -t, --title                      -o "%(title)s-%(id)s.%(ext)s"
-    -l, --literal                    -o accepts literal names
-
-# CONTRIBUTING
-See [CONTRIBUTING.md](CONTRIBUTING.md#contributing-to-yt-dlp) for instructions on [Opening an Issue](CONTRIBUTING.md#opening-an-issue) and [Contributing code to the project](CONTRIBUTING.md#developer-instructions)
-
-# MORE
-For FAQ see the [youtube-dl README](https://github.com/ytdl-org/youtube-dl#faq)
-
-
diff --git a/plugins/youtube_download/yt_dlp-2022.2.4.dist-info/RECORD b/plugins/youtube_download/yt_dlp-2022.2.4.dist-info/RECORD
deleted file mode 100644
index 887c956..0000000
---
a/plugins/youtube_download/yt_dlp-2022.2.4.dist-info/RECORD +++ /dev/null @@ -1,1939 +0,0 @@ -../../../bin/yt-dlp,sha256=SYTGI31VmAFW7J1J4Sxkg0fhpeR_rtKeudLYYQ7qwIg,253 -../../../share/bash-completion/completions/yt-dlp,sha256=4PqeC3e6N9m-2bz7u8WEUDTww0T5_1uieihQvgEGcCw,5786 -../../../share/doc/yt_dlp/README.txt,sha256=okCgHh8S5s0cyz4If2peWC8feh9afEAnWn_ZqwiRtKM,125275 -../../../share/fish/vendor_completions.d/yt-dlp.fish,sha256=4aZwMktq8Pcxk3tkqA01uIFgG3FgBuxxBJ-ClrYPhiw,45620 -../../../share/man/man1/yt-dlp.1,sha256=kiUdVkMal50AxlkrlGReZsDTodExRf3K0QnT0J4bZsU,120599 -../../../share/zsh/site-functions/_yt-dlp,sha256=0ts-BswcTtbV3GSaOLPceoufO2iz_p8k89Gnqm2h8jo,5892 -yt_dlp-2022.2.4.dist-info/AUTHORS,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -yt_dlp-2022.2.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -yt_dlp-2022.2.4.dist-info/LICENSE,sha256=fhLl30uuEsshWBuhV87SDhmGoFCN0Q0Oikq5pM-U6Fw,1211 -yt_dlp-2022.2.4.dist-info/METADATA,sha256=eiAzwCFl3q3RzmO1RO_sDaLDDj_uuO03xtplptDHnP0,134580 -yt_dlp-2022.2.4.dist-info/RECORD,, -yt_dlp-2022.2.4.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -yt_dlp-2022.2.4.dist-info/WHEEL,sha256=z9j0xAa_JmUKMpmz72K0ZGALSM_n-wQVmGbleXx2VHg,110 -yt_dlp-2022.2.4.dist-info/entry_points.txt,sha256=3waLDIrlirkbfXwyX_Og7UDVaS5-C_epiwQ35ypk6uA,40 -yt_dlp-2022.2.4.dist-info/top_level.txt,sha256=tOAf2ifpXPnc7BTwtlvcX1Bl-AATuzfiRgy2s3pgqzI,7 -yt_dlp/YoutubeDL.py,sha256=jLXvbaQlNPhzXqkDL2y8P62tRWD0784_Yp4hhm7dA80,181730 -yt_dlp/__init__.py,sha256=sHizyVWVlmYVVQznhKndCDjrmaoXK6xz2AMJ_fdzIek,39289 -yt_dlp/__main__.py,sha256=ENpO7OoHiWvkGEf_yEx9nEOLvMi1_LQx4NdW65VN77M,452 -yt_dlp/__pycache__/YoutubeDL.cpython-310.pyc,, -yt_dlp/__pycache__/__init__.cpython-310.pyc,, -yt_dlp/__pycache__/__main__.cpython-310.pyc,, -yt_dlp/__pycache__/aes.cpython-310.pyc,, -yt_dlp/__pycache__/cache.cpython-310.pyc,, -yt_dlp/__pycache__/compat.cpython-310.pyc,, -yt_dlp/__pycache__/cookies.cpython-310.pyc,, -yt_dlp/__pycache__/jsinterp.cpython-310.pyc,, -yt_dlp/__pycache__/minicurses.cpython-310.pyc,, -yt_dlp/__pycache__/options.cpython-310.pyc,, -yt_dlp/__pycache__/socks.cpython-310.pyc,, -yt_dlp/__pycache__/update.cpython-310.pyc,, -yt_dlp/__pycache__/utils.cpython-310.pyc,, -yt_dlp/__pycache__/version.cpython-310.pyc,, -yt_dlp/__pycache__/webvtt.cpython-310.pyc,, -yt_dlp/aes.py,sha256=aAr6JKDcDyzQ9yKBUn6ZgfQgIidXzcnd2eqXyv1Qt-k,21157 -yt_dlp/cache.py,sha256=O10MJs0xR1HSkYAz5v1aY78FOjxJYawTCDxhHQTcKDM,3128 -yt_dlp/compat.py,sha256=KHZGwCOf0PKGuhgjZuxYNiIF9WPRonG50NSi2JP9BpQ,8840 -yt_dlp/cookies.py,sha256=xDFLugSp72ooiXnoJmEioHd3QQP9JF9X6t-UiXKsmGA,37559 -yt_dlp/downloader/__init__.py,sha256=335F5hE8Mlw_tDZeEfngk1XGWgw5y7IvkUBSTJC79kI,4427 -yt_dlp/downloader/__pycache__/__init__.cpython-310.pyc,, -yt_dlp/downloader/__pycache__/common.cpython-310.pyc,, -yt_dlp/downloader/__pycache__/dash.cpython-310.pyc,, -yt_dlp/downloader/__pycache__/external.cpython-310.pyc,, -yt_dlp/downloader/__pycache__/f4m.cpython-310.pyc,, -yt_dlp/downloader/__pycache__/fragment.cpython-310.pyc,, -yt_dlp/downloader/__pycache__/hls.cpython-310.pyc,, -yt_dlp/downloader/__pycache__/http.cpython-310.pyc,, -yt_dlp/downloader/__pycache__/ism.cpython-310.pyc,, -yt_dlp/downloader/__pycache__/mhtml.cpython-310.pyc,, -yt_dlp/downloader/__pycache__/niconico.cpython-310.pyc,, -yt_dlp/downloader/__pycache__/rtmp.cpython-310.pyc,, -yt_dlp/downloader/__pycache__/rtsp.cpython-310.pyc,, -yt_dlp/downloader/__pycache__/websocket.cpython-310.pyc,, 
-yt_dlp/downloader/__pycache__/youtube_live_chat.cpython-310.pyc,, -yt_dlp/downloader/common.py,sha256=Eo4f_2ETroXezkLCAvyoidtYN3VrLnCCSfR8JzxtBHc,18377 -yt_dlp/downloader/dash.py,sha256=E9UNfZckVSs2uYi_SL7bpEZfZVAoGyX-b5lpZ8otGIg,3060 -yt_dlp/downloader/external.py,sha256=3_iyszcmW6u1b8xS1YmWnKfRTB6Uwkvm4CYtSm-mLKE,21400 -yt_dlp/downloader/f4m.py,sha256=HBIytHbivQn-l_YL9ApDLFeSk8StUvKsTcn4_Nva-2E,15548 -yt_dlp/downloader/fragment.py,sha256=owAGSCVUlyvcZbgsrX8ub2ReLDJYO4NQnP41SByxBRg,22149 -yt_dlp/downloader/hls.py,sha256=aiwLChW6lpvpqaBmLaFtN6OBhSwR0oLpdgUaYJ6EmNw,16270 -yt_dlp/downloader/http.py,sha256=XCfnAuBKwRTYUg2yo3CApTc6J0iN-eZHD_m2u9K4kLU,17301 -yt_dlp/downloader/ism.py,sha256=T513MbAvkTdd2cRpf6eePR1SzUCwV17hUFg1R4xmSXg,11729 -yt_dlp/downloader/mhtml.py,sha256=D9Kx3Vhyx4FNQ7ktokiIuwL-0uxZFhs519K4v9aMrBE,6441 -yt_dlp/downloader/niconico.py,sha256=c_q4g4XMh8WwlUn1JRoFi1wrzZLsZ30fIBgBArp19qY,2016 -yt_dlp/downloader/rtmp.py,sha256=-RbVr5ypQqCsqs_Hh29-qYivEw7iFkChk7IYTXa1Clg,9019 -yt_dlp/downloader/rtsp.py,sha256=gcAPy8-Ja7alg30Lj9nNZwX_JPUFfbaYlUIvKe01FXM,1569 -yt_dlp/downloader/websocket.py,sha256=MOvI2yKUzpJ8ZdYwWDI1ZDbZrbVVI6KsQwpbBwjh3Gc,1994 -yt_dlp/downloader/youtube_live_chat.py,sha256=h808kIygjIfo4oJGesHzt1AqWG-hfpglY_e7xxJTwLA,11285 -yt_dlp/extractor/__init__.py,sha256=iWItUfJy9wfIDuIoSxoakSqSu9y-pXFBIk0UCQn2du8,1486 -yt_dlp/extractor/__pycache__/__init__.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/abc.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/abcnews.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/abcotvs.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/academicearth.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/acast.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/adn.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/adobeconnect.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/adobepass.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/adobetv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/adultswim.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/aenetworks.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/afreecatv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/airmozilla.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/aliexpress.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/aljazeera.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/allocine.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/alphaporno.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/alura.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/amara.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/amazon.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/amcnetworks.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/americastestkitchen.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/amp.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/animelab.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/animeondemand.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/anvato.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/aol.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/apa.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/aparat.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/appleconnect.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/applepodcasts.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/appletrailers.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/archiveorg.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/arcpublishing.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ard.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/arkena.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/arnes.cpython-310.pyc,, 
-yt_dlp/extractor/__pycache__/arte.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/asiancrush.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/atresplayer.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/atttechchannel.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/atvat.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/audimedia.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/audioboom.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/audiomack.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/audius.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/awaan.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/aws.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/azmedien.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/baidu.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/bandaichannel.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/bandcamp.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/bannedvideo.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/bbc.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/beatport.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/beeg.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/behindkink.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/bellmedia.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/bet.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/bfi.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/bfmtv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/bibeltv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/bigflix.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/bild.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/bilibili.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/biobiochiletv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/biqle.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/bitchute.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/bitwave.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/blackboardcollaborate.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/bleacherreport.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/blinkx.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/blogger.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/bloomberg.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/bokecc.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/bongacams.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/bostonglobe.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/box.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/bpb.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/br.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/bravotv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/breakcom.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/breitbart.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/brightcove.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/businessinsider.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/buzzfeed.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/byutv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/c56.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cableav.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/callin.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cam4.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/camdemy.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cammodels.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/camwithher.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/canalalpha.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/canalc2.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/canalplus.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/canvas.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/carambatv.cpython-310.pyc,, 
-yt_dlp/extractor/__pycache__/cartoonnetwork.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cbc.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cbs.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cbsinteractive.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cbslocal.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cbsnews.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cbssports.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ccc.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ccma.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cctv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cda.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ceskatelevize.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cgtn.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/channel9.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/charlierose.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/chaturbate.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/chilloutzone.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/chingari.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/chirbit.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cinchcast.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cinemax.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ciscolive.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ciscowebex.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cjsw.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cliphunter.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/clippit.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cliprs.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/clipsyndicate.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/closertotruth.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cloudflarestream.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cloudy.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/clubic.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/clyp.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cmt.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cnbc.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cnn.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/comedycentral.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/common.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/commonmistakes.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/commonprotocols.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/condenast.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/contv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/corus.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/coub.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cozytv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cracked.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/crackle.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/crooksandliars.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/crowdbunker.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/crunchyroll.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cspan.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ctsnews.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ctv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ctvnews.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cultureunplugged.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/curiositystream.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/cwtv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/daftsex.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/dailymail.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/dailymotion.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/damtomo.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/daum.cpython-310.pyc,, 
-yt_dlp/extractor/__pycache__/dbtv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/dctp.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/deezer.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/defense.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/democracynow.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/dfb.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/dhm.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/digg.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/digitalconcerthall.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/digiteka.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/discovery.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/discoverygo.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/discoveryvr.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/disney.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/dispeak.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/dlive.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/doodstream.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/dotsub.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/douyutv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/dplay.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/drbonanza.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/dreisat.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/drooble.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/dropbox.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/dropout.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/drtuber.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/drtv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/dtube.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/duboku.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/dumpert.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/dvtv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/dw.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/eagleplatform.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ebaumsworld.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/echomsk.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/egghead.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ehow.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/eighttracks.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/einthusan.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/eitb.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ellentube.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/elonet.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/elpais.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/embedly.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/engadget.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/epicon.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/eporner.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/eroprofile.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ertgr.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/escapist.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/espn.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/esri.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/europa.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/europeantour.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/euscreen.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/everyonesmixtape.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/expotv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/expressen.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/extractors.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/extremetube.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/eyedotv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/facebook.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/fancode.cpython-310.pyc,, 
-yt_dlp/extractor/__pycache__/faz.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/fc2.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/fczenit.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/filmmodu.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/filmon.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/filmweb.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/firsttv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/fivemin.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/fivetv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/flickr.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/folketinget.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/footyroom.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/formula1.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/fourtube.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/fox.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/fox9.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/foxgay.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/foxnews.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/foxsports.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/franceculture.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/franceinter.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/francetv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/freesound.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/freespeech.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/freshlive.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/frontendmasters.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/fujitv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/funimation.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/funk.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/fusion.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/fxnetworks.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/gab.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/gaia.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/gameinformer.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/gamejolt.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/gamespot.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/gamestar.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/gaskrank.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/gazeta.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/gdcvault.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/gedidigital.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/generic.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/gettr.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/gfycat.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/giantbomb.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/giga.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/gigya.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/glide.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/globo.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/glomex.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/go.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/godtube.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/gofile.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/golem.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/googledrive.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/googlepodcasts.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/googlesearch.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/gopro.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/goshgay.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/gotostage.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/gputechconf.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/gronkh.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/groupon.cpython-310.pyc,, 
-yt_dlp/extractor/__pycache__/hbo.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/hearthisat.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/heise.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/hellporno.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/helsinki.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/hentaistigma.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/hgtv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/hidive.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/historicfilms.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/hitbox.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/hitrecord.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/hketv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/hornbunny.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/hotnewhiphop.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/hotstar.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/howcast.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/howstuffworks.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/hrfensehen.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/hrti.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/hse.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/huajiao.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/huffpost.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/hungama.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/hypem.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ichinanalive.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ign.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/iheart.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/imdb.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/imggaming.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/imgur.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ina.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/inc.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/indavideo.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/infoq.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/instagram.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/internazionale.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/internetvideoarchive.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/iprima.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/iqiyi.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ir90tv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/itv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ivi.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ivideon.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/iwara.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/izlesene.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/jamendo.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/jeuxvideo.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/joj.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/jove.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/jwplatform.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/kakao.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/kaltura.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/kanalplay.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/kankan.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/karaoketv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/karrierevideos.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/keezmovies.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/kelbyone.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ketnet.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/khanacademy.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/kickstarter.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/kinja.cpython-310.pyc,, 
-yt_dlp/extractor/__pycache__/kinopoisk.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/konserthusetplay.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/koo.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/krasview.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ku6.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/kusi.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/kuwo.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/la7.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/laola1tv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/lazy_extractors.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/lbry.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/lci.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/lcp.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/lecture2go.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/lecturio.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/leeco.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/lego.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/lemonde.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/lenta.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/libraryofcongress.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/libsyn.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/lifenews.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/limelight.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/line.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/linkedin.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/linuxacademy.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/litv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/livejournal.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/livestream.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/lnkgo.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/localnews8.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/lovehomeporn.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/lrt.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/lynda.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/m6.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/magentamusik360.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/mailru.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/mainstreaming.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/malltv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/mangomolo.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/manoto.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/manyvids.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/maoritv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/markiza.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/massengeschmacktv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/matchtv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/mdr.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/medaltv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/mediaite.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/mediaklikk.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/medialaan.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/mediaset.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/mediasite.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/medici.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/megaphone.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/megatvcom.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/meipai.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/melonvod.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/meta.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/metacafe.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/metacritic.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/mgoon.cpython-310.pyc,, 
-yt_dlp/extractor/__pycache__/mgtv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/miaopai.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/microsoftstream.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/microsoftvirtualacademy.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/mildom.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/minds.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ministrygrid.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/minoto.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/miomio.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/mirrativ.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/mit.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/mitele.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/mixch.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/mixcloud.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/mlb.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/mlssoccer.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/mnet.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/moevideo.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/mofosex.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/mojvideo.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/morningstar.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/motherless.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/motorsport.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/movieclips.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/moviezine.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/movingimage.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/msn.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/mtv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/muenchentv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/musescore.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/musicdex.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/mwave.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/mxplayer.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/mychannels.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/myspace.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/myspass.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/myvi.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/myvideoge.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/myvidster.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/n1.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nate.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nationalgeographic.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/naver.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nba.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nbc.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ndr.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ndtv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nebula.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nerdcubed.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/neteasemusic.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/netzkino.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/newgrounds.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/newstube.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/newsy.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nextmedia.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nexx.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nfhsnetwork.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nfl.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nhk.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nhl.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nick.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/niconico.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ninecninemedia.cpython-310.pyc,, 
-yt_dlp/extractor/__pycache__/ninegag.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ninenow.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nintendo.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nitter.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/njpwworld.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nobelprize.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/noco.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nonktube.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/noodlemagazine.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/noovo.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/normalboots.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nosvideo.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nova.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/novaplay.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nowness.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/noz.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/npo.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/npr.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nrk.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nrl.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ntvcojp.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ntvde.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ntvru.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nuevo.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nuvid.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nytimes.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nzherald.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/nzz.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/odatv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/odnoklassniki.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/oktoberfesttv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/olympics.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/on24.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/once.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ondemandkorea.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/onefootball.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/onet.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/onionstudios.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ooyala.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/opencast.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/openload.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/openrec.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/ora.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/orf.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/outsidetv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/packtpub.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/palcomp3.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/pandoratv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/paramountplus.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/parliamentliveuk.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/parlview.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/patreon.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/pbs.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/pearvideo.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/peertube.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/peertv.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/peloton.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/people.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/performgroup.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/periscope.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/philharmoniedeparis.cpython-310.pyc,, -yt_dlp/extractor/__pycache__/phoenix.cpython-310.pyc,, 
-yt_dlp/extractor/__pycache__/photobucket.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/picarto.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/piksel.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/pinkbike.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/pinterest.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/pixivsketch.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/pladform.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/planetmarathi.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/platzi.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/playfm.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/playplustv.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/plays.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/playstuff.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/playtvak.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/playvid.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/playwire.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/pluralsight.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/plutotv.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/podomatic.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/pokemon.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/pokergo.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/polsatgo.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/polskieradio.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/popcorntimes.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/popcorntv.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/porn91.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/porncom.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/pornez.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/pornflip.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/pornhd.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/pornhub.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/pornotube.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/pornovoisines.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/pornoxo.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/presstv.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/projectveritas.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/prosiebensat1.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/prx.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/puhutv.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/puls4.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/pyvideo.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/qqmusic.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/r7.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/radiko.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/radiobremen.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/radiocanada.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/radiode.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/radiofrance.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/radiojavan.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/radiokapital.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/radiozet.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/radlive.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/rai.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/raywenderlich.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/rbmaradio.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/rcs.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/rcti.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/rds.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/redbulltv.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/reddit.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/redgifs.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/redtube.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/regiotv.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/rentv.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/restudy.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/reuters.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/reverbnation.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/rice.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/rmcdecouverte.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/ro220.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/rockstargames.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/roosterteeth.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/rottentomatoes.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/roxwel.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/rozhlas.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/rtbf.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/rte.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/rtl2.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/rtlnl.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/rtnews.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/rtp.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/rtrfm.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/rts.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/rtve.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/rtvnh.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/rtvs.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/ruhd.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/rule34video.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/rumble.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/rutube.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/rutv.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/ruutu.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/ruv.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/safari.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/saitosan.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/samplefocus.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/sapo.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/savefrom.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/sbs.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/screencast.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/screencastomatic.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/scrippsnetworks.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/scte.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/seeker.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/senategov.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/sendtonews.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/servus.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/sevenplus.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/sexu.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/seznamzpravy.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/shahid.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/shared.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/shemaroome.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/showroomlive.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/simplecast.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/sina.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/sixplay.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/skeb.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/sky.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/skyit.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/skylinewebcams.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/skynewsarabia.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/skynewsau.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/slideshare.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/slideslive.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/slutload.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/snotr.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/sohu.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/sonyliv.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/soundcloud.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/soundgasm.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/southpark.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/sovietscloset.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/spankbang.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/spankwire.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/spiegel.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/spiegeltv.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/spike.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/sport5.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/sportbox.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/sportdeutschland.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/spotify.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/spreaker.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/springboardplatform.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/sprout.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/srgssr.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/srmediathek.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/stanfordoc.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/startv.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/steam.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/stitcher.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/storyfire.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/streamable.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/streamanity.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/streamcloud.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/streamcz.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/streamff.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/streetvoice.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/stretchinternet.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/stripchat.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/stv.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/sunporno.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/sverigesradio.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/svt.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/swrmediathek.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/syfy.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/sztvhu.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tagesschau.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tass.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tastytrade.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tbs.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tdslifeway.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/teachable.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/teachertube.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/teachingchannel.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/teamcoco.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/teamtreehouse.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/techtalks.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/ted.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tele13.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tele5.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/telebruxelles.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/telecinco.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/telegraaf.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/telemb.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/telemundo.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/telequebec.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/teletask.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/telewebion.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tennistv.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tenplay.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/testurl.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tf1.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tfo.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/theintercept.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/theplatform.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/thescene.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/thestar.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/thesun.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/theta.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/theweatherchannel.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/thisamericanlife.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/thisav.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/thisoldhouse.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/threeqsdn.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/threespeak.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tiktok.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tinypic.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tmz.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tnaflix.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/toggle.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/toggo.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tokentube.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tonline.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/toongoggles.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/toutv.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/toypics.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/traileraddict.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/trilulilu.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/trovo.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/trueid.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/trunews.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/trutv.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tube8.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tubitv.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tudou.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tumblr.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tunein.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tunepk.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/turbo.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/turner.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tv2.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tv2dk.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tv2hu.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tv4.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tv5mondeplus.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tv5unis.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tva.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tvanouvelles.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tvc.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tver.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tvigle.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tvland.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tvn24.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tvnet.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tvnoe.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tvnow.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tvopengr.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tvp.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tvplay.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tvplayer.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/tweakers.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/twentyfourvideo.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/twentymin.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/twentythreevideo.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/twitcasting.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/twitch.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/twitter.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/udemy.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/udn.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/ufctv.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/ukcolumn.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/uktvplay.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/umg.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/unistra.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/unity.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/uol.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/uplynk.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/urort.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/urplay.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/usanetwork.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/usatoday.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/ustream.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/ustudio.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/utreon.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/varzesh3.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vbox7.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/veehd.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/veo.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/veoh.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vesti.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vevo.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vgtv.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vh1.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vice.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vidbit.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/viddler.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/videa.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/videodetective.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/videofyme.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/videomore.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/videopress.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vidio.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vidlii.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vidzi.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vier.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/viewlift.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/viidea.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/viki.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vimeo.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vimm.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vimple.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vine.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/viqeo.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/viu.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vk.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vlive.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vodlocker.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vodpl.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vodplatform.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/voicerepublic.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/voicy.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/voot.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/voxmedia.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vrak.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vrt.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vrv.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vshare.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vtm.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vube.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vuclip.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vupload.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vvvvid.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vyborymos.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/vzaar.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/wakanim.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/walla.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/washingtonpost.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/wat.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/watchbox.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/watchindianporn.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/wdr.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/webcaster.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/webofstories.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/weibo.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/weiqitv.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/whowatch.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/willow.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/wimtv.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/wistia.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/worldstarhiphop.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/wppilot.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/wsj.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/wwe.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/xbef.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/xboxclips.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/xfileshare.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/xhamster.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/xiami.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/ximalaya.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/xminus.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/xnxx.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/xstream.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/xtube.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/xuite.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/xvideos.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/xxxymovies.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/yahoo.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/yandexdisk.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/yandexmusic.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/yandexvideo.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/yapfiles.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/yesjapan.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/yinyuetai.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/ynet.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/youjizz.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/youku.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/younow.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/youporn.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/yourporn.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/yourupload.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/youtube.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/zapiks.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/zaq1.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/zattoo.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/zdf.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/zee5.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/zhihu.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/zingmp3.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/zoom.cpython-310.pyc,,
-yt_dlp/extractor/__pycache__/zype.cpython-310.pyc,,
-yt_dlp/extractor/abc.py,sha256=px3qxvn7IrypwOTohRBomyWiXPAXqKTZPwvnO_rA3F4,12841
-yt_dlp/extractor/abcnews.py,sha256=v_OXq1sV87eF2W5px0EWg8ULe5SqW5Zq4kA8CzzHiaY,6386
-yt_dlp/extractor/abcotvs.py,sha256=_w7MM3PJ_eIO0D0H8ZD9Tn49qhp1xYElxJKWUz2xNd0,4726
-yt_dlp/extractor/academicearth.py,sha256=wHYzyKiiJzKRzE28dbAH4NdijUuTts06BQ_00KF8kcU,1399
-yt_dlp/extractor/acast.py,sha256=idDTLwpS8Jh9sXm1lHKM5S_hD_VaUDPkXG9u_vLubw8,4479
-yt_dlp/extractor/adn.py,sha256=lMMn4pYm2h9H-NLxpTIFAXhu-iw2uX9enfWUSqcFANE,10921
-yt_dlp/extractor/adobeconnect.py,sha256=3pCzh2cXCXPyaED68_ITr2TDm-jg_u2oaYY0frtfHR0,1308
-yt_dlp/extractor/adobepass.py,sha256=G2poHminccj93P6ruorVziB9idmTOMB8hRKNNrRWeK4,49176
-yt_dlp/extractor/adobetv.py,sha256=Nyb46GUGmPS3cpYhy1btz6Z3RO9xCCwCWv3IKMFVnng,10284
-yt_dlp/extractor/adultswim.py,sha256=tQQE_OfXJtZflz7Sv1zvBmWX7l5nRLlATsxHD7TExJg,7835
-yt_dlp/extractor/aenetworks.py,sha256=vwXKMEc-oMEXIB1motHadQUhnsFGCd5LEJTiKxo0s-Q,12068
-yt_dlp/extractor/afreecatv.py,sha256=_RUl9reqhnXlkEooGeqlt8d_mP4Bg1vyN4umvaW3yVw,19334
-yt_dlp/extractor/airmozilla.py,sha256=yyMaDXt1NW9vLnUuLXpkFSXKFVZXH2JXUjNpldtUc0w,2697
-yt_dlp/extractor/aliexpress.py,sha256=93Z9klWzyJgfIJuDRcMwgNs12kxwO2QNExrIXG3nK3A,1581
-yt_dlp/extractor/aljazeera.py,sha256=6UvtPXkrp5iP-sdE13O3efB5ZlCd_G2r4hSImtv8lAM,3421
-yt_dlp/extractor/allocine.py,sha256=Lo5ktZLZ0MnfMtl_dOTPj9UmyGOVfHZix7RLPyIygo0,4962
-yt_dlp/extractor/alphaporno.py,sha256=XOHWtJKVAGHtWS1_PmTQ30ZMKoMiWJzf1lg-wujyUsU,2724
-yt_dlp/extractor/alura.py,sha256=iWoTMaTv4bDabtpK5SSq_kFIgNOggcDQ9ciwN80HK74,6698
-yt_dlp/extractor/amara.py,sha256=O_5iljzf-IRg43ZQhMd9Zu_PJtTPgcUXPu6y0x-Y5PI,3583
-yt_dlp/extractor/amazon.py,sha256=3lwEwg4ojd-1ewdrD26s11T0LhbYaMNOSbv6rnuEHi0,2064
-yt_dlp/extractor/amcnetworks.py,sha256=ABWeZj2mGIAK_MTlWCt2GJ7MF7-psOKasLDuxWAQ57U,6173
-yt_dlp/extractor/americastestkitchen.py,sha256=awp7quP2YZWKDgaVzna4ut2fiaaQ14_qpGI6YsmgFlI,6331
-yt_dlp/extractor/amp.py,sha256=jN2rdNXDOCBgAHhJdKdkAqr4azheetiLuvj5FlDW9SE,4088
-yt_dlp/extractor/animelab.py,sha256=inXaLC_7Cv62D-IXBMcgeID6YGKADxwkYU4qzzGa59g,11269
-yt_dlp/extractor/animeondemand.py,sha256=WjuhQbJay4XLmd8N-TcB04xz8Lu_s23adCaWxJxFeoM,12144
-yt_dlp/extractor/anvato.py,sha256=_Y-ibrkdWUJliBVCF1fjVKuk8ziqfdF8egLkNLfSy4M,25606
-yt_dlp/extractor/anvato_token_generator/__init__.py,sha256=6OJbKiRPVyG7Giq67L9DT1EFecgiO9SzM6EZexwDjdA,116
-yt_dlp/extractor/anvato_token_generator/__pycache__/__init__.cpython-310.pyc,,
-yt_dlp/extractor/anvato_token_generator/__pycache__/common.cpython-310.pyc,,
-yt_dlp/extractor/anvato_token_generator/__pycache__/nfl.cpython-310.pyc,,
-yt_dlp/extractor/anvato_token_generator/common.py,sha256=59AXTU3EjK5ZUL8ZJGlBwRg-4L0qH0pqDLk0ikq2D5Y,187
-yt_dlp/extractor/anvato_token_generator/nfl.py,sha256=OuaMatpN_zOyIJbS2kgImSmRbAWqluFCHe4vz6VSSTQ,1011
-yt_dlp/extractor/aol.py,sha256=AqgHQ-7Pq562VlamGvyMHjDE0Z3_hdniWLH0u5emvQs,5498
-yt_dlp/extractor/apa.py,sha256=NJPTD2KdUC-EmwFihicB9NRmGRq6wguQcA6MOdK-J8c,3348
-yt_dlp/extractor/aparat.py,sha256=_b_RmuIXYuasWXGKJbiKT3j1fHQSARTOl-IacwfxqcM,3381
-yt_dlp/extractor/appleconnect.py,sha256=P2uoUrH1UxCvRXyYTYSi8EwbNn9HKt0gRDIZa1jahd8,1909
-yt_dlp/extractor/applepodcasts.py,sha256=D3pK_p3HfeGGN_TuYmA160Ed0pbCTHf1cIycGSls8S8,2378
-yt_dlp/extractor/appletrailers.py,sha256=GBx4KzFpFng_zX0RwEolOsFIH5YYOuXwOuP5X831sa4,10322
-yt_dlp/extractor/archiveorg.py,sha256=3APfzSSHeuP0YfeWW3XFUPv04pPZGptnIHYM5v-88_s,31636
-yt_dlp/extractor/arcpublishing.py,sha256=tnJah6cCWxojhLKlHOuRatNMw518gxMMqsSl6HI07dc,7636
-yt_dlp/extractor/ard.py,sha256=wuuuk22HiEyqv7e7rWksS9Ip2G7pBPppg7pw4XLhIww,27224
-yt_dlp/extractor/arkena.py,sha256=wugwbnYC0K4IjedDl3PTFpT4PoOV4zKf11OF8C3NVe8,7450
-yt_dlp/extractor/arnes.py,sha256=VSpTdY9g_V2gKoVJGsEzsMRMmkWmCj75lzSCJoc-Nkk,3652
-yt_dlp/extractor/arte.py,sha256=q771eXdmvytefI5Df-uVTr9yi1nxKobfK12MNFmdOMc,10652
-yt_dlp/extractor/asiancrush.py,sha256=W-S9zQ-GQyLxBy3mvwamGVkobK5BYB8kozIN2JtQAMY,7799
-yt_dlp/extractor/atresplayer.py,sha256=jVEWbhgXiP4D4H6GsbzCvM28S-Ej0-Qz5tDFfM_SdaU,4359
-yt_dlp/extractor/atttechchannel.py,sha256=t971NT2OsTae2Us57Ov_g9p3LnVGSKPZOOUrKlaI2dg,1962
-yt_dlp/extractor/atvat.py,sha256=_ItAQIdAS9-jgAAN9UImS2Vn9cdFmWsxmyKDehnIpSk,4031
-yt_dlp/extractor/audimedia.py,sha256=KPchAGXdjQkOJKMEVS-d1nLkwFzUZFws0XZmZJEC1YU,4026
-yt_dlp/extractor/audioboom.py,sha256=Jk6zmgFu9xg4eC5iy3VFPkCPv-BUsqVyn6tDEpKIPhM,2659
-yt_dlp/extractor/audiomack.py,sha256=43rqzXvl3Wa-AI3j41Ub7MNh1a5rPCXCxJGxERZSui0,6208
-yt_dlp/extractor/audius.py,sha256=gOeVEe59USdxb8CBpTLJzEiC2HAi743Eac3mbLcagrc,10748
-yt_dlp/extractor/awaan.py,sha256=2PeieW0zqbxIGbfiFg2f2ifBYZjpoV79FWCDxs3fXRQ,7193
-yt_dlp/extractor/aws.py,sha256=jcCiwmXbDdpv3c7eLN1DlOtiwnRaF1Ixnut1WQyPj6I,3092
-yt_dlp/extractor/azmedien.py,sha256=q3HoURO60xYhhedVOOwvqaa_w5bdXlqzOZMWRP4spik,2252
-yt_dlp/extractor/baidu.py,sha256=QalvwXEI_7fNuwffliOFBvL-r0xL00QzoHlNOfW693I,1966
-yt_dlp/extractor/bandaichannel.py,sha256=r8Sic893e8glav_W8tmQFH1k9ETdQVkkN6qrwCkBQkk,1300
-yt_dlp/extractor/bandcamp.py,sha256=yH8gMhzvLXpej1JhYH5A8MDRAjJjwHYOUypNDI6nvvc,15678
-yt_dlp/extractor/bannedvideo.py,sha256=YucVNPl5nNHTsEFHqegVA4t8_7X25thkdbV7zffGj_o,5380
-yt_dlp/extractor/bbc.py,sha256=cZJGWz6MKAD1xXtbLFAl-mzuHUs8y9WsxLjoqBoCn_c,70278
-yt_dlp/extractor/beatport.py,sha256=516AlMYHBKmjNvyi04viIoPpTitTbAA5ZqQS6LW9fC0,3339
-yt_dlp/extractor/beeg.py,sha256=kwkQ5FoMCW7xJWNO8-yccuyMKCAot90R0jHDFEh00B8,3531
-yt_dlp/extractor/behindkink.py,sha256=E8KyyAPheXH1Gnp2MlZAmoLu_QMEg-vo-a412Lg0zqk,1633
-yt_dlp/extractor/bellmedia.py,sha256=o3viR6gfzaTlG7-i53v8gtohSDvYMPMzL6amt5sTyiY,2957
-yt_dlp/extractor/bet.py,sha256=r2Uvpp-Bla5eaIF3477wAek9TYGB_xZOkAJ8Gt0_XTE,2822
-yt_dlp/extractor/bfi.py,sha256=Mx957MVrUQ56Usod5I5l5u5zGXs0FMGjGirErZ7AA0Y,1341
-yt_dlp/extractor/bfmtv.py,sha256=2efBwjuLe2vm0JSDaVvGwRbVksPHsUM_hCd1WiX4v_E,4216
-yt_dlp/extractor/bibeltv.py,sha256=OnbmEi42LbcG3MdtqRS9kTIdzu8oVnat1KTcz_M1VEU,1102
-yt_dlp/extractor/bigflix.py,sha256=CNqvhiUdP-QPcyp7zOu0pswnDyGP6svM0eLm0o8hhQk,2334
-yt_dlp/extractor/bild.py,sha256=DF3B86NxCeLI8682I1Y3yxN7ZBjLor-jbzfNqUjYS5Q,1363
-yt_dlp/extractor/bilibili.py,sha256=YX9DWt1g7uFXQtGRHjCc4aG1EWVG-AV1Pg-nFmI_QrI,38442
-yt_dlp/extractor/biobiochiletv.py,sha256=KUFs79galXxI8iO77-9vIMD5web4qrX2tqVynDQo7a8,3511
-yt_dlp/extractor/biqle.py,sha256=Nt2jIi-eN7wBYVCMOU7XcXVxSNIzruhhvEF0lqdmD6k,3864
-yt_dlp/extractor/bitchute.py,sha256=8_LHzTSwOD2BFmUtxREMaP9OqJ9t1jc3vm537FAqYAU,5970
-yt_dlp/extractor/bitwave.py,sha256=Qpiy5dQIpZ2nfNSkjo2OlmvP2IUL7KYSByzWlv-ITR4,1846
-yt_dlp/extractor/blackboardcollaborate.py,sha256=lNleNs9GdUGU0EovaVf_Z5EUYrU92NEeBKI5gVWq4Vw,2414
-yt_dlp/extractor/bleacherreport.py,sha256=V6NBGaFYMjTww6CIS5my3GFdBZzYlcOnW5BssiOiR7o,4370
-yt_dlp/extractor/blinkx.py,sha256=5xl14ZYYwZG5eiSIPbMsAPqet7-eZyrtuXNGLvuFSRI,3225
-yt_dlp/extractor/blogger.py,sha256=-hdr_uymefLuePd0xDo9EzWot2o9-CGCk7M2lMLZrLI,2010
-yt_dlp/extractor/bloomberg.py,sha256=yY_B_-bTqtlHD8-tqdMdw7Rl1BtkUh2ehu-wneVBGDw,3323
-yt_dlp/extractor/bokecc.py,sha256=nYzeAyqHKGJL-Z8TlVawyU5k0dOszZg6T6ibsxW1THQ,2041
-yt_dlp/extractor/bongacams.py,sha256=XrHYNWNbD6s6x0zfN9D0tjbJ7MHLCkiQr9FJWPEB7V8,1871
-yt_dlp/extractor/bostonglobe.py,sha256=_MgorvRUogcdrJw0U5ErcZiOJn2PrcBzy9rqTzgUJpI,3192
-yt_dlp/extractor/box.py,sha256=Vow9CMLgxP1Vl5sc_MDs5WA9y2QSmbLe41hKaKs1lOY,3781
-yt_dlp/extractor/bpb.py,sha256=E9hgpCuaR3ZNICRx_0Sff9RXxyK2FS1Qiwx9ScTanIk,2201
-yt_dlp/extractor/br.py,sha256=cELz9ogrSXlD863dnYC9DgD4-0WAxZIOSukLL7oHWUI,11876
-yt_dlp/extractor/bravotv.py,sha256=LOng5Y8xdPnOoplIFo14mHknNb4aJqL84h4sDQQVCt0,5099
-yt_dlp/extractor/breakcom.py,sha256=sBFWGgI3mw5TKZBdX5FNndgqpTu1sS8XtkyOWxZK8kI,2936
-yt_dlp/extractor/breitbart.py,sha256=jnXJtWqvmJNWk79zqZ0eZnyu-GK7CGUkQYvKYa5hKo4,1546
-yt_dlp/extractor/brightcove.py,sha256=t4hLFlIxjMyxXPcHbDWPI3GzIUNEh6GCyM1KgyBK8AQ,30759
-yt_dlp/extractor/businessinsider.py,sha256=svjPnxX_g7HYlejUzF07uD2WkhYG3gFCRcbpluv9H0g,1998
-yt_dlp/extractor/buzzfeed.py,sha256=6WMrGPF9nLaYlM0JG3sSr1ATKMiNifDehmof2Hk0Eg0,3655
-yt_dlp/extractor/byutv.py,sha256=xoPzvO4rQEmWUwb1B6pn9DUNHuKpI3G1tvwaqShMhYk,4483
-yt_dlp/extractor/c56.py,sha256=Ndkl-_uImTP2ADb_rMMmJ8zEP-3Yhkg88EE0pA6q-Mc,2051
-yt_dlp/extractor/cableav.py,sha256=Ghm3_DJ0FkKNVOhm4Zc2-ZSjAU7dmBXZ0llkhwM2hIo,1175
-yt_dlp/extractor/callin.py,sha256=2t-sSh5QNSeQSMeYidctioTDUulYafPfSa871_iejlY,5144
-yt_dlp/extractor/cam4.py,sha256=Uqc8YHMPLztaYqy4EhC0BDvoE2XPK6SUM6rHDxaMsMI,1208
-yt_dlp/extractor/camdemy.py,sha256=El3jwjpdxfZEf7JvGv2aqPMgNn3wAce5lGET6FKZQW8,5772
-yt_dlp/extractor/cammodels.py,sha256=_x0wWqBwXPxrB5AsdGfBkIAly98a-zAKPm0TSXRpNdU,3458
-yt_dlp/extractor/camwithher.py,sha256=AJNmGrsOq99-yUb7-LOf3_TjbxbzEFP-vE1GpYEtWRI,3229
-yt_dlp/extractor/canalalpha.py,sha256=sRcR_0ENAGQiwr-1L5NQ6u2-4bB9BUi-6LSZKfM0TZM,4362
-yt_dlp/extractor/canalc2.py,sha256=hENzjlT9ESGePiXIGvlR2YismmB2JPVBft5_DC-UrtI,2317
-yt_dlp/extractor/canalplus.py,sha256=74-TcDOUEwx0c8jBBHS9k_WfzPcCyvb74I7G4rmlapc,4457
-yt_dlp/extractor/canvas.py,sha256=mmSiNEG0MJLaCd2LL_SOUn0OutdUo_OmKR7NyD_Aijs,16114
-yt_dlp/extractor/carambatv.py,sha256=2zVlCstgnhj7Ncj81ehnZooHE0wFebs8j8Bj3gvIgL8,3526
-yt_dlp/extractor/cartoonnetwork.py,sha256=WcsKHOp0dGokEIc5911ssrU5g3TfJpVUWpGh97wcKxc,2374
-yt_dlp/extractor/cbc.py,sha256=zJsHiHX7gc0Xpz4qmNS9dA4ijIAIKAesW6IM4yT0l9g,22137
-yt_dlp/extractor/cbs.py,sha256=yHVZkOelmmsi4UQT6hLTDm_HCzE6jnyZEQxTW8pwJj8,7178
-yt_dlp/extractor/cbsinteractive.py,sha256=2fTIT_xX9cCWc-MGflpN_G9fvty53he60oW8URttND4,4030
-yt_dlp/extractor/cbslocal.py,sha256=ivJXdiQJDkLei83mrd7GrW6LwLUjGBwJsp_D98J0IFk,4117
-yt_dlp/extractor/cbsnews.py,sha256=4ihf-Y7stUspvPe2igFmtAQhoYkqrP5WiP9VjiJIlpE,7633
-yt_dlp/extractor/cbssports.py,sha256=Ls8cCYYBT3_JdJL8_AwK5JBlomRP2yN1BN9TlTzks-k,4898
-yt_dlp/extractor/ccc.py,sha256=XtmLWuyAW8kjgUPmbe2P2rEDPz1zLkWiGtb80R3wYmg,3875
-yt_dlp/extractor/ccma.py,sha256=m8muUa-9EcG57szfUULDrGda29wmQkicMfV8uvGNBB8,5468
-yt_dlp/extractor/cctv.py,sha256=_CAZTnaI0cOf-EZ_GOcFw35SclsAZAc6C9j4V1X45-4,6941
-yt_dlp/extractor/cda.py,sha256=oPDMe0Lr0Isrt1SL11hvy3Zvyd6yJO-ee31TpnARlIU,9488
-yt_dlp/extractor/ceskatelevize.py,sha256=BEur9LW1PlVB4AK72cAVZAMU-SLn63ixY9bEx8CcFYI,10565
-yt_dlp/extractor/cgtn.py,sha256=lHs-1Dv-_THTiJSLGKvJU7VO5VFY38qFz4GSW_xSJT8,2894
-yt_dlp/extractor/channel9.py,sha256=PbIXihb41x9cJhNvZ0QeqZWJauQxU-jlNmmo2qiIbI4,10260
-yt_dlp/extractor/charlierose.py,sha256=Buer_xlI05rijsYbUsge1sYFrDk5hgPRg6Z7lBs4UAk,1833
-yt_dlp/extractor/chaturbate.py,sha256=fKGZork5BLK9J7Ja6pXyxgL9SYF-6ThJlQnTXPiZGWw,3893
-yt_dlp/extractor/chilloutzone.py,sha256=-8rEBu8xTp8yh2pMFrieiipbN2TQUahgTnRd-F1dNHE,3487
-yt_dlp/extractor/chingari.py,sha256=vYAm51tB07uc05LrxcpS0AOraxxRKntg108UlzqcA5Y,8711
-yt_dlp/extractor/chirbit.py,sha256=_FBq1kLCYy8qQcQFFil86GIjo5z3X7utrQLRkR7RP7o,2946
-yt_dlp/extractor/cinchcast.py,sha256=Kxi8jDUsQxCsaiCj0sNomO-_GRJ5gKwseLd7--9_EZo,1997
-yt_dlp/extractor/cinemax.py,sha256=2OF9APr5a4APjhg4W4lSXZqQ0V_L521rUK0Pc0LMr60,935
-yt_dlp/extractor/ciscolive.py,sha256=Ug-pOw2-J0TBNwN4XJPAoRobZPJx7kRqb1wB3v4URjw,5915
-yt_dlp/extractor/ciscowebex.py,sha256=cVsTvr6n67yzGRDMGtagUeKoeDukQ4Ppy_aMbA4VZVA,3675
-yt_dlp/extractor/cjsw.py,sha256=N7EPF68-_MgsNrZhGLsaeTXKrvY8UOo1-KkE7tw6mAo,2398
-yt_dlp/extractor/cliphunter.py,sha256=XNi5PzG14pujij9ZtHoFsSCFpkMk3Cb7OeOxKGjjHQk,2533
-yt_dlp/extractor/clippit.py,sha256=ErRu1a933Hkkb_rp6HgGFfeoQkCj1tjwyhR3Lv9JawE,2551
-yt_dlp/extractor/cliprs.py,sha256=HZ9hFAtjrt0wWeZTWDWCi-nmN5eDrYoz1qe24Src-wU,1030
-yt_dlp/extractor/clipsyndicate.py,sha256=rZrmkrII23KsqvBr19G9UaHXNsxfX3aXztgU4ul_ysA,1812
-yt_dlp/extractor/closertotruth.py,sha256=yLlHaaoqensA6vgtR_1_RZf-MObccEd6MYNe1PaRNlY,3095
-yt_dlp/extractor/cloudflarestream.py,sha256=Hmng7SBbYOlwly_nyMiLzI9mtz_eCovPgx3azYuVlqY,2677
-yt_dlp/extractor/cloudy.py,sha256=8Ljv6LT5n99iO3p-IrJp3zbX8idSdtNC8mIeGTX7eOA,1898
-yt_dlp/extractor/clubic.py,sha256=GTH-nPfGmVJdtJzXRUu6t6d3OESeAe2jpAXHaFw_pJI,1976
-yt_dlp/extractor/clyp.py,sha256=fF6Ez7k3HnuMpq9lT65wftLnFAuIJwmgO4rjVo4mnh4,2387
-yt_dlp/extractor/cmt.py,sha256=XQ1xdpGTWozk4L1vt46OtfEPYUZBnziOAW5Q9b5uD8U,2222
-yt_dlp/extractor/cnbc.py,sha256=suGVPDrX1m1OKH1E36y0tbk-Rj8ylj1ROhOqgEatJp4,2279
-yt_dlp/extractor/cnn.py,sha256=hVe0i8lebEDXr5JGy-y3Qr8TYXG-g33L9_fBotwEOGw,6510
-yt_dlp/extractor/comedycentral.py,sha256=4rsgQPqqsmzwVlgncSZUtI8i0TKB-nmsaBPvNoZPgi4,2185
-yt_dlp/extractor/common.py,sha256=gHtdDi969Iv8Rx5MUGga9hZx-MzOmYIfxiVd1EFQ6n8,181415
-yt_dlp/extractor/commonmistakes.py,sha256=0pJgLH-NBoaMm7BEksVtKdPPMWYFHAwWG9QTCVkjamY,1518
-yt_dlp/extractor/commonprotocols.py,sha256=J9TXF3iXDQC5VSyTYH12qOKg_IdH6cYOX0WrHwZxpus,2006
-yt_dlp/extractor/condenast.py,sha256=vQCrPt3zM4sJOHGLny2KOGx00wRoG4WZhIBDeKsb--E,9733
-yt_dlp/extractor/contv.py,sha256=EC7EcYD3Clx8eHppDAeIYeYgVLll7raop5ttlx0XU0w,4196
-yt_dlp/extractor/corus.py,sha256=q-NAUO-6i4-GTUY6QSH7FEMc2G-OhwpPe5jP-G71AGM,6313
-yt_dlp/extractor/coub.py,sha256=Q82naXzGCSA3HlNXaU582SJVKWmv_FG4q54ljKyaoWQ,4653
-yt_dlp/extractor/cozytv.py,sha256=8-xQ_hMxDOesbMiPo7OIyCmt1TAqdaaglKvOJ8adR1c,1429
-yt_dlp/extractor/cracked.py,sha256=kS6U6ncv6yXg8zBSp8ECStpBAoeevONsk0Y3oLwH8vA,3138
-yt_dlp/extractor/crackle.py,sha256=-SYCk988IgYdahGsrGNnPiV_w67Xbl9GHDjGnsHdNxk,9781
-yt_dlp/extractor/crooksandliars.py,sha256=_YnZw9fC_xyq-hlnfRhzkU_UtDNNdUfUsxH7N4v1jPQ,2061
-yt_dlp/extractor/crowdbunker.py,sha256=wtJbskmG9aywlk9F9WtWHGx0yIy88v-DjEi2GU8nEz8,4255
-yt_dlp/extractor/crunchyroll.py,sha256=jiK-Vo-d_jOU_1I1tIe2el8jh1nJpu6IWV_LT3kMPXE,37360
-yt_dlp/extractor/cspan.py,sha256=s1fbh9v0Hi6uQ1ZOTZL6wfmmbQ6ZNuKrZZ1-SyAEawU,10276
-yt_dlp/extractor/ctsnews.py,sha256=hEl04crS3UHNxtKXsXOf2ElmHGTxL5ROieCigurNX3U,3648
-yt_dlp/extractor/ctv.py,sha256=QNsHs218dge0ze3d6SjOXhb7UkxSwuTxxA14ngQzlvs,1772
-yt_dlp/extractor/ctvnews.py,sha256=vK6YNGXldADzsOLOgwyrN8IpKMEyRTG09gfr2b-61UU,2611
-yt_dlp/extractor/cultureunplugged.py,sha256=IPZSbFtFLCbk4IVEz3gLgOxXDRt9KnuB6N15PJKCp1A,2494
-yt_dlp/extractor/curiositystream.py,sha256=XgRzvtwaQTHzHR-SLHHv7ek6t8E5h_tneWnEVmGKE68,8123
-yt_dlp/extractor/cwtv.py,sha256=yLeeNyB-ZhS6EVj3YbknK9eOXnkBb3Y6m-9weR30NQk,3840
-yt_dlp/extractor/daftsex.py,sha256=poadr53956UwE6-YeTWIRziHibzyP9f7L7sWyx7lRFo,2890
-yt_dlp/extractor/dailymail.py,sha256=kJPEFOuCZRjA2HFfIRDUDxDW5K8Bq15b_kQoiaRgrjg,3138
-yt_dlp/extractor/dailymotion.py,sha256=0BxTR55OIH4dPCZmn54235SmP5ARkTsvbHsdpMCIaBA,14833
-yt_dlp/extractor/damtomo.py,sha256=WZ2IhR71HhTFT1ietZ4jXNjIjFJMOBmfwnNygrdx81A,5626
-yt_dlp/extractor/daum.py,sha256=iyzonPXnHof1dddzk8eCzT-B3XsYq26UdwCTRFCvIv0,9405
-yt_dlp/extractor/dbtv.py,sha256=OPmyy7sjkreOZ-mcwmY5mjeUXh2XuTI7hEOeB6RiCSs,1951
-yt_dlp/extractor/dctp.py,sha256=7mVjUn-B4rnU4Y3kEqVQnwaiHxQXh5-wpq538Ic1kkc,3588
-yt_dlp/extractor/deezer.py,sha256=8X3nZhPQYUo_nrOFc1YQy8yTmJ9FhdBCHlHaVl8SkTY,5275
-yt_dlp/extractor/defense.py,sha256=iO_Gu4-dmSyVOd4FBbkhQKnGY3NEbVuUPHXlfgArOtw,1242
-yt_dlp/extractor/democracynow.py,sha256=5GxV8LMOmztZR377OFMu28wsQPoEiOVk-28TowHhMYw,3101
-yt_dlp/extractor/dfb.py,sha256=T8CB5PSWv1X3OnrCOrppMsuWKM8JQdDW2PkI6oEVbak,2241
-yt_dlp/extractor/dhm.py,sha256=OCPENDENLTQ65k_gLbwJZWGyO8ZvCsfLmV_047Ed1OE,2091
-yt_dlp/extractor/digg.py,sha256=DCVLBoex0eg7m-RPB65VDs_kcsFIyr6yTcTXCmUZLlc,1882
-yt_dlp/extractor/digitalconcerthall.py,sha256=XhVgyzDRr0mkHmHYRe0OZoC-ccydymIoo-JwofnOn3k,5813
-yt_dlp/extractor/digiteka.py,sha256=aDHCPBZUV85nGLE7D6zGamKfzoXfqrUaTohUawkjmaQ,3505
-yt_dlp/extractor/discovery.py,sha256=p6kEneQlyfn2_o5L6USfQF7m7HrLOO3NgHjAd93ZFUI,4898
-yt_dlp/extractor/discoverygo.py,sha256=jOXobYWnTxbbfL4kYKzgzFNZMfzTGkEocYu01mODADw,6079
-yt_dlp/extractor/discoveryvr.py,sha256=EokWC3iyvXpdrlomLvuCaw4mA2p8240qn_sFETg50mE,2129
-yt_dlp/extractor/disney.py,sha256=iEZgRS3vvUmIdeHZDMu8PCsJWBy7eme4DHst3GkB4_Y,6863
-yt_dlp/extractor/dispeak.py,sha256=Uuojr2F-xx3TCwgq9goN-6-8pwQ22zmowaS13q3I0U4,5029
-yt_dlp/extractor/dlive.py,sha256=yJHwy_gsw9EoQIHagSto_J_V-TEfAVNmW1QeRDlfZPI,3089
-yt_dlp/extractor/doodstream.py,sha256=Jlx2vpRl3pAx4ICuFBS2vNNU5aPWtvtaA7xr7C1ubWo,2872
-yt_dlp/extractor/dotsub.py,sha256=SmxJbUFtXxMPtg4UDbO0Am36jCDmMgMJMdA1FdmDoCs,3079
-yt_dlp/extractor/douyutv.py,sha256=Iu2FyWuprjMa3lM_glNKy541il_RYcCIwZp2rH1NO9w,6860
-yt_dlp/extractor/dplay.py,sha256=d6VWEnS6syNpJBxG_w3dUYyKHvwtNVBuj0mf3PKyFS0,36170
-yt_dlp/extractor/drbonanza.py,sha256=-iiUKXzb9QeKVGVKmMv1PyO7S7KTgvIaWvm6FHLkhws,1967
-yt_dlp/extractor/dreisat.py,sha256=WRny3tNvF9Osic4YE7xnyI27CkfWXEzisGG1kFgNuRA,1614
-yt_dlp/extractor/drooble.py,sha256=888Nf_klm3eEN_c34qjd5lEVy6RHtmFoE5De6GwrVj4,4244
-yt_dlp/extractor/dropbox.py,sha256=EG-IXscp5zD8fhJ7gJvbHtjZ9eZGUQohSSTDxHHWYMw,3395
-yt_dlp/extractor/dropout.py,sha256=uKBuZXjKpdKGZ7-jsqVb9hQk-bstzOL-KjIPQ85gYos,9350
-yt_dlp/extractor/drtuber.py,sha256=nAQAHrHt5s8uCLAAQKqNyWqSPDNTHeaGbF7lbzTeizA,3964
-yt_dlp/extractor/drtv.py,sha256=KkLEWVrYa3bMtFaS5iCuOTo_WI6Jt9L8emXo5cmSCf4,13760
-yt_dlp/extractor/dtube.py,sha256=u8gkaHDKREI8FHBc0jt4RcF7238yb5TSk86d0bLd4fQ,2784
-yt_dlp/extractor/duboku.py,sha256=4ksF5XN9aUukkCz7TCqQrlGGt80Z_35nTl6dHo1eTsA,8313
-yt_dlp/extractor/dumpert.py,sha256=IopTcem6vpcQjMxOjRf1uQDNfv8w343iLlxuCBS-Law,2701
-yt_dlp/extractor/dvtv.py,sha256=lMjGanNp4IU-J1Ar0tosqsmElavbKPmnUZy7CO7Y_bs,7528
-yt_dlp/extractor/dw.py,sha256=d5it-6h_cQeqx-tL5jJsiJhVOoDNkzS49gu_ZY7cEus,4099
-yt_dlp/extractor/eagleplatform.py,sha256=stJbHBNW10LWDdEQOla7cw_ZQXNPk5vAq83OnMVae8w,7732
-yt_dlp/extractor/ebaumsworld.py,sha256=TMFEr805YK-ZrmbludqtKSN8CA8jJuUlVDwzTkIZvdk,1086
-yt_dlp/extractor/echomsk.py,sha256=YVN2auiHqboHxHJ8MHTISOOIacCWKBNkri2uyD7uJwE,1317
-yt_dlp/extractor/egghead.py,sha256=LIJ3AO9-6sfvzw_6UYl5PK72IOfIf3ETA6S2EmJ-QE8,5029
-yt_dlp/extractor/ehow.py,sha256=uHLC6daoEOgd6TiSWH0VYUX0cbIK6OJlE14ugTgif38,1518
-yt_dlp/extractor/eighttracks.py,sha256=vD4pnSrdvoBwJhmDHvrmn162sm6IigWgJVRHxkQMOBs,5868
-yt_dlp/extractor/einthusan.py,sha256=wc6KA1UAWq353Vi77rWbLGoYLqSeDaFFq-1kfDnn2ts,3706
-yt_dlp/extractor/eitb.py,sha256=tEm6mdL_Hesx_hviIsKwtfYZcnuaS52evCVknqIBTks,3278
-yt_dlp/extractor/ellentube.py,sha256=OHb5EkTmNMyALSvyKUCTd0tNKZllpwvr8FMQZRuQR0M,4909
-yt_dlp/extractor/elonet.py,sha256=1cKv0sCHipUkmALxFbQDaPTtiSgmPT6uIV6hLa64Gz4,3593
-yt_dlp/extractor/elpais.py,sha256=k2-CBrBHwdhvrZUvjwzgzG9TeUbjHs6M6lkzQHNwfRk,4364
-yt_dlp/extractor/embedly.py,sha256=S1FtaC5S-9eof5RzC6yXX9c8ki0rJNp7LwKhMt4PxDE,842
-yt_dlp/extractor/engadget.py,sha256=F5dk1P04pNJ1AuxwzeNd1N_Pm0gN36n4nwpNiZOWMZU,807
-yt_dlp/extractor/epicon.py,sha256=Y6e_LxmImaF5K5gXyiKT1p2qtAUMkM4AY6ld6BX0iDo,4288
-yt_dlp/extractor/eporner.py,sha256=OGAxQhG5d58Mmo2UStdouCaG5RQPH1sEJ6mRSJ9PvNM,4656
-yt_dlp/extractor/eroprofile.py,sha256=X2TgARGJA5qSdIKJKKdqBd9xrVWELT6N3WK84M95nq4,4638
-yt_dlp/extractor/ertgr.py,sha256=7uLcfIT2OaKhPeb96VT7FEPBNULinAhVp4iJX3zqL6s,12953
-yt_dlp/extractor/escapist.py,sha256=KbYLPwhi2hyfAHP2rdGtgLkGIVMN_8Y73LRvNw1WhCE,3622
-yt_dlp/extractor/espn.py,sha256=IluEs-f65-BCczjRNZveLfUmG6q7A5ix0KRkQuQRGxc,10566
-yt_dlp/extractor/esri.py,sha256=FoLv_t1njCscGYPo9EEen4R8ApAg7omB9XWexhEQszY,2628
-yt_dlp/extractor/europa.py,sha256=El0sNcmXLyw7u32DxTWsrJ3j5J3pkfYXTnAFpUO79GM,3344
-yt_dlp/extractor/europeantour.py,sha256=AtWpCT-zccaDeye1184vIBaTzQlB7FbLfG7aBQ268KI,1322
-yt_dlp/extractor/euscreen.py,sha256=2N_pB2Y0_tWqV7iimxGb6n4BbZCIHehkEK-8oslwELI,3418
-yt_dlp/extractor/everyonesmixtape.py,sha256=QOS-CXEdP2h-hAL2AhLAoLG6vhxaBF9c7NNxK-aXI-U,2805
-yt_dlp/extractor/expotv.py,sha256=NoW5aHnKfHSqK6_--N-qy2kP6SWAR8Wo8_PsyIg3dCI,2913
-yt_dlp/extractor/expressen.py,sha256=lHFk_GFkvxHb9Em8bnjL7dak9PSJx4sXCJbw9JbkN88,3865
-yt_dlp/extractor/extractors.py,sha256=zaEFY0b4MovGgf4YC7YddkvDYI3leXBUlWWPDO2maFQ,44074
-yt_dlp/extractor/extremetube.py,sha256=FVQEdBkP1baLIo26IU63-JxBlQUzwkbhfq5jkVc0ANg,1747
-yt_dlp/extractor/eyedotv.py,sha256=oY7jGaOMxDXDgw1mdEWLG3WLpE5uXg9XWb9fy47RVQk,2687
-yt_dlp/extractor/facebook.py,sha256=jwQOBtbpGxIl01K4uO1Pw5MBaakT3KwVctb9nWlWU3k,35034
-yt_dlp/extractor/fancode.py,sha256=gNDEjnciIfFub356XTNqoY96aNh8aq9r68X94s-1H3U,7386
-yt_dlp/extractor/faz.py,sha256=dkqraWVCiQx5rHPqWFuIXKoFveT3kV3USubmZuXnDNU,3586
-yt_dlp/extractor/fc2.py,sha256=y71Fdh0t7J4T_v-6ChvjzBmMAO4gPgyTjMQ1SCx_fU4,5236
-yt_dlp/extractor/fczenit.py,sha256=ZjI3Yw6Jh4yJognle8JUo1C_1c5vdRfYvxgmPama7WI,1760
-yt_dlp/extractor/filmmodu.py,sha256=xaUR15JHDHMVZcXi9wU0-FFxHaD2cREdnaEoRreUwQk,2648
-yt_dlp/extractor/filmon.py,sha256=9nc8pqGuMFmCsv6usH7Pp8lhWsZNieq6kAi3IWBfg2U,6009
-yt_dlp/extractor/filmweb.py,sha256=pUR8oZR8U1LZoqQsS6PBgeXsjoeUUWCLK6mb0UkVZC4,1465
-yt_dlp/extractor/firsttv.py,sha256=r55yC7-yluanh2gM4BUJAGcJ1-ZCgv4g2GREPnojTAY,6458
-yt_dlp/extractor/fivemin.py,sha256=n57QO56Mjz7v7Q1qKM0vFKjWGuyQpDBNiBNe4Gl2ais,1917
-yt_dlp/extractor/fivetv.py,sha256=fGbJyJsCj84iEKO9OiSbkJHGtoOpoAt6X8ohffOCVok,3195
-yt_dlp/extractor/flickr.py,sha256=exigKGgPBYAkn4ces3AuFIOA627SzKBT4gAcZLHoekc,4785
-yt_dlp/extractor/folketinget.py,sha256=QZg_bsepZk3n7Gg8nS-SKT6rH_XFVGRgkirstL9prA8,2643
-yt_dlp/extractor/footyroom.py,sha256=gSxBraqnRnIYgqxtHgJwcrd0Q8SJr8shT5QUKK3wzbU,1875
-yt_dlp/extractor/formula1.py,sha256=Sp5pciuyEkrxlN6Il_ua-JIhqZCX9eQR09XBNMkEqus,995
-yt_dlp/extractor/fourtube.py,sha256=hUzLjHH5BPwTfEWpzlyqSayGCShrpMK75mEYwehidsU,11578
-yt_dlp/extractor/fox.py,sha256=OBLpxHmMUydDE2REf3wG2qaVAkmR5FypJXbBX91AdoE,6532
-yt_dlp/extractor/fox9.py,sha256=d_mSVr6qWNTnE3ehwCrZJh5ZoHq0bEw9wp0AP8xYMFw,1466
-yt_dlp/extractor/foxgay.py,sha256=gco9bK1t81oW88PZHnWD7_iyPwmX6WSWRnTUVM3tqJU,2203
-yt_dlp/extractor/foxnews.py,sha256=R7_Su6xibTaSDzGN_9NzZdNAUBjFxc3Z5yNMNQt6VhA,5152
-yt_dlp/extractor/foxsports.py,sha256=_zdEug3E6BN1LxswtXEQm6-dVBa_h0uXhTDjzv45Kek,1097
-yt_dlp/extractor/franceculture.py,sha256=CB6W_Rk81dm8Q4ptzj6hk7GZiE7NvYrWmEt1_SqD4s4,2866
-yt_dlp/extractor/franceinter.py,sha256=R30dXcU_c4Oj2HE9UXbqM3LbHBa-AuUu-2MVkLIqpSw,2215
-yt_dlp/extractor/francetv.py,sha256=eC8BVhktsrC84Vcm5wsmEE7-tA-ygO-87OoJ39Iofk4,15109
-yt_dlp/extractor/freesound.py,sha256=GYHfLkU6MGRn0hZooNHrnfum9DbEcVptXZOXkRu7iIs,2496
-yt_dlp/extractor/freespeech.py,sha256=HdHjYKQ-KRPnKj9sDpH_t_Buhbf8Go7_OzhdCm4Uvy4,1057
-yt_dlp/extractor/freshlive.py,sha256=FGf8y9XNjcKvgz7U4E96OGylXSzEnPwkMgLNFX7KtW0,2641
-yt_dlp/extractor/frontendmasters.py,sha256=KDYXKLMYHKMZNnrnxG1dmP0MxFKJ6Zcz_u15hoQHYQY,8803
-yt_dlp/extractor/fujitv.py,sha256=1lH2Q8xQSfzP-nJO1JdKtT_mFEwkL2Jzy5Qn-vsZwP0,2304
-yt_dlp/extractor/funimation.py,sha256=d_uk54RyyEmVKA8XAtNNYSphIySnAXNWg5tz87tNE4g,14930
-yt_dlp/extractor/funk.py,sha256=gRzJKdqvOpSCTtxc2imuKeZL9UXxgPk5D-7FAlzpI7E,1705
-yt_dlp/extractor/fusion.py,sha256=oc6pxu9uBtbqLA_xbrTpI4KmsPHbQ8EKxgL3qaiV4Vk,3168
-yt_dlp/extractor/fxnetworks.py,sha256=bvpDTYAUs3ALaJqH7qKkaSMyeMot6EA9-jsrOYQBQvg,2944
-yt_dlp/extractor/gab.py,sha256=da-GXerxrv0LDj8FmHGzd3v1LmSxiQN3Rt0R3yYiJ1A,5842
-yt_dlp/extractor/gaia.py,sha256=gawdj_uegHhkFSuQR2wCYZITu9PaCRZ8ga8Axf_09FM,4685
-yt_dlp/extractor/gameinformer.py,sha256=9w9gH_413XjBJFSjUPxnkDpVzmwoHvx7Dblm5WPfoy8,2122
-yt_dlp/extractor/gamejolt.py,sha256=CSabA3Y18OXBr77XMzuzkZPm613sv5NDe8M6ZyX6Xr4,25208
-yt_dlp/extractor/gamespot.py,sha256=Oe-ZnBquzKSJcRu7pVGexz04lYjepezOe-gMhVnpaDc,3156
-yt_dlp/extractor/gamestar.py,sha256=mIZM-yAXWLBH6L1wZNKy3V4hpfCv2csiqJhvkyBXu60,2610
-yt_dlp/extractor/gaskrank.py,sha256=RZFbynKkrkgsFq_avR3kcuzJZHR3xzsMUlCq7G2zwGw,3843
-yt_dlp/extractor/gazeta.py,sha256=wOb5_Kxa4GSPGGeGaRxnWVUE-fmpITIdnBsXmSP-NbA,1941
-yt_dlp/extractor/gdcvault.py,sha256=ZD-9G9wTpS35ke_IQ8XAO9vhaz-GVZYO_vPt1OA2KUw,8592
-yt_dlp/extractor/gedidigital.py,sha256=0KtW4HDlTvt0A1-JxmcdMN0mC2VUNL-r6r4pSooQlVQ,8818
-yt_dlp/extractor/generic.py,sha256=ZKnFtyeJ33DlAlZYh1lbLI5hwG8LSjnec12Sa66o4Wg,169480
-yt_dlp/extractor/gettr.py,sha256=Kff8R6Q9kOMYQtD-uaV7HVMOlKJgZ_BiX9_l5tqAvqs,3730
-yt_dlp/extractor/gfycat.py,sha256=jEMYXCE10v3g5xYCDYzmv3IrU-RbOYkwXOtsOi4Busg,5384
-yt_dlp/extractor/giantbomb.py,sha256=EpNBL9IyyO19PO9dci3sPNKIqUHes9isrAoJYbZHe3w,3042
-yt_dlp/extractor/giga.py,sha256=tNMPRYimHGve_dQHQtwaOocf7kZ5A7Nc0lr6QyNfovY,3820
-yt_dlp/extractor/gigya.py,sha256=mOOEvZYGSkjopGbL13ScQmE2lEHGKrk9Ktz-KUgfrqk,677
-yt_dlp/extractor/glide.py,sha256=VzUsfDQA1nkKrfEsxQzC6MzfZkJwYnvJlw2moXsSPts,1543
-yt_dlp/extractor/globo.py,sha256=ZXb6voZYmPhcXNilvGfEWzyA2CQ1cWDxbdmEbSAwNtA,9278
-yt_dlp/extractor/glomex.py,sha256=8oAOzFz01wH_6RyVFp9ZNG-EaX63W9zScde6k34GZ28,8614
-yt_dlp/extractor/go.py,sha256=oRb572mKuEJKUiL8QiWy5S6e3x7VuURGHx3YViSqVak,13108
-yt_dlp/extractor/godtube.py,sha256=6tgsH4iFTIRfNsPct8NXXMNTISEm4u9m_WC0ys9fNqM,1772
-yt_dlp/extractor/gofile.py,sha256=3hWdAmODW8VtBgWoAtLVuXk9Y5gCMRG8CZM1zoyDFCg,2839
-yt_dlp/extractor/golem.py,sha256=ajnZwWxCgGRBxUZyiS8nah-o_2KqYxXBMMI16TAm8N0,2209
-yt_dlp/extractor/googledrive.py,sha256=nTjxmpqkW6qcjmbfFLADW1_CBvU67GZAvIZtZ8phKyM,10979
-yt_dlp/extractor/googlepodcasts.py,sha256=ksvt_1ve6SA2HyY5JeMpk6oZ1TCf8B7IfQ2mHX5q9So,3398
-yt_dlp/extractor/googlesearch.py,sha256=aeYg538eapZ-ed9HgJmUaXvRUCxf80zordCj_rmyl7o,1204
-yt_dlp/extractor/gopro.py,sha256=_OayzL1s2DjwyPYGhlOqdcuQCMw4TgdWQf3TxJy77gE,3880
-yt_dlp/extractor/goshgay.py,sha256=Aej31Z8pE_rFyf-JX7QzLpYCNqJSrvezXIRRyLuo3Rg,1542
-yt_dlp/extractor/gotostage.py,sha256=LRbW80gOBoMCLhVobgv2qanyGBJG6H7p0usMmcdXg7w,2784
-yt_dlp/extractor/gputechconf.py,sha256=PfPExGJv4Pi0drGWPGaVhkkb50y1Ll7U3reSPcfCesw,1201
-yt_dlp/extractor/gronkh.py,sha256=kwkGf-Eos7ItkybFmGUqyaCxhfpa9ySrabshSKmUxu8,1714
-yt_dlp/extractor/groupon.py,sha256=9q8Zd2ehZFBJoblwrb6_v1MQTGn1oyAaqOYrRpmSRi8,2599
-yt_dlp/extractor/hbo.py,sha256=QNnEcIRxyxqGXxCukpmN30bldM2HeLDbT2PEviLutm0,6128
-yt_dlp/extractor/hearthisat.py,sha256=o6-WaXwh-doPa5UWm-8OZwt6PQ78gYBkJ6pi-bkATos,3583
-yt_dlp/extractor/heise.py,sha256=01khFSoq7XoH8mlH-n37AgCZF2dPXPjoKJ9VW74pWxE,6543
-yt_dlp/extractor/hellporno.py,sha256=U8dJZSQkjMlIao2UOgprx2DEzwqpAjEhpwHrGPmxhh0,2716
-yt_dlp/extractor/helsinki.py,sha256=WdB7tDiMeAjDdzOcKQMW5G418xe5Zw9Fs38dq2OO7xM,1352
-yt_dlp/extractor/hentaistigma.py,sha256=MCyuul0BoOqAfK3wg552DNr8v6YK6ZuUxsu7BCIk9kw,1201
-yt_dlp/extractor/hgtv.py,sha256=uLr8rf8JoEDHlaXeiwDpN0OOt_l8IT55a4K80hnabBw,1435
-yt_dlp/extractor/hidive.py,sha256=Fx0ELXdXZv3vatEm2CdlvoFT4ZWA7pS8GhTlkC0EhMk,4977
-yt_dlp/extractor/historicfilms.py,sha256=_lzlox0fH4N0NKzM8c_0nsAzPIAFIqESVgtztD9YmRo,1581
-yt_dlp/extractor/hitbox.py,sha256=q0e3JlHwyaxAmDS1hIsWccpGK7BmhcgfaA0FHLpvQj0,7396
-yt_dlp/extractor/hitrecord.py,sha256=2hGrdycMsxhLmX2W6Fn8EY4iVItR7Ur13TWy5dlbJYk,2269
-yt_dlp/extractor/hketv.py,sha256=bLhJTGgN1FYbF1T35Ts-Md6mkfTYmmXKlJgMBsZJobM,6965
-yt_dlp/extractor/hornbunny.py,sha256=qE5PYvUG5FZ7gpJpTARpYjXifF2GzExxgGURcO-sOpU,1527
-yt_dlp/extractor/hotnewhiphop.py,sha256=xgjkDOUAWvc3vWDLR8uItt-XTtLmJA6sNvUX2sFmMQE,2283
-yt_dlp/extractor/hotstar.py,sha256=205OslpunsJYr-78Au3-Lq4AClfNHzWWyL70MFHzc20,13617
-yt_dlp/extractor/howcast.py,sha256=k59o2rOOFIlj85U8iyUmOsjAPmJwqhAZLw5xOsv7Xgc,1370
-yt_dlp/extractor/howstuffworks.py,sha256=10-Nq-zfO-QxEd27Uw83-RFLxrNMCLkPiumVOP4STIs,3465
-yt_dlp/extractor/hrfensehen.py,sha256=U8oCLrb8iXfCmiqHNuq_mrPfp-qzrZuIusi1YZz5BYQ,3860
-yt_dlp/extractor/hrti.py,sha256=k_yfJ7RGJ9CSyi50JFJId2scCwp7W-7DMfTS0a4gofI,7184
-yt_dlp/extractor/hse.py,sha256=47r2fKcJkvGdM4MNbeXksXfDDep06DlC2hEub6rU9o4,3652
-yt_dlp/extractor/huajiao.py,sha256=Xps-r8pzcb2Fg2irAKxGkAuzTJdjsuWZELQRaJBHXSQ,1850
-yt_dlp/extractor/huffpost.py,sha256=PqVoBaTcADZyYMR79j4jEAr4W_bG1LmPAq08oPrLpBQ,3389
-yt_dlp/extractor/hungama.py,sha256=aMwgh5rPb8auDjngk47hWSKooiYPVO9uK74JhbO2HPk,4929
-yt_dlp/extractor/hypem.py,sha256=DY1lqmsK_OTzB_z4A3bOVunAHolNa8fYwOGMMlwcRCI,1551
-yt_dlp/extractor/ichinanalive.py,sha256=VbqymUiZNFVRiTV9GCzqgbOXIcWH17VvsSXp1fcYunY,5918
-yt_dlp/extractor/ign.py,sha256=aFcyu1nE3x3uDsKox9iqeDBTkV-S-e9ilMYQnYN_Zj8,9495
-yt_dlp/extractor/iheart.py,sha256=ksa17Xp5ZRP9ww9quiI0LL7GB3gbzI9zYciakhsSLgE,3465
-yt_dlp/extractor/imdb.py,sha256=qMfT7DSEnTgff_h-0CET-o7yxU61TeApx7TOpaq69I0,5238
-yt_dlp/extractor/imggaming.py,sha256=upQGXZldWzkjFjORdRZp4iuMXen7csj0gO2tpLU5ZfI,4850
-yt_dlp/extractor/imgur.py,sha256=eR0mp_PLMOwb5IS3jVxjGpmkqWRbgInFjIoUms91Zj0,5104
-yt_dlp/extractor/ina.py,sha256=rUnzflYt3-gf27R7fLe1eSw1GbGjuhpRnw5HeZvst5o,2948
-yt_dlp/extractor/inc.py,sha256=b77TZL9BRdd7ImJpdet5U1eSRN4cUS3ARW9fcbJZAgM,2299
-yt_dlp/extractor/indavideo.py,sha256=VCY-zasXS5F2scDUljtGXOBAfTY6N7Em5_j3O7-VSS8,4415
-yt_dlp/extractor/infoq.py,sha256=NeKQEBMT0CtQZGD05QPyX2KBZZ89L7PBMx478OkoyVc,5072
-yt_dlp/extractor/instagram.py,sha256=p-dgI_Z3Fc9ddhXssnDvPe1VCMPTUgkjEshJ9Rl85tA,27113
-yt_dlp/extractor/internazionale.py,sha256=B2iFbcleU47Yzo70f3s91utPk875X4GlBxWndf1OQbk,3196
-yt_dlp/extractor/internetvideoarchive.py,sha256=C5jk9K8elv1dn6K3YIQEBXAq3Te0RuSOmy49XdiOgWA,2338
-yt_dlp/extractor/iprima.py,sha256=uz1I8IxSiQBzSldyrUt7FH64K0Xt4tXwAXd7ww9M-vc,9813
-yt_dlp/extractor/iqiyi.py,sha256=q0VGflnnY72pAXMchTU8PoOhgJIln-dXPAcBs39YxRM,31144
-yt_dlp/extractor/ir90tv.py,sha256=hCDQzUeIFbYVym-eWukWK4ja3kZ8xpVoHN7CCYgx3Nw,1739
-yt_dlp/extractor/itv.py,sha256=MSBiApuF3lF8y-ZniHpCTNZkMyTz_NpKAohQdwMylkc,10775
-yt_dlp/extractor/ivi.py,sha256=CDTcmb1why6w92iMDfFWY0bBFfAMZUVb8szXagzXbTA,10413
-yt_dlp/extractor/ivideon.py,sha256=2U_nmDUYF0eOcak8Tg6EdQjgP77-H5r7xXdolqBqhjs,3222
-yt_dlp/extractor/iwara.py,sha256=UG3ArHIbVi4EnX0IfkzcUaedZzJI2KZnJCMo88K4KKc,4413
-yt_dlp/extractor/izlesene.py,sha256=ZD-Dow8KiuZi0hvfNyV9zpPJuvIhDsRtKi7pw0vh9XY,4152
-yt_dlp/extractor/jamendo.py,sha256=AjdKFcawiSSthP9pOGkdlv9paiInhNv3A01di1H6PXs,6898
-yt_dlp/extractor/jeuxvideo.py,sha256=qdz-2drLFFG9Si_-m7TyG4UB_tShWkpqA2kG1hCnxO4,2027
-yt_dlp/extractor/joj.py,sha256=RMqasY5OfCO1BhdNqK-uOEwrhz-QUhPjcAkAv9JdFr8,3660
-yt_dlp/extractor/jove.py,sha256=JaEwYaTrmsxIGfeYmZftuo7SOxx3bYk6FU6anp3vIjs,3061
-yt_dlp/extractor/jwplatform.py,sha256=CA8W1Fo7Njzje01o2jWv_1l0hCaoViW_ohio-Y6B3MI,1963
-yt_dlp/extractor/kakao.py,sha256=eGPsOuxbsTICAy4bZpwEOTzQgHRcBjShEZTU37NFNmo,5972
-yt_dlp/extractor/kaltura.py,sha256=GPv5aJUJpcHjf_iwgjd8WIzBLIQTpCApBToCm587UMc,15470
-yt_dlp/extractor/kanalplay.py,sha256=9QeLQlRIDRVSuSIlkeRc1CLm1ZwWtWjo6hE2g0GvTT8,3269
-yt_dlp/extractor/kankan.py,sha256=kTAwO-AUZteUShParwREw0ByP_mes9owDpaJzGbw1Tg,1738
-yt_dlp/extractor/karaoketv.py,sha256=JTGbgX75XcNXzAl2KYh5MM1IIeOMnz82k-nGi2FsKjM,2340
-yt_dlp/extractor/karrierevideos.py,sha256=44nvkoLp-OnVLh-Hlc2Gozz-3mAdhwup5vl2JoNdF0Q,3379
-yt_dlp/extractor/keezmovies.py,sha256=f5LbWSZ0-UQTkYeCO57KW2N3KvpUXu5KUTFWbZuMVIA,4733
-yt_dlp/extractor/kelbyone.py,sha256=bvbremUPMVg1WfHOMM9-ub5keBb2BGs7S1uF-pPYytk,3564
-yt_dlp/extractor/ketnet.py,sha256=4EXoKWdRY8ld6WvEdS7RIN3Owl-3AupJ2CLqljp-1TM,2526
-yt_dlp/extractor/khanacademy.py,sha256=2o7sQ_Zdp1ufmmBd7NSXz_mpiGfwznApbSZUQgxGd4Q,3919
-yt_dlp/extractor/kickstarter.py,sha256=Fbys-clnWrnxJSWymRcnQmBPvS369M5FM1Pq74hJrzU,2708
-yt_dlp/extractor/kinja.py,sha256=jXnaSoO_9UaE6yWmZj4Q9jL6OHtKZgNU6VN6ly_2k_4,8564
-yt_dlp/extractor/kinopoisk.py,sha256=kyY5ILHD9UIix3VpmWJHt1tEQSgG-BdGEtPTif1a3Nw,2161
-yt_dlp/extractor/konserthusetplay.py,sha256=YkSRuwUivhoQ4nuOPklGLXMeEaPshSulcZsPxCkVChA,4505
-yt_dlp/extractor/koo.py,sha256=hf8r9vNJc_seVFBNjFq8ae9znaf6BCH4Am94pi8hewk,4778
-yt_dlp/extractor/krasview.py,sha256=7Ei6_0MOYdMj1ZfVbfUpIID6Jrp3R6maJEm7fDuc4V8,1957
-yt_dlp/extractor/ku6.py,sha256=yqBQKvz5vS1BWh07t03mVpJB0ydgX20jCcSo1b9YLZo,992
-yt_dlp/extractor/kusi.py,sha256=SW2a8uSFG8r9epL-brCeGV2h4XUvUJG65G7Thl_fjZM,3104
-yt_dlp/extractor/kuwo.py,sha256=vWdecIAP3duharRzMhQnM6JmgYGXg7dOyWoLiCLfGn0,12533
-yt_dlp/extractor/la7.py,sha256=agf8JtWu5F6f3xeGtmG59skTbtWVtzgVg9IS_Rfkdv0,8705
-yt_dlp/extractor/laola1tv.py,sha256=rnRkkdmkC83xRPxT0vFM_cSa686S9jpwugt1veZoWZA,9374
-yt_dlp/extractor/lazy_extractors.py,sha256=5R2BK-6KAyfP6M_N1Bgpy3nBgF__fo6k9NuYuJGeETU,517194
-yt_dlp/extractor/lbry.py,sha256=6l3Wevtr7gQXhmoqO0aOp5dbPdBHVhg9TIHl-UiwH2E,12268
-yt_dlp/extractor/lci.py,sha256=YGqGuGYof7L--KcZLPER-0PvtZR0dShtkVR4MZLN_sY,953
-yt_dlp/extractor/lcp.py,sha256=w9_kD-hhvYHTNtmZAy43bXzEGQCSaesCiobYplNJx90,2953
-yt_dlp/extractor/lecture2go.py,sha256=_Yd-Jzj0xjLfPnFG2Mt1r6zqwHCfy7tgLi3W85tThys,2402
-yt_dlp/extractor/lecturio.py,sha256=kB7kwLsNg5B8l45LC-QYKdAsOyRbEHPbWtd-TiXxOPU,8429
-yt_dlp/extractor/leeco.py,sha256=JfiCa1ZcHOYZvE_18K0BpDgkGHwbmYRC7Dx1IYxbDR8,13087
-yt_dlp/extractor/lego.py,sha256=41hn7KVRmg6W1swGjjuUPkl31-M07kpKF3_OFIsBpS4,5984
-yt_dlp/extractor/lemonde.py,sha256=WUurznnMznn9UaziVRFQjXcIXUgFhsQz5OjsBj0iSKQ,2329
-yt_dlp/extractor/lenta.py,sha256=PevswvoABNTd6ji44aopP2nhuH-6tdrS-zUB0wMHh1Q,1682
-yt_dlp/extractor/libraryofcongress.py,sha256=b71UKQx7R9Gu3GW99Y56He2WqJWsOQtpWQRZOEr-HeE,5029
-yt_dlp/extractor/libsyn.py,sha256=MDvbBLfj1MF24_trhgVlAzq-_GwvAdmn6How1x3H70k,3623
-yt_dlp/extractor/lifenews.py,sha256=rkUO5lfvLIUpj-7kJnrI8x1JUZoZnFQU6zC_TBUH91I,9618
-yt_dlp/extractor/limelight.py,sha256=W57jXiga1W4PE8zdJ1Cn_X3gsoDHWFCkGgoQSorLiv0,15094
-yt_dlp/extractor/line.py,sha256=LX_YSPiiOOYjwwzeP3OZ4TUNhQz5A2cdmnj_cxnf2Zw,5523
-yt_dlp/extractor/linkedin.py,sha256=8eaX_YCQ-UjmDQFIppG8kVd_Nh07JRSciu8qfauQBi8,10117
-yt_dlp/extractor/linuxacademy.py,sha256=KODfoK6z5eYHcsnW9oGEi_lPJLjEpmhrILOgCKJI1uc,9973
-yt_dlp/extractor/litv.py,sha256=ihIHipoQQri8RUJuI4GbT5izCQy1f0rOrs1ayUhtGb8,5788
-yt_dlp/extractor/livejournal.py,sha256=T2d5464NaMaO6cV9zp3o30Adr79usZldYjmhC2DSn8o,1530
-yt_dlp/extractor/livestream.py,sha256=sL4_6UR-aXez9pC0kPXpo6rI_8wUXXF9X7ZnMMiVR8Y,13619
-yt_dlp/extractor/lnkgo.py,sha256=o327BgBbmE_tD6__S0jGjVHK-oBeDhJuDFg2sH9t_b8,6432
-yt_dlp/extractor/localnews8.py,sha256=GdkBlqZslc9V-02U-uI_nEXIHklVCfwMMtNSIafI1-A,1714
-yt_dlp/extractor/lovehomeporn.py,sha256=a1tXvzWNBZOMlnM0-Zx3niQ0u9S4pmbl_NpYpCCYAJ4,1172
-yt_dlp/extractor/lrt.py,sha256=JA78-DjvqgfB23TIKzvQY0p-qPO-jk7NB4mAc_EhUlg,2569
-yt_dlp/extractor/lynda.py,sha256=zP06cUAASDI85uBuUHW12ujP29pIqhFa_vV3Fa3vmNQ,12683
-yt_dlp/extractor/m6.py,sha256=__8WyguAQGNR5ImIRY7NIjTcZJte_SzROPvkrnksS5s,896
-yt_dlp/extractor/magentamusik360.py,sha256=IZhZa3f7y_LNnPn4EtHylXM6dp0o9W24AR-cm0Gd8Jo,3497
-yt_dlp/extractor/mailru.py,sha256=SRdDKZeSX-zpa_eRkgElCYX4RLJVgyi4bkJl8ZRVF9Q,12113
-yt_dlp/extractor/mainstreaming.py,sha256=Aw5_Y1r8N3AJkadxEvzjV-jz0RHoiCR08lCh8Ip5_Oo,9449
-yt_dlp/extractor/malltv.py,sha256=8sVn-GdE2Ux9Q0oG2fZPPx4Sws300WYwIG0TlfKAbFg,3298
-yt_dlp/extractor/mangomolo.py,sha256=ln5RkFs-3xKPeCCeNuH_CkugchtSL0HiVipJ6urwmJs,1975
-yt_dlp/extractor/manoto.py,sha256=VEypBAvWSdtpOnbzEiqhCRFaJB5wPSXk3h-WHY4tAFQ,5277
-yt_dlp/extractor/manyvids.py,sha256=SFWQPYOrKGVQ4sw9lKvvyUW-HNuXG30mUUKY7Kv4srk,3121
-yt_dlp/extractor/maoritv.py,sha256=Egv59DxkpKSV0DxvliJDQCKERKC9jEDoBpoqgaMQ6Uk,1235
-yt_dlp/extractor/markiza.py,sha256=2S4wDvO381nCnG6H-XWDmgxgCTlqOq3Hlu6DD2OnJzE,4522
-yt_dlp/extractor/massengeschmacktv.py,sha256=5byuRLW_Bmsi3wKlLO00u3UvJUpoFzCWfOatZaJc7ZM,2648
-yt_dlp/extractor/matchtv.py,sha256=RS-pUrMXQDnT8mEcim3i19BmVXjI11XpxP6luCzkK3s,1793
-yt_dlp/extractor/mdr.py,sha256=Ftt7-h1om2BQcr5ix24H9HWK5_H4YPuQ1BDslJ-7JMc,6930
-yt_dlp/extractor/medaltv.py,sha256=YSK5xPePW_UeHUmA12ojPqaVA4b5ITK5YG5eZ-xUaQc,4878
-yt_dlp/extractor/mediaite.py,sha256=DSmqOeer7i_sNkyWsXL9qrt1xQs-BiY1gCrF9o6jmvI,4263
-yt_dlp/extractor/mediaklikk.py,sha256=Qe0McLwvwO1aBY4ENoKMSAqnKu5G41zEI6E1x24PHIc,4427
-yt_dlp/extractor/medialaan.py,sha256=hxraPDy6Uet9GuQRnd-XeOvgSpxHJ0l84u7cdrfEMck,4184
-yt_dlp/extractor/mediaset.py,sha256=iAKoIpEteqvzvcH8i09Rh07fFOZaC_STOtemON0N0ew,15076
-yt_dlp/extractor/mediasite.py,sha256=iEi4cW5Wcur3C2iTy6UwZIkSCA2ngNlYj-zjehn8p_8,16688
-yt_dlp/extractor/medici.py,sha256=to8aopo0qtTwbA3I6xLDr6FokMeCmwx6uEAGUlvbazE,2298
-yt_dlp/extractor/megaphone.py,sha256=SGclfYSSyivew67bvCpA59rJo0qvol1sZUIWiEUj60w,1770
-yt_dlp/extractor/megatvcom.py,sha256=YBwZKQRbDDYUFm4IIsbRSmg5fhC-PFMnFWUnSQZ6hkI,6969
-yt_dlp/extractor/meipai.py,sha256=7TXsTZOkGAcjO2N8CNGOSMRbxkpFWtV-aYEfhDJpKlU,3709
-yt_dlp/extractor/melonvod.py,sha256=qOTOj136TFrmJWp0sWvOWRj4vbENV1MpxMhcaKTcNRc,2251
-yt_dlp/extractor/meta.py,sha256=_MN4r2yx7UsYrCF68JHV-MqYYCLocERLGSvizXhxbGg,2623
-yt_dlp/extractor/metacafe.py,sha256=33rUQaoN68tiWrbWo1lwEEZrNTZQV7yd0lBCLyWs4YU,11709
-yt_dlp/extractor/metacritic.py,sha256=-SlqHztKDlM0gFeaAhnIEZFdplnEDZ44YzwblF0arsQ,2671
-yt_dlp/extractor/mgoon.py,sha256=jmsxIiM-avfgRRcvKP4nHN9FJ4hrjy4XyR8JKk_80dA,2682
-yt_dlp/extractor/mgtv.py,sha256=vhUyOntYs31pz5NGVmIk2VuX5rOHuzwQ2FO1sRhOhTs,3548
-yt_dlp/extractor/miaopai.py,sha256=DaCK6HpVwKCjSQskKHVk01GUTZ2ldgB-IkOgYGfQ8AA,1498
-yt_dlp/extractor/microsoftstream.py,sha256=QmRB74na3CCI-JEGUbwSJ56T1gTMfjvmZpmAiHkG3RQ,5482
-yt_dlp/extractor/microsoftvirtualacademy.py,sha256=AG65AgDKsFT0IP6N--qd2v2QjzWP6vbAx3kjbgijWKs,7468
-yt_dlp/extractor/mildom.py,sha256=r6spXs11RJWbtulJbe1W-X2vSqQ8PL8Ev4-ifUxhDbg,11850
-yt_dlp/extractor/minds.py,sha256=sN4Na_YtebbPGz7lxD8QNJNaXTm-eIp2uu2Sb0LOpyE,6931
-yt_dlp/extractor/ministrygrid.py,sha256=SbPHzHs0X879Wcyo49MXPq7nm2MwUEkQDv7Asman8TQ,2167
-yt_dlp/extractor/minoto.py,sha256=SoyfJaFCJBiR3N2SoP-n88xptwXY091b7qOqpjsUKGY,1905
-yt_dlp/extractor/miomio.py,sha256=aHGGTBxgXfm7FH_9RdZC2gNeLNhkBrFF-cSBCknJjj4,5068
-yt_dlp/extractor/mirrativ.py,sha256=zcPFQKk6ggDxuYnNWkSZRmckT0TOzIcDVo3naXYlaG4,5181
-yt_dlp/extractor/mit.py,sha256=jALREZ6NrHjW46CPFygWRCtsTCQw766oFtgI5tNbKa4,5206
-yt_dlp/extractor/mitele.py,sha256=eYqop_uplI6FKIpadS__kCTTSrMqlzQextdpbxwRfXI,3220
-yt_dlp/extractor/mixch.py,sha256=LAFY-4JFpp4IKtm_Iw-uDakmr3B1ULI_F8zTqJKAKFE,3025
-yt_dlp/extractor/mixcloud.py,sha256=kK5gztr8LlRtjoF399wIs_VxfHv0iB34mwbHnQMER-M,12238
-yt_dlp/extractor/mlb.py,sha256=GeaDdUSBaQMStsVk2t-eWVhZqmMNq6tY7ixlEaU5PBU,9355
-yt_dlp/extractor/mlssoccer.py,sha256=ELsNr20FNYcNkaZEc3EfflTxNFIelIYJaaC7ka6YiqY,6753
-yt_dlp/extractor/mnet.py,sha256=aj08wW0K8pmEEnbm32AoaY_6fuuzFqHhTKDjnujbfK4,2925
-yt_dlp/extractor/moevideo.py,sha256=UblJFdrm84lokZH1O82MRngzGOOXxkM-odg_BihN6yY,2821
-yt_dlp/extractor/mofosex.py,sha256=ftAiyC_3aKOkdPHWNNkV7-v18vj2KKIfvtgEo2FTQ38,2758
-yt_dlp/extractor/mojvideo.py,sha256=Honj9JrvguOz5g6ASWi_UN77urNFusqvySr7VzdGAOY,2080
-yt_dlp/extractor/morningstar.py,sha256=WzuCTgxpXWDJPOZQRqXtgE9Mo8VLAHUYbtV_bS0xpvI,1848
-yt_dlp/extractor/motherless.py,sha256=zVkj_8KLzZZCA_bCuKvWJf5rGH6zqqqGuWLAvDlrW1g,9709
-yt_dlp/extractor/motorsport.py,sha256=5A0Jyrbc0xQ_F6QjuhTdc8yLK_RUb5xl1ySWnR9K4co,1804
-yt_dlp/extractor/movieclips.py,sha256=7hRbBekKgLZI2msJKcDlJgzP0tEKBWmVvBaff1stxnw,1891
-yt_dlp/extractor/moviezine.py,sha256=ZsZN0VzTTWxEzKQsoTwLQmeOfFQ4yNhGs3LsFeygsN4,1386
-yt_dlp/extractor/movingimage.py,sha256=QK9El5pvONVsvMTJ8qJ5SwBKmlz_EUMjWi_iXZhYlRY,1774
-yt_dlp/extractor/msn.py,sha256=KT5F7A7igl6LYAxKTc9Lbf0aoNrTpdRXwMxxsTtW-MU,7577
-yt_dlp/extractor/mtv.py,sha256=jjOaOubhpapGw9JkZW0BjMQRet2UGLJdGkft7bvIZgU,25806
-yt_dlp/extractor/muenchentv.py,sha256=h-YhgYjGkHmJjYCNb-3Yk6-EBHcpjvNa5WFQ7TISWlM,2153
-yt_dlp/extractor/musescore.py,sha256=ZDu60_SRaK1wcr2Bl_rV5hiJiCQmJjrwsZ1clifz41M,2712
-yt_dlp/extractor/musicdex.py,sha256=QYLG0zLNnwGDb69Ilyok8U2hHA9deVykawD29TP_V0k,7152 -yt_dlp/extractor/mwave.py,sha256=ebvab_jrHE7M1He1s1jYsmRTIbjYaQKKFbYY-nOlxwg,3279 -yt_dlp/extractor/mxplayer.py,sha256=W_f1CIZrlbjc2D1K8o5vTsIJkTDwt3kZpDQiT0VqgIo,9639 -yt_dlp/extractor/mychannels.py,sha256=7VfODSIti8vQKIAZjeLA14GNgA1IqTKE4WoZaRUHEyw,1576 -yt_dlp/extractor/myspace.py,sha256=wdBKQPtpqOJo6_nLtdMl4tFA2Rft_Unc_ARZmmkqHCM,7902 -yt_dlp/extractor/myspass.py,sha256=W_WmoT11ZvC80oRyJnwKF3zjonyQqEbxibL6LSEIHLo,3771 -yt_dlp/extractor/myvi.py,sha256=siu_7l9irpZrJnDdsuoTQnSzVQ4vj021oytaUOnzFyQ,4018 -yt_dlp/extractor/myvideoge.py,sha256=wkNYmMWXOEKu_NyG_TBO_G2vY-ACOpKgCR7QwtaCuWU,1917 -yt_dlp/extractor/myvidster.py,sha256=rwLc7XNz7RvRpFuPJngVlg7JxBUY0uer1U18MmjC7uQ,918 -yt_dlp/extractor/n1.py,sha256=ow3uhI_b-_5Lq8zyOg1utoEkvQvRFpaqgiOuPO4YWBI,5480 -yt_dlp/extractor/nate.py,sha256=RFw5evtCEQkrDAWMEobxOAuDmvaF8OD748s4iMG8sAw,4396 -yt_dlp/extractor/nationalgeographic.py,sha256=zbhiU1W2MhsRFEBZwAasK67UqDxr8tyuvTSLqGHq13c,2924 -yt_dlp/extractor/naver.py,sha256=YWOCW4Ro2CGM7yTdJOrxuEQae6FO-T8pfWqJ9XccEzw,9591 -yt_dlp/extractor/nba.py,sha256=eUn0hVzCPyQsuu4gEkKIzzvoUcGZX-Bg-Qkf1L5g9pc,16270 -yt_dlp/extractor/nbc.py,sha256=8HrEag_n7HcjLlFRlbPTOmhBvJzNF9nHs2rFVNc3xA0,23550 -yt_dlp/extractor/ndr.py,sha256=hhVQS3AuY69F_CBpYeMauNKozefLgQmQaJL5Dt9DPe0,17215 -yt_dlp/extractor/ndtv.py,sha256=MzO6ROgtpD8q_0XfkCzECK4AZcTg9lus4Fog0u-ZhUk,4606 -yt_dlp/extractor/nebula.py,sha256=eAqRLCOU3XyPuczzvZQhsffjIcUi79H9r_8ogQzgY7o,12093 -yt_dlp/extractor/nerdcubed.py,sha256=vOz9DmjOo1lkr82HcUYnBthBP49MTNbXfZWovRDLuNY,1131 -yt_dlp/extractor/neteasemusic.py,sha256=0osqe6J6lrPr4ESm-V6rRP5cUpEZZiyv8sdgXM8xqgM,17001 -yt_dlp/extractor/netzkino.py,sha256=FiA7Vdt99IbM_wl5_OqE6ZK6Tk5AI_rTGzSDXHUIU2c,2995 -yt_dlp/extractor/newgrounds.py,sha256=HaekXEJX5rFTcBPS9qCWGWWbtgfDNpKG4SSz6fZlxw0,10081 -yt_dlp/extractor/newstube.py,sha256=SdWWBxSGDyxYG-5le1nr4_Kizc0s9kVroFTHF18QCt4,3004 -yt_dlp/extractor/newsy.py,sha256=78MzVBufLsvrMDZLlUFt3jq7IhYl2s0N0fpaBk9qFhQ,1923 -yt_dlp/extractor/nextmedia.py,sha256=0u8bWIDjEl08piHQCXjTtDvOIg4HRlOXGmpPn1Ukdig,8975 -yt_dlp/extractor/nexx.py,sha256=JOowk9vYrrj1RRJHIY-Tcmhexng8PnR3cVRQ_Yb4B4c,21547 -yt_dlp/extractor/nfhsnetwork.py,sha256=-ITOWPLE3FrqapVj-RiScdw5vSim828O9BhHDf-secI,5916 -yt_dlp/extractor/nfl.py,sha256=njguzD7yms8ZJONUKC_6NvQD97mEJ2BAQTlK6jSV0T0,6691 -yt_dlp/extractor/nhk.py,sha256=skcw3aP3R9GeVlEkMVwANlxGOf9rJ0zwtlaXEtvNmHA,6733 -yt_dlp/extractor/nhl.py,sha256=aa3xjsCjlPEbCkqBG3fjttn5maimWns2qTo7koe7UZg,4990 -yt_dlp/extractor/nick.py,sha256=AEqTv0b0EEe3w80wpo-bJeuOUIxNy2wTAvlOIX7dxJU,11247 -yt_dlp/extractor/niconico.py,sha256=pPNenB5_WGmjp-v1lMI9cdFBdPD_FVf3fbanj-PB6zE,32380 -yt_dlp/extractor/ninecninemedia.py,sha256=wUPv-tzSdG-D2dHtAUKj8FyqYgGX0h-hYUcrpuOd2KU,5228 -yt_dlp/extractor/ninegag.py,sha256=tQnrR7dctuv4hnTMsoYMLSU7qGgQ8ifV-GD4JCOmUqk,4248 -yt_dlp/extractor/ninenow.py,sha256=vHkpFSBzGmh_BmKPAsoxUAnahQb9E56erGj-cjZx_ao,5355 -yt_dlp/extractor/nintendo.py,sha256=JgtlYNKwV_b8AB2K_GhEIsP1duaWR_73mfH5CN7d8HQ,1882 -yt_dlp/extractor/nitter.py,sha256=xVJbgkgDi1ArKFUxUB7DVOIQI5nbzZDPWmtkEVTF8vM,10031 -yt_dlp/extractor/njpwworld.py,sha256=yn0xTPBbXLAlpmRImodwQho_cdXtRMbl8HlgGboSmwo,3376 -yt_dlp/extractor/nobelprize.py,sha256=O1e6ijE0rwztBqb7BF6n0Lqce2UwHNV9tVxR909EJSU,2123 -yt_dlp/extractor/noco.py,sha256=BbPLL7iG_7uhDu3-VPBq0OssRnUD1QaAU8TW_00iG1g,8397 -yt_dlp/extractor/nonktube.py,sha256=X3iaGnZeLUNGxXyUZJeZEexGmwVlNDS2deju6GdpnlE,1151 
-yt_dlp/extractor/noodlemagazine.py,sha256=CHn9h7c-SUORGF2cv4qz1rTNw29NU9yIOFJec7mCGXY,2671 -yt_dlp/extractor/noovo.py,sha256=PwukJGpVQ08IY8kOYYQKSQ57Ion-7hAy9qlN9Jj5KUY,3658 -yt_dlp/extractor/normalboots.py,sha256=m4lNclfqHBbl-nHf3-4HN1WNyxPaTNx8NBAYbIsSIXo,2181 -yt_dlp/extractor/nosvideo.py,sha256=Vzgy5VwzgjTHb64cT_jAKTEh51-lOzzaG4j8JfboI4k,2480 -yt_dlp/extractor/nova.py,sha256=gvZVztQ9fiy-9GXxIGKbaox7rr0BN8NxLS83HdbhUTU,12921 -yt_dlp/extractor/novaplay.py,sha256=9gFrpiIHsFsHa46-DSVNl4trKIwFRRHhtbkKZaXaPwM,2648 -yt_dlp/extractor/nowness.py,sha256=RdinTo8Md8oN0ZN_PJZ__fYGKqdp5UzFGmVpqCg7YQQ,6020 -yt_dlp/extractor/noz.py,sha256=4DTlw3uK2ZSVDoXcnxVKOjWoWh11ndz4Q08zLn8t74E,3665 -yt_dlp/extractor/npo.py,sha256=Bw5PUoHb3tENeHXEOA4no-wIRx90QvHyTKveLn2GvrM,28126 -yt_dlp/extractor/npr.py,sha256=rvDV91IxW3oZVVzUfCTXdj0WUPBgJfZIKYsk-lZZqjM,5104 -yt_dlp/extractor/nrk.py,sha256=3Eta2Mr3n6XyTf6jchkhwISWQzYMygpoC8ZNP4f6yEc,31918 -yt_dlp/extractor/nrl.py,sha256=YlHme2n17UDtOfRdlfLErFm8vi6UimIMEGKj9ccjaB8,989 -yt_dlp/extractor/ntvcojp.py,sha256=uFxiFXbYX_ZRRRSKHRsVba6J6ffw6mE0AsjMLJxZyYo,2334 -yt_dlp/extractor/ntvde.py,sha256=6jKqkafrJ68RoUKiJeENc6GFTC7Cw2_XLqSzlY-7US4,3192 -yt_dlp/extractor/ntvru.py,sha256=EOuyH_yrShkfVEoa-G7AQQoGtwABAklhX5tdg4RYBMs,5054 -yt_dlp/extractor/nuevo.py,sha256=z2bYgJF6PSe95Br-BBffs4ycLUy-L5e3UzTgNde6IJI,1209 -yt_dlp/extractor/nuvid.py,sha256=x9IDr0v7rgDaAeai6T_F-5MAxTjbVU1lOP3Re1s32es,2484 -yt_dlp/extractor/nytimes.py,sha256=huwydQsBMi7BIdHtJc7F3Gok6IHqACO8VH9ZjQkCDx0,10404 -yt_dlp/extractor/nzherald.py,sha256=IAbkuae48elDaFEGjmrl3baQDSf6e_jcxZonn0Bf4TY,4327 -yt_dlp/extractor/nzz.py,sha256=J6BpQBg5m0hslpsjL0NoJ9T3zDNGcrOFUHNHNZMGz8U,1409 -yt_dlp/extractor/odatv.py,sha256=Pe5IHNta7zhuwSBo6Kvgh7HFzy9z0U68x9Y_pZEE7fc,1497 -yt_dlp/extractor/odnoklassniki.py,sha256=mJyYdfgpzxRilrVPW8dPJSdJuX4EuklW92ggO3-n0zA,12611 -yt_dlp/extractor/oktoberfesttv.py,sha256=gvRARJMfC5pun6uSAKudAo_OpW6Pw87x0aOgLHq5uRE,1486 -yt_dlp/extractor/olympics.py,sha256=aeQNwHCGd26CNTplDm785SvoRoruU-qiD9gylpM7TsM,2954 -yt_dlp/extractor/on24.py,sha256=q0HrKx3wj0z0zN1pfJJAcofQnGCqoKzRBzBZCGfLx5s,4017 -yt_dlp/extractor/once.py,sha256=tMdzAiJAwtdb6VFLgRRqa_4mIfe5-YUNeALmySR8M4Q,2167 -yt_dlp/extractor/ondemandkorea.py,sha256=oV5D6E__G_CYAD7E9Lolj3UcSRXIhbc1zzNMpNcmsPM,3184 -yt_dlp/extractor/onefootball.py,sha256=bXTJ4b9gVPkvF8pSrmBPmrZz53UDMqhMFWDbI93HggI,2198 -yt_dlp/extractor/onet.py,sha256=PNe0NCpGrxs2Cal9VxwsIdUxhLC_NoaIlpXFJ1hvgfQ,9938 -yt_dlp/extractor/onionstudios.py,sha256=FfEUOhdQ9qt6XtVsuCIS2lsK9GE-DeOLnXubN8P5W7Y,1931 -yt_dlp/extractor/ooyala.py,sha256=rDDhxhQ74bkMcl-wgJbD0ZD4LkKDnWfBDNDo0hZ1XGc,8751 -yt_dlp/extractor/opencast.py,sha256=C68-P6PRSMZMhA73pc7elXxXTMpLYl1iGHdXiaGxYL4,7622 -yt_dlp/extractor/openload.py,sha256=OsETtwAjw137pn6-dQTn8zkZVXJpYCpweyNvz9CpvXY,8183 -yt_dlp/extractor/openrec.py,sha256=WELt335gEh5_Lkn52nurV3FEsDRnLT0CcyOzYLUAXHE,5442 -yt_dlp/extractor/ora.py,sha256=okFZ6qG-NIFpM1aVEW4TqRhKVXDca9z1B1PJ-FOr0OI,3223 -yt_dlp/extractor/orf.py,sha256=Tfy66cONUSDSUtPD2vLru-E45-cZ9awIxUM0UeSBdCE,21459 -yt_dlp/extractor/outsidetv.py,sha256=ERY6jBJgOA1vs4dYwWzZvGqdWh2UQiMMWSo8VpECws8,995 -yt_dlp/extractor/packtpub.py,sha256=18YApXd2pgWcek3ZHyWoD0wAnQ6lyYoJCz0Jv-FGGFs,6309 -yt_dlp/extractor/palcomp3.py,sha256=CeWpOcRMm-zioG-hB4CtBVvDrIOfEE2hOIZaC1QNDcM,4513 -yt_dlp/extractor/pandoratv.py,sha256=JdxK-c-oHS9U4lXOmkIRQQ_s67qFoKcXUYR_K9S1RDs,4732 -yt_dlp/extractor/paramountplus.py,sha256=qUhu7aMzH8-OGBD9Q2w7qIXmb6-zdcF37AoE11sOoMY,5769 
-yt_dlp/extractor/parliamentliveuk.py,sha256=8EvQ-nvMftymvY5VRRhAyeDL3VgT3m3qSCpI2Vg4swg,3238 -yt_dlp/extractor/parlview.py,sha256=YUQj55sVfqp_QO7m7DEkKR4dxyEEkrSmmI5txLeK8rc,2845 -yt_dlp/extractor/patreon.py,sha256=rSUcX5NyD0GR1AGnMJ_tgu0zc_U_LoU6NbhejZ5r_EE,9138 -yt_dlp/extractor/pbs.py,sha256=lhCqlcuO2dVTCO7MX_KCiV-jcpb1Q_qIPtpR-4C-mbE,36233 -yt_dlp/extractor/pearvideo.py,sha256=kAHBNNbAN83BbfAzRTf9eHIe3z1xyeJ8VS6VKzOe1rU,2103 -yt_dlp/extractor/peertube.py,sha256=JUdGOQ22R0_nO-Zf27CMSaR0OA-YFyc7EB3ezsilXlY,65926 -yt_dlp/extractor/peertv.py,sha256=fuJb3iqSiuWHpECq9YvrmsokjLkDj6mMpo6avHnclQM,2268 -yt_dlp/extractor/peloton.py,sha256=NnGZoyf90X0cJghb3XUlziyMgKL89rxajfv1KbA4IxY,8982 -yt_dlp/extractor/people.py,sha256=KwULIvpQQ6x1DpNPjGEfZ2VlZWzAlRIjWOgW4ufwAhQ,1140 -yt_dlp/extractor/performgroup.py,sha256=CVFjLIruw964LG5GrFPBfWAywaJyHIafJiNvAKkC8e0,3340 -yt_dlp/extractor/periscope.py,sha256=n_yKrr3Ex13BKeL7efpcWJHntqnfSFxoYvvPqxP349E,7182 -yt_dlp/extractor/philharmoniedeparis.py,sha256=CoNvSerEuxRdMwAlBfoiGwQDpJI4tV84YYPwsJ7kXSY,3803 -yt_dlp/extractor/phoenix.py,sha256=xgww-tBADTMPLytYQwhn-JiabgAmjzmLccCHbIDFqcw,5018 -yt_dlp/extractor/photobucket.py,sha256=Is2zEZ7XyK22WB7dDok_edifhJTFeqVZ_6l8gRJ-dmQ,1774 -yt_dlp/extractor/picarto.py,sha256=evEvctUL2I04zPle7R21M2wmzktoOIbOZUfWbooYXSs,3904 -yt_dlp/extractor/piksel.py,sha256=2eoc1wRKn0WeWB7hj3A3nOTILhYozAmqfRtP0GvCfmw,6979 -yt_dlp/extractor/pinkbike.py,sha256=WK-0E27z-8M2UHnT766Cxu_kHuQKjSR-BVj8oKP4vKM,3446 -yt_dlp/extractor/pinterest.py,sha256=1ubGCgLlHgeK3Gq33p1V3o0qU_79iDqX0ckQmwZuAlo,7509 -yt_dlp/extractor/pixivsketch.py,sha256=zuY0EKOz0qpPngT27swzCXlq6I7gnpMZye8VsNA1e0k,4845 -yt_dlp/extractor/pladform.py,sha256=jzUDeDyC_KPTdHP5qLqjo76UrodiDvFW3BnwGVIuH8Q,5194 -yt_dlp/extractor/planetmarathi.py,sha256=1PCSDj5AYD-hihF44bTRDDipQNGlhFqoSG_YKTn7mzc,3058 -yt_dlp/extractor/platzi.py,sha256=G49pMH_i-qQGTS1kuGNMfKrSjgtjAvLJ3Fc8jWJsej8,7630 -yt_dlp/extractor/playfm.py,sha256=wBO0keYkI_hgOSYyuGMeV9ML_dGmLTGGjvHGWwE_YcQ,2585 -yt_dlp/extractor/playplustv.py,sha256=4aTLBYKRXLNJgSIfoCa73IHPWumPoAuDNioF89WDqJg,3710 -yt_dlp/extractor/plays.py,sha256=u4UJtVWOKKMnvJ8GmUoKG_AmtV76WeBZ0Hx-RJjdw3c,1841 -yt_dlp/extractor/playstuff.py,sha256=sbBS_ykorVbPxPIY1HL6MGR2hRWrWgqd9vfy3EGWwqI,2254 -yt_dlp/extractor/playtvak.py,sha256=VJpv0nIqWy73LAoueAzbfUrCr6ih6ATbWhNSXMqJ710,7208 -yt_dlp/extractor/playvid.py,sha256=ZEQqcKgKHgcO-ulHicwQVSH0_kNxFDBDolpDDxGPRLE,3299 -yt_dlp/extractor/playwire.py,sha256=zlFsnJpuZd2H3qJyqRitQQPjXWXH3vrhMbPAbcpXOzw,2394 -yt_dlp/extractor/pluralsight.py,sha256=xxDMVY5ZwNSozAM2xyFqw3yiFLbUScEZN1MhrlKEoow,18581 -yt_dlp/extractor/plutotv.py,sha256=G0EM49iZI2RYuJGnH79vQY9nF44BzXbmy3LpUKutMH4,7639 -yt_dlp/extractor/podomatic.py,sha256=AVd4GjtxDhw9GlFqz08yNvQ9fjFZ-P2iVkPlKTebbkE,2626 -yt_dlp/extractor/pokemon.py,sha256=iK7geoHR8uoS9GfDU5-6cM4bPMp3uteWDi7yOPS4dIM,5533 -yt_dlp/extractor/pokergo.py,sha256=u6Ne7CG3gXHqcTlIDwKNaaAliF5sAt2mQwzp8MTLCTE,4265 -yt_dlp/extractor/polsatgo.py,sha256=8FjpZIoLI2CMPei0jRHgjVuA_iU5iRkSF_Blv0O2ask,3249 -yt_dlp/extractor/polskieradio.py,sha256=i4reK75fxIBcKQVQqeeWFR17uSWGpiazFwYJTyXT13s,16221 -yt_dlp/extractor/popcorntimes.py,sha256=vWPVySI0tYXpABkfPlCKW2qHz58msnb3ojHLphfV-zE,3348 -yt_dlp/extractor/popcorntv.py,sha256=ygMtAHETgYEAedboSL21KG5bdG3hAguuMCSiBYloBuM,2672 -yt_dlp/extractor/porn91.py,sha256=3o1tzJy6excO0QQ-7BNkdD0psUbHBRYUA05wt-ObgTw,2116 -yt_dlp/extractor/porncom.py,sha256=XNETupObtXwBSLSx6x6Qn8zCQj6lDBJ2itgBRlQHuLI,3868 -yt_dlp/extractor/pornez.py,sha256=v8Wf8PGl6zQTCLfCq6mLxk5tcDd5yhZkii_-7hwc7TI,1751 
-yt_dlp/extractor/pornflip.py,sha256=FYVdCLmLoj-Rybq4LBSFtcKzUim2_htqxmonsklNobc,3593 -yt_dlp/extractor/pornhd.py,sha256=ARE8-IiR_Jx9OELu3dFQb80cJI6QvOyAO11Gs2GWlLg,4624 -yt_dlp/extractor/pornhub.py,sha256=ze-Xp-8FGt6CeulCt3CCXGb0dYCYVtvQpRMI3ePLBdM,30858 -yt_dlp/extractor/pornotube.py,sha256=DmZN5JDsPGe4coifOCEC0wnFRkEd4_xvvVJ99D_91s8,3154 -yt_dlp/extractor/pornovoisines.py,sha256=CO-ffjrU0r84UcbOiVBf6u-kPm9KCzSfWUR6tWHwJfs,3989 -yt_dlp/extractor/pornoxo.py,sha256=62y046xeFiyRsgtohSl6JAu4ldpbZGllZpw-mVFS1yo,1925 -yt_dlp/extractor/presstv.py,sha256=k4ihqIs1jmV0RGrKhtwYY9rxMJsZMAbqlTklQWlD8tA,2376 -yt_dlp/extractor/projectveritas.py,sha256=_tFev7iJBQZ7S2D67DfZxotrG5LbflFFnjG40BH8zDo,2513 -yt_dlp/extractor/prosiebensat1.py,sha256=9QUAoJ79kEP67N7VYDUp9im-kX4IInTm-4c57vzX5VU,21586 -yt_dlp/extractor/prx.py,sha256=4_BJjjLEEYsqIelJZevIb3XjOVXsSxWJ9s6i-AsA-Vs,16122 -yt_dlp/extractor/puhutv.py,sha256=rtsBjUPrMoHsJIt8u9uCajVteTe5dj0yOnJqxn3SKtU,8458 -yt_dlp/extractor/puls4.py,sha256=SV_mZLVRwT37KJQazbaeZIRhD3ftQvKck_YLu88u2M4,2295 -yt_dlp/extractor/pyvideo.py,sha256=AWKt8IEQ-Bk8l86y0qW2hv7zpTH0Nm_3-gdot_xoOwA,2760 -yt_dlp/extractor/qqmusic.py,sha256=1T0LSInrJAtxoCa1N0HUHKfSPXbrEdSQSwrzLAHSalc,13643 -yt_dlp/extractor/r7.py,sha256=HN74Xmlcy2MdKsc9RPOL93ErN9YbLIYJcwzu0x2EiiI,4600 -yt_dlp/extractor/radiko.py,sha256=pl27JgmhZ6_h9FY5V0zz0jdH6xiePULij8JGJEkqJvc,8362 -yt_dlp/extractor/radiobremen.py,sha256=y7aULrO82HC7UIvrimKLt-40XVlvTEcETbJHokbkugA,2431 -yt_dlp/extractor/radiocanada.py,sha256=3kL2vttMmSJ68pQAkMrFOfQYLbk2UsN1c6YsGI9nFdI,6335 -yt_dlp/extractor/radiode.py,sha256=i3OI0qnkPoIg0Xana0avDXFWdQ6jLBJLf43c6tkOL58,1802 -yt_dlp/extractor/radiofrance.py,sha256=9cBFa_IKcVgm8KlQLw0Z4MMRUPYKQ8ojmtpzwsbdn3Y,2082 -yt_dlp/extractor/radiojavan.py,sha256=32LqzS4lJJptg3gwktxnejKiKLQIDHcA-ER0KdXwUJg,2761 -yt_dlp/extractor/radiokapital.py,sha256=wkVRr8rSoUHXUjXSUHuCDVBOPCXwJijgImq99mTiYo4,3381 -yt_dlp/extractor/radiozet.py,sha256=TdqN2gqmN8XFvCZMeZqUUEe-E7h2TZuBiCgnFGOfs9s,2195 -yt_dlp/extractor/radlive.py,sha256=8cO-9YCT4zIXjF_MNfjj3G3tZXujvF992lBxnc0GPW0,7019 -yt_dlp/extractor/rai.py,sha256=eOwczBPXPcUQmm1zfRejq48eQZEyUzWJVdnpPsVsLwo,28372 -yt_dlp/extractor/raywenderlich.py,sha256=kuaJnaRcqkndWQwsKcQBXMPsh5vkKiwHHZOJ5CvGsr0,6165 -yt_dlp/extractor/rbmaradio.py,sha256=3SVeyzQR0aRMD_XawFEiuXMd5mVLxCXv8zDQZW6Ijq8,2392 -yt_dlp/extractor/rcs.py,sha256=XTxQE6_jlIgTw57e1LmULRQL4c9_DrVYUzK5wgCmN70,17879 -yt_dlp/extractor/rcti.py,sha256=MTorHWub7em53au050CR1DdBTjuzlXmABz0bc0NbDk0,16567 -yt_dlp/extractor/rds.py,sha256=dwSOmqmwp3C9zpVtz8EXcAwMCuaiwlir9MzemUTn74c,2881 -yt_dlp/extractor/redbulltv.py,sha256=hZANdPBru1e01j_FtC3Go3cUczRbKwcNCGVRUJ5a_qc,9314 -yt_dlp/extractor/reddit.py,sha256=iBzVQSIXWuvj_hbi4UrMfQvvkWIHFnhvdqhUEJW0PzU,6235 -yt_dlp/extractor/redgifs.py,sha256=MyHsNfus0246d5tF-vX32QzEN_RSHFldEHTmnTNMQ1A,8233 -yt_dlp/extractor/redtube.py,sha256=XlI31ocRyhjjEOtFpWoLNMCX_9oKBvUN--xYVk4Nit4,6256 -yt_dlp/extractor/regiotv.py,sha256=RA6VQecsCgyz1jAGhCsPdPlnplLx_1ORYh3h6mIIglY,2261 -yt_dlp/extractor/rentv.py,sha256=QwfIfVkwYXyvZQmBJ8xyJjxz55J9_FAXfuc94U-m7Ek,4156 -yt_dlp/extractor/restudy.py,sha256=MdxQ4qg72vrBzSkBmP8f1h0RWlKwommFm3ZUR3QOgGk,1351 -yt_dlp/extractor/reuters.py,sha256=RDhED4kf5pdFtwFpf-5q3Cc2iv8W2evgtfjHl7W8_yU,2438 -yt_dlp/extractor/reverbnation.py,sha256=9NSXxqKFGWzTcRkk7vg0SDL-6z7e3sBpMqgYoSUZr48,1627 -yt_dlp/extractor/rice.py,sha256=V1oRzhYkXq4l4kLvDl4faPqeDaQ7eow-XEorQz-CXtg,4576 -yt_dlp/extractor/rmcdecouverte.py,sha256=4YM0bykbmp1oR2gLcC8DZD2L0nMAZ8lEECjy8-bqhCI,2837 
-yt_dlp/extractor/ro220.py,sha256=7Mp2ZOu7abBbP-zt9Bt8kumj_FLxeJxBaSFOv5C6_BY,1452 -yt_dlp/extractor/rockstargames.py,sha256=AV3EgHBLonRlSWqbzG60QIApweFVuB2_fKrJgoF_X1U,2248 -yt_dlp/extractor/roosterteeth.py,sha256=0OkwtRAFQ5_JUaUrN9cnsWgkf_02uC_DRlAgtpNkFNo,9034 -yt_dlp/extractor/rottentomatoes.py,sha256=Ez7Y9YzrL1qN1hje2rMgfP9-x-BweRft5SIVGvUpjzM,1281 -yt_dlp/extractor/roxwel.py,sha256=7wnynxs6NSHe03s1-Ij6Nclu75ReWQY9CGCQIciGnH4,1956 -yt_dlp/extractor/rozhlas.py,sha256=GjIxekwC6-6e5uehrT2F9bqmIpUr404VEoeNMYTWeCQ,1902 -yt_dlp/extractor/rtbf.py,sha256=H9W-m74K5bjwGjAgoqoCnKVi2SlJdScmi3w21mS1jKg,5538 -yt_dlp/extractor/rte.py,sha256=tZzja_0seBTeIuNDOwoVFL2_8eKppozzOQILWnftvOA,6289 -yt_dlp/extractor/rtl2.py,sha256=siHpOXy_t-UO36qVBJEChrIV_kKR1r4vVHBgvkix09E,7217 -yt_dlp/extractor/rtlnl.py,sha256=QnIYrKIA45fADK3yHszA_8oMvKS_sl9vBEI44nMg_-8,5896 -yt_dlp/extractor/rtnews.py,sha256=NOGp8fBFPMFeC4DoFmJR48gSQZNg7SvWHhRivb7__I0,7702 -yt_dlp/extractor/rtp.py,sha256=vhFVXHtiLHbbU1bLOA4nVenM-vazq2m3cTsouglPiak,3428 -yt_dlp/extractor/rtrfm.py,sha256=00NF_OId3_mh66an4qOOa4qCPEvDRoAw74SlLLjMoqI,2797 -yt_dlp/extractor/rts.py,sha256=6Bk0Z97fxjjxX6TpGSq9f0UepiwnmdySlKWsG5F3ims,9570 -yt_dlp/extractor/rtve.py,sha256=2lTSX7osGirDA8_6I_aCx4Hg2QyZEhQi9FhdD0qGfWU,13000 -yt_dlp/extractor/rtvnh.py,sha256=Wl8UjhKEqCRdCEndi5zuuS5_t18E_3AtOH0zHDU24DM,2266 -yt_dlp/extractor/rtvs.py,sha256=aUiPgAIX_n-HdUv4b-6OVz8Wq-WvLXXCqYmCggmRc9Q,1476 -yt_dlp/extractor/ruhd.py,sha256=ldHUBCjz5hBkFREb6HHgkYZ7iA0sblLB1OxJHP-n-IA,1586 -yt_dlp/extractor/rule34video.py,sha256=5yUw1B15soMNZxqp6Efw35uBPkwGkLEI26DFii_2aEM,2345 -yt_dlp/extractor/rumble.py,sha256=_48EuvcGuwmoqfJBdF3WOpUfnzpIfJYLLkzpu-GQ5xw,3781 -yt_dlp/extractor/rutube.py,sha256=x-K84MpohcGDLu2-Sa3vR7t3brl5XDrW-4ZSy5oAPtw,11777 -yt_dlp/extractor/rutv.py,sha256=4rCqfmSuvngfsnDM7FOkAjyI5w_n9scS9j0ah5QHl6c,8016 -yt_dlp/extractor/ruutu.py,sha256=Y97dGRv-jjPKXGuqh-epFTaVMALt_qA02grUY_Z0NKM,9355 -yt_dlp/extractor/ruv.py,sha256=zE_8TAMDDKynpjJt56gsd-6H7d1wfy3HQyghje_rBuc,3359 -yt_dlp/extractor/safari.py,sha256=nnYMIuZGIJQLO4SlNqTx9xRR_ihkxZndiu7eYvP9Afk,10007 -yt_dlp/extractor/saitosan.py,sha256=LR0dSstFZsZI6-doflEPFvloHmar-NW5t8dMB8swQE0,3048 -yt_dlp/extractor/samplefocus.py,sha256=kmsolwwkuHeCzS-sj1l-n_A2MB9rIckBprOfK3iEyJE,3874 -yt_dlp/extractor/sapo.py,sha256=FY56i4wdbrdGQ2BXMjxTBq3QngLK9lVuKMH_TsRqiyk,4492 -yt_dlp/extractor/savefrom.py,sha256=DpFA85cAnU_hj00YdapMwOx9ONaiDbyT_lLoeOdOwh0,1067 -yt_dlp/extractor/sbs.py,sha256=R33ZfFdZQrNOcL3q9FMwITc2X2jZwFsxfpNpmRi7GFw,3700 -yt_dlp/extractor/screencast.py,sha256=1uVxKy19k4ihcCQXrW0cgjVazq3dQuZQNfWygf5ZhvU,4680 -yt_dlp/extractor/screencastomatic.py,sha256=a8TmnROh3kYub6zy56P4qxrGbhttQDE9A2qg08vGi38,1985 -yt_dlp/extractor/scrippsnetworks.py,sha256=xkbnVcyeg9IwjDCkp2BEo25xq8iNO574OibBEnlVLEI,5614 -yt_dlp/extractor/scte.py,sha256=T5XZ7h7_RUXGUVdCrZx7adADIb8rKpINPmqlh8yBfF0,4993 -yt_dlp/extractor/seeker.py,sha256=QpEsoWIcKSVwWNl5EireDgJwCaihITf8_jfCqSTB4Fw,2297 -yt_dlp/extractor/senategov.py,sha256=iQ-6rKFdET5hrAFpitR0d6lwQwr_W9DGDQksguYt6Ns,9007 -yt_dlp/extractor/sendtonews.py,sha256=AKgKERQwuNyB6otgpZyuz6muaj-P2NIHjW5u-s6mlBU,3972 -yt_dlp/extractor/servus.py,sha256=spz_fOliGNHSATzQ30BIkvB2z5E0XN4qYjWBA30dcik,5663 -yt_dlp/extractor/sevenplus.py,sha256=2hyjCRC-x2f2DhtQno1LHWBKnRGRPQGNwiMo5OhqQkQ,4985 -yt_dlp/extractor/sexu.py,sha256=NPH8_EnbfGQmQr5Q-HXn4OKCQKqBvHewNTH-AtF28Vw,2000 -yt_dlp/extractor/seznamzpravy.py,sha256=E_JR_FMP6u3K9-h3afra0gCd5WZG5j9tgq2zmTDN-eg,7861 
-yt_dlp/extractor/shahid.py,sha256=PMc0rrpKBfPTB74PX6b7KX2Nv96o5v8vCgU4EvOMHZw,8453 -yt_dlp/extractor/shared.py,sha256=56Xz0RnSt1S0fal3IE9jbbdcAoz6K3wBo9Jv4GxgukQ,4385 -yt_dlp/extractor/shemaroome.py,sha256=qeQf2NWWPJqaDj9DUSdW4yNiFNmcPIYsuAZXfY5h9cY,4334 -yt_dlp/extractor/showroomlive.py,sha256=QbxCbHGAaKmv-5e9R3ozfzxql3Te9dFqWDAK7nnJVME,3109 -yt_dlp/extractor/simplecast.py,sha256=3COjfbshPKzhCcYmt_KBrmCRduydunTOZA1E0v3l6Ww,6219 -yt_dlp/extractor/sina.py,sha256=gXn_K_QddCk9knwtd3T9m27xacfgcdQ00ah0s5I0_rc,4292 -yt_dlp/extractor/sixplay.py,sha256=1B5OCLFxK6IXCC78JrS8O3vuSytrXWxKSYMM62wAq0A,5153 -yt_dlp/extractor/skeb.py,sha256=x6qArwgy7ecC1v1aKUfNEgIrsINZpV1gTNALPdFKvuA,5632 -yt_dlp/extractor/sky.py,sha256=RG21Cb07smG3JnRB5FgKvn3HxRR0_KBMn-FNNJq-QHk,6571 -yt_dlp/extractor/skyit.py,sha256=a0E_yLYbeYHTuVeDQaaOALlHuwAcInUk6ZQNtaqapxM,8767 -yt_dlp/extractor/skylinewebcams.py,sha256=dJ9qTwZ7Y2n8SJ9PXRBCRww3ajUoiZFU9c6I0IT3UwA,1434 -yt_dlp/extractor/skynewsarabia.py,sha256=Fr2v4LrDOHIzOLrynf_KrhvKAEZp1cT_dn94sxZxcU0,5382 -yt_dlp/extractor/skynewsau.py,sha256=GQkFDiVTf6kOP-zZNkyKdc2BefmJ0ac_BG5gWMKng2k,1834 -yt_dlp/extractor/slideshare.py,sha256=J8FH7VnyoWiBEEMxHwiuy-bI81x-a90teQQNAeWw0EA,2118 -yt_dlp/extractor/slideslive.py,sha256=rse7tjkEdEyRnp6uUFOgzrPrUeCkA5AbmoOHl6MRSvE,4164 -yt_dlp/extractor/slutload.py,sha256=ZWBRokCYQ5J1Vh_0fGT7G46FMQ_57LWy-WKx-kIr4so,2350 -yt_dlp/extractor/snotr.py,sha256=iGHjOyp3ceV2IFGF2MWOh5yFxv1HmApEzUP_bEoM6NY,2487 -yt_dlp/extractor/sohu.py,sha256=6nx1N7HAZW-W_txThzkR--gL5s948MXWygsZeovemOQ,6907 -yt_dlp/extractor/sonyliv.py,sha256=V3RSr11OlHlHChEtbY2UurkS8wWUZg4ce8snm2HJyEo,9488 -yt_dlp/extractor/soundcloud.py,sha256=W8AxcHBWtF_HZssYJrQbUAxfxgeukokTZjSPVdLcACc,35875 -yt_dlp/extractor/soundgasm.py,sha256=NZ1-eAc0KScOOv7sngcjnDrzANG2kCsrqjQ5WDooRMo,2410 -yt_dlp/extractor/southpark.py,sha256=0ASqtk7fmzNmkv4HPhOqa3zekRsV6jM_ng9xxS1o6uA,5934 -yt_dlp/extractor/sovietscloset.py,sha256=de_-95yeobLo1jyJxgUMNbQyVy4HedoiJvtxEKzf0PQ,8285 -yt_dlp/extractor/spankbang.py,sha256=qxf8_0aNn2lKS3C_nF5AUhhyyP_quSbOqjWEjSknkvk,7332 -yt_dlp/extractor/spankwire.py,sha256=TcYgBPvp-HHVtT2yNYvdff19jmguIh0gCpUuhv03p2E,6434 -yt_dlp/extractor/spiegel.py,sha256=EgDUyICkkU_bgXU7KxEfde5c-9KFJzoYaT5k6yONfCk,2383 -yt_dlp/extractor/spiegeltv.py,sha256=7zq7XM1qw3OAFfGlr_51uUL_X4Igk653mdfdn56-Wss,504 -yt_dlp/extractor/spike.py,sha256=baMyaa6Kg_BZlVepxWdJlzoOeuxYeWKeM06odw5MQ0g,1702 -yt_dlp/extractor/sport5.py,sha256=WHbhI3jhK4u0tmHD1B2cTi8cPU5AazAmBlO5KfY5C9U,3241 -yt_dlp/extractor/sportbox.py,sha256=LEkNZvI-Apa4qQGrzcP0O46Bpl2GM805ct5VzMM2dOI,3421 -yt_dlp/extractor/sportdeutschland.py,sha256=LBNgJZ6tfh0KUYW4hxQsVRO1YQ0WvwZS010-Mdf5eCg,3990 -yt_dlp/extractor/spotify.py,sha256=JLC6GYO8NxmRkQUqIAW3uj9ne7BQIHDYhR3eKl2Qqpw,5739 -yt_dlp/extractor/spreaker.py,sha256=QPH_duizGh1F8jGTtHEBjwF1Q8xy_WV2duY_njLC3ZU,6030 -yt_dlp/extractor/springboardplatform.py,sha256=_-tgRbwvy-ZksEVsCbX2ocYiq47_DJpzsIfbHhtFYmE,4238 -yt_dlp/extractor/sprout.py,sha256=tdxLll4wGsf2j5sznBsCrjUNIOUDs9yq9ChP8uK_2xw,2326 -yt_dlp/extractor/srgssr.py,sha256=OAEgNmtZpsvf7Sb7QIi8Ji6fV9aAs0QKPA6TZu3ovbY,10031 -yt_dlp/extractor/srmediathek.py,sha256=MbwS2m5zHZ8_faf0yyOwpHDBlY94mAfjkTpPxiOY_gc,2324 -yt_dlp/extractor/stanfordoc.py,sha256=gvdQubKgRuIgqrcIdHEBrhysEEA7-Y9InJgzmq_YXO4,3526 -yt_dlp/extractor/startv.py,sha256=6abzbhklCF9azevdgJ3r-s9OfWeVBoZL_TLBqLPTjxk,3587 -yt_dlp/extractor/steam.py,sha256=Sd4MFj3i1mDU7rIAVt95FF4FTBXFNzM8RYyl2ac4uQA,5008 -yt_dlp/extractor/stitcher.py,sha256=xnSx4bY1ZyjzZ9V_xJFb23O8mgaXUd_fkn87VEYfOmk,5371 
-yt_dlp/extractor/storyfire.py,sha256=AtWm5l7iRunDjruobwrZUnOiO2q5GGiJnWhjlZYV1UU,4842 -yt_dlp/extractor/streamable.py,sha256=T6RMQR5bEAwmt_oAdlwslF5KITBnmWo1XO_ACgB1b44,4171 -yt_dlp/extractor/streamanity.py,sha256=6Z_-hXbf99chavjInoJkTFtTgN6iHALJjmhJquLwBow,1877 -yt_dlp/extractor/streamcloud.py,sha256=HwTjFC2YWqVtf40vHtHviJ2Ih6df5I9fJuGOq1D2cqA,2558 -yt_dlp/extractor/streamcz.py,sha256=soiFw8HOwO2F0sJPFk4f5GNNJtnSp5lB2cGC6Q_c25w,4335 -yt_dlp/extractor/streamff.py,sha256=cG51NMpkI0Ju8DlvUarsS0OKUn_Ciu8FJVNzVEC-1nI,1035 -yt_dlp/extractor/streetvoice.py,sha256=nlFc2xuZU1M6oE6UuelkO5zLCbeIvsLbY47Yy9pZ7i4,3624 -yt_dlp/extractor/stretchinternet.py,sha256=QCpA8rRYVStNPwXN4ValhSgqxhAwLyiQtqpk0g5-bD8,1339 -yt_dlp/extractor/stripchat.py,sha256=_QfJyk13caneke1Jcew3z_vkgRsHj6JcEfEaVIs7Xk0,2453 -yt_dlp/extractor/stv.py,sha256=nOHXIk2T1NIvYhI2Mu2AHYgwpoiKdv-dFc33x3MgxKY,3293 -yt_dlp/extractor/sunporno.py,sha256=-NOofT29vmPf8v-TVaGI0OL5NoSNmZ7WTWdGYlfNb2U,2653 -yt_dlp/extractor/sverigesradio.py,sha256=BcFOLpV4WcTkloyjzq0wojcT8P5Hz0VPaE0khb_Y1kM,3910 -yt_dlp/extractor/svt.py,sha256=PHE5gzyZslXks9DprfTqX_nY7LGjDekdWsRk1F-Eh08,15178 -yt_dlp/extractor/swrmediathek.py,sha256=gHyGGJ3Z_nIDUeyG4OjrMb0OqAWZVWuoZ6lf1zySAe8,4375 -yt_dlp/extractor/syfy.py,sha256=m0X0lV2xID1igP0AIXTWOu-NsLT5HElPOr7dUcX8YIE,2046 -yt_dlp/extractor/sztvhu.py,sha256=lZI5WJr6CERT-CrhzBMpfWcLh0fam6ERX0s4fL0IH5s,1673 -yt_dlp/extractor/tagesschau.py,sha256=0UGti1T7X49IFZs_Ez7gw2OT0-vgAD79Hz_TV9vGNNA,5845 -yt_dlp/extractor/tass.py,sha256=mgewmCf7AKgZJbnAKTnN1rIoDiveaiWhY14u1twvqQI,2016 -yt_dlp/extractor/tastytrade.py,sha256=s3PM4cHXXvP4BDO4LUKLeUpou1gH-L59d2kw1MuJg3I,1452 -yt_dlp/extractor/tbs.py,sha256=KzFWPQNtsB2eyfFp5wBkF1J1YN_ZEMXSLjVN46ju4YM,3544 -yt_dlp/extractor/tdslifeway.py,sha256=XdAGbQFWTo3eZc5EcRQimHSpetLiOoTPQrNurFg6p_U,2482 -yt_dlp/extractor/teachable.py,sha256=buje-lz0WJvy0wAyXcNzGIfFqPP4av7luM6659jtR4A,10491 -yt_dlp/extractor/teachertube.py,sha256=U1ckr-k91fGlQIZykzgXRYFrJgxJkywmd5kOQdHRwhU,4413 -yt_dlp/extractor/teachingchannel.py,sha256=B1HdMyCS2G-jWosPC49PiF25rgJlZ0_I7N4xGQ182-g,1062 -yt_dlp/extractor/teamcoco.py,sha256=v8MipSFQZxCXqFa2VkGQGF_yy1RdrkgHcBIWRnaq6nc,7310 -yt_dlp/extractor/teamtreehouse.py,sha256=m_ZZRONLNZU6drrmsyuss-8AHumDLtEpxyk70yYM6po,5504 -yt_dlp/extractor/techtalks.py,sha256=jkeuaVqojqoI89oQPAi55nvw5_Pn-Fd3LWJMFFGUIU0,2525 -yt_dlp/extractor/ted.py,sha256=RA0qy_m3TJ0ip9xu8cidw_kkZD5vk_4RYZNAlEIpTXE,9978 -yt_dlp/extractor/tele13.py,sha256=IladQcY9GAUrZbc1fLHoNjdeKmPFQgB2c5TI3te5o3s,3342 -yt_dlp/extractor/tele5.py,sha256=zepZYmhndXr0YmHcRRUxxidMJRvnCTdkvwxgN15eK4c,3646 -yt_dlp/extractor/telebruxelles.py,sha256=OVYdjBTyYBwv7sTZqLZT2vnxghnLv2WA5CivETndy1s,2909 -yt_dlp/extractor/telecinco.py,sha256=m41pTjkE2Aw70iqfHokkdWdkvw-eRgsdejrl4zJveGM,6226 -yt_dlp/extractor/telegraaf.py,sha256=XqfN0HDiYKS3b6ETlUGwQb8yeBev69e7-W6wa9Bq-F0,2995 -yt_dlp/extractor/telemb.py,sha256=BhZq2pKqwmkGjXw4PMcPzBG1tnfo-v6lsh8qygnrEgk,2963 -yt_dlp/extractor/telemundo.py,sha256=7flA_lSP_JN5jQcxXmzHlljWAkO96A8lIw8dNt2s_iU,2465 -yt_dlp/extractor/telequebec.py,sha256=KCorumS_-su68pARrbUcyYtlZsO7PpSaTB2SCTKCf0s,9044 -yt_dlp/extractor/teletask.py,sha256=KM53eddFquvkzy8G1ZDytCDCbkqu3TNcUp7LRRgzDOY,1739 -yt_dlp/extractor/telewebion.py,sha256=aUymG85PJ6_YlDO5HHMneabKI-jiSXLXb7Jbnyrp7cM,1843 -yt_dlp/extractor/tennistv.py,sha256=U7q0iEcX-PtpDkvhYlRp0KsgUqKsR8UB9PMMQX_5c08,3989 -yt_dlp/extractor/tenplay.py,sha256=Vap9xL_92kcH2hQZBWJNhXqt9z7NjHO2yDCFDAD-Ze4,3332 -yt_dlp/extractor/testurl.py,sha256=6Mmx1Hv1nJMAfBg3WIOPVkpcvffJXXC0PP2xuMct8Co,2107 
-yt_dlp/extractor/tf1.py,sha256=HKgyQZwjNLuony4x5Mq95gR0uZ8LIgWibtkbQgJp2Kg,3024 -yt_dlp/extractor/tfo.py,sha256=vohOTvff5RWo-4RvIgHVFrWfR443W7nBk3r-jtsf_3k,2038 -yt_dlp/extractor/theintercept.py,sha256=1_QlPe4dkBHsyGaohQXEBs8snAqfUxf2glzqzVKH1SE,1801 -yt_dlp/extractor/theplatform.py,sha256=gVeXx9_IkcaSVWdyvKlCYBGN2GGd5aU7dZMxYauf-pQ,17381 -yt_dlp/extractor/thescene.py,sha256=ynX1Emxc8aPg-yNq6d2MXe5n8xFwHA-oLaWExVsMnAg,1373 -yt_dlp/extractor/thestar.py,sha256=gr_84HjbU-mokGY1_8nPtuXXPlEOfknbFRlePIRlHKU,1404 -yt_dlp/extractor/thesun.py,sha256=oNQ7quHxhiifx-fti-BHo6gnkMmum1Cx4WSCrWtv09k,1402 -yt_dlp/extractor/theta.py,sha256=vxOeIJVEJcoKjnGbDX9ZzsuLO4m0CF_GaSnp-cExni8,3708 -yt_dlp/extractor/theweatherchannel.py,sha256=Xi5PVoGnpxac6RcWdEJ8GPoTGjgoZjHBhaZ-Kg2o30o,4001 -yt_dlp/extractor/thisamericanlife.py,sha256=2G1ThNlbG-tgAGsMxrn79i9TaWYehG_aJvrowSs6LJg,1550 -yt_dlp/extractor/thisav.py,sha256=zWom_V9DvGd9pWT6ik3vyOqKiO0oitXKu-lZv3sA_m0,2517 -yt_dlp/extractor/thisoldhouse.py,sha256=SFdMY8lqsC3XwpYrFK1IdaPqDMKcU3xJWVAA8_t8XkM,2738 -yt_dlp/extractor/threeqsdn.py,sha256=jiYB5xhT4L4vnrReVy9Zo27HwWLFq35ACH7ln5kO3VY,6130 -yt_dlp/extractor/threespeak.py,sha256=xSnjY6xxlZI0wsvmIq6xC1UlIG96Cvz216JWZPYvpFU,4098 -yt_dlp/extractor/tiktok.py,sha256=WGKT6UC9q7Pb10bEm4ZpcJQmGHREGaIoqMr4msPuQtg,36996 -yt_dlp/extractor/tinypic.py,sha256=WxGvzkAe-degM0o1BkKl2HaLAWh_dNqd-2YQZlaHCq4,1892 -yt_dlp/extractor/tmz.py,sha256=g5N5r7F0kwLL6vum4qtVnJzAFnvjdGEnr3aCsCbJ3T4,7685 -yt_dlp/extractor/tnaflix.py,sha256=GeWD50SKyWYsEb88FKOKAgkDb2MrCqixhRTOTQ0UCOs,12215 -yt_dlp/extractor/toggle.py,sha256=j5v1J--ZNsJYiWvFdgPUX3FMn2XpDFwv80NXCOZH7Uk,8905 -yt_dlp/extractor/toggo.py,sha256=dZtcOt1T-gY_DS0nuStGczyG4DeuKewqc4k9WkdsQbc,2940 -yt_dlp/extractor/tokentube.py,sha256=WkHecbiuO8fHAPJSG98OJsGlYXARf4dc6fA-m4d1A9U,5715 -yt_dlp/extractor/tonline.py,sha256=kEMRahWn6TMKDMM-h-DWsY_VfGVkyqvHiUlLw3lu_os,1925 -yt_dlp/extractor/toongoggles.py,sha256=uLq-zZFoqDFzp_Dermbn7-w4fSqa7hScPNnS9UVct7Q,3030 -yt_dlp/extractor/toutv.py,sha256=N4qOJ_MvTvhFiIaRu8_JIHjpirol4huhgXPlzMh87tk,3607 -yt_dlp/extractor/toypics.py,sha256=M18yaZZjSdTPHQQ2FqKLJYxJ0ZTJUblQXgADnFauvHg,2746 -yt_dlp/extractor/traileraddict.py,sha256=iiyzhehZxHB4zQzas_TqeKxTA5xC4wv9JaL9DiDuF_0,2688 -yt_dlp/extractor/trilulilu.py,sha256=Ml-ILj8TyekWMYUtrzjPY2jZH9HLNtRk7cSzSc-v-Us,3624 -yt_dlp/extractor/trovo.py,sha256=BOsSg2tPjutRDrPlx3ohpqfPJ6XornDfnK0ou4oNcCc,8706 -yt_dlp/extractor/trueid.py,sha256=6rmKfbuEqxz_RY96WoSK6vSCCcUtnnQsu89rTftD5CQ,6405 -yt_dlp/extractor/trunews.py,sha256=KuyHxMQ4sALYqf9Z4oIgzfoYBfzI9n0EImy7HuRDXp4,1485 -yt_dlp/extractor/trutv.py,sha256=qR9uqSwztZ9xk0yaDqbgy7q_7reax4XtqBfn8dELNa0,2518 -yt_dlp/extractor/tube8.py,sha256=EMOmB0K20cYXr4hOGi8hupmF1n5G0KyMOVt757D6zYY,3087 -yt_dlp/extractor/tubitv.py,sha256=bZ2L78TeXQ3KWPOLkDBQG9AFNy2tW_09ylUbg3ywfR0,5125 -yt_dlp/extractor/tudou.py,sha256=KT3Lu_X3dpNXtWeWt_PmCfe1iNr6X24MoXTFQgVbZ4M,1699 -yt_dlp/extractor/tumblr.py,sha256=zTqug0G6oZ9R39i0GOwaDUJ6SL1XW9SC6v7dEWoh1K8,8162 -yt_dlp/extractor/tunein.py,sha256=hBfCA9LVuqnLg7c5kJjuc0HiLPrFzU5PVKUlu3fdmus,5891 -yt_dlp/extractor/tunepk.py,sha256=ycdgW_wcCBhQudwSZnYugOlnP0rnVrkQAzs0CvesOEs,3200 -yt_dlp/extractor/turbo.py,sha256=s0exeOIriig_RdbOhIugNerx5308Sn-Jh5HEGL8y_Rw,2427 -yt_dlp/extractor/turner.py,sha256=xOK76TEnGPTkLK63DuvIdVyEMIjPoaOzMXZGtM5pU08,11162 -yt_dlp/extractor/tv2.py,sha256=bR1NRbpc9nSgy33A8yH6D05j-bsR_Ur7VaF_LS-HOoI,13628 -yt_dlp/extractor/tv2dk.py,sha256=f6dvArPaJLL1Zh_bPmS45Ias4B9QRhfhh2x3lAhttTs,5670 
-yt_dlp/extractor/tv2hu.py,sha256=9rvcMNT6gNjl9wJN34h2HgFkEN42HSseHuZEtm7B9ps,4151 -yt_dlp/extractor/tv4.py,sha256=Bhcgy5qcEjQ706yqYeq4GAsJ-qprT72d9ILmK-OJPJE,4930 -yt_dlp/extractor/tv5mondeplus.py,sha256=1xHVkKeytQm0Yjg5EUpsTOLUX2DQXE_3Xs23M0JoyAc,4645 -yt_dlp/extractor/tv5unis.py,sha256=YZg1h2M9Y_s-hItYH_TufELFj-501trNUEKtbOOVAn0,4076 -yt_dlp/extractor/tva.py,sha256=n-upHpJ0k2YhlmqASOQAVx_h_x-BPcip6-oDGub46AA,3158 -yt_dlp/extractor/tvanouvelles.py,sha256=f7s4Tspf2b_0GeUI4kKwqFydUNwKhcOcs9ZQxZ6NhX8,2401 -yt_dlp/extractor/tvc.py,sha256=OBCOylD9bVEb0W8jyAdT05uPOaZnLUUZGMAXOZfoUYc,3910 -yt_dlp/extractor/tver.py,sha256=N8yxw0yxP6-yzZynsdM3NGpHrOxFQdN3mYFTBsoR-z0,2472 -yt_dlp/extractor/tvigle.py,sha256=6GeFd2J5m2vVji-QApJZiJ_ggla_xTXZH672qLWSLeA,5036 -yt_dlp/extractor/tvland.py,sha256=gcgs3IMOHwvmGZ7T3YoON0BTsxR-jf_CSpwUzy9gQag,1580 -yt_dlp/extractor/tvn24.py,sha256=h63gtpUSfdSTu9RftEqSNuclymynOaifP-_rPsZ5oGg,3902 -yt_dlp/extractor/tvnet.py,sha256=EyaqqyUyj8lihwIrEE2Tuo4I71lWAlXAedavLkX4o7w,4877 -yt_dlp/extractor/tvnoe.py,sha256=50w6JIoLb_PnjKB1ZtC2TKseNqoyw159n9i1pIB8k4o,1612 -yt_dlp/extractor/tvnow.py,sha256=3T5VGnkUly-mObkryfvOrBWU9G_WMa_R2yrSCVRtoOg,24114 -yt_dlp/extractor/tvopengr.py,sha256=kmvzTuemObirjMbMxW6j0IiIEejXEl2eqjTvP5N1bMI,6042 -yt_dlp/extractor/tvp.py,sha256=B9YysEoc1UuqDhedWOJ_yJL9GYp4Y3hKGuN_mCUN2EI,20073 -yt_dlp/extractor/tvplay.py,sha256=Pg8o-Jmr7HvAzWL2jFbPsrS33FtSmK5AiasI2gHHmGw,20349 -yt_dlp/extractor/tvplayer.py,sha256=ObddgpjtPwRtHMxwW614H1f1OzSh0giZj4PYjl407Pg,2808 -yt_dlp/extractor/tweakers.py,sha256=vgDrlTVOM2gDhqzz2lgJM31MLU3VzsWOLbQyddJqfU0,2165 -yt_dlp/extractor/twentyfourvideo.py,sha256=LV3udCJEUcNDwb3Rq4fDO4d9monR8FOriEbyCfuf67Y,4742 -yt_dlp/extractor/twentymin.py,sha256=GOJ288OwQ_qM5vXAtVHPybmlzxKA8hRGI2WlHgyeBUQ,2860 -yt_dlp/extractor/twentythreevideo.py,sha256=NVfD77e_WRddtIIENZuC9uIdpqcEvHNo9VlLGGbn5Y4,3284 -yt_dlp/extractor/twitcasting.py,sha256=MRwREavQJ_KaJq0cca4sOHgRjrIOj7N8n9gL8-yXhtU,9752 -yt_dlp/extractor/twitch.py,sha256=gwCJrqxZPFmTlK7JjkErD3Ch6kZ-1TET5wTU8d1-JOY,36572 -yt_dlp/extractor/twitter.py,sha256=jSQJHAhL09KH0k4LlT9iSJbVudpN7vpv6S6zTkeVMCQ,29339 -yt_dlp/extractor/udemy.py,sha256=zIiOcuqB6mZBjgyweaIc6rsw8I1pOKcWV6PiUEdLB9w,19351 -yt_dlp/extractor/udn.py,sha256=Txl0apTktMHK6uVftWDh0z15OiAnH3Pxz0a3uA-wBgU,3575 -yt_dlp/extractor/ufctv.py,sha256=r_RcKNQODJlgTk4GjmyfE_THH6UqVTIQ2U994hAOFDs,511 -yt_dlp/extractor/ukcolumn.py,sha256=nqiPzLjVvj8D-HDQjHYnTXBS_0nJbaQfByShDfhCCfM,2479 -yt_dlp/extractor/uktvplay.py,sha256=6qdHuUcP3GlqJzmkLrCEuqczBfzhsiq7GZ5h8_uDMeA,1364 -yt_dlp/extractor/umg.py,sha256=MT-VK7tETQkXN38mJ6VFozj0K6qOdH_Pt48sFKSEVXE,3262 -yt_dlp/extractor/unistra.py,sha256=6fYrcVe3Xvp3DOmo5brk1Ue6Er-SK8JkAz6OyeOcnbo,2154 -yt_dlp/extractor/unity.py,sha256=vACiIJLvVHHzc_Pp2-pP2IdGzXzLjEQKy-s3cbsAXY8,1228 -yt_dlp/extractor/uol.py,sha256=_p_xm-XANk-JyJhYC7LbRoHNp8delXBVEWQxNYvOFnI,5503 -yt_dlp/extractor/uplynk.py,sha256=HmcGapHkHkQr4DdQ2kja8cAFsoop8q0Jgfv9LggohdM,2682 -yt_dlp/extractor/urort.py,sha256=vpYSn_pvMcTvJU_VOskUHTC_nv9QVR6hQ9EVK3JflSs,2246 -yt_dlp/extractor/urplay.py,sha256=-CcnOTxkqa4Ym6u-xibNOluC6uH4UKwiYICT0CAHTUw,4716 -yt_dlp/extractor/usanetwork.py,sha256=yZ-1KBXE7dC3w4MllGznrd4hl2K3K4mdfGdCA0qdZsg,814 -yt_dlp/extractor/usatoday.py,sha256=_LtMn5HA5yfuzvf9zvatmv2sZqVU4sF9W4yy2_4-GmU,2703 -yt_dlp/extractor/ustream.py,sha256=WcgtYkDIzEeUJRTPmZW0R8vGjCyxPuNCNMTXBAN4Tv4,10766 -yt_dlp/extractor/ustudio.py,sha256=4RABcQbCKyRFowqdzeV-YlGtKpEnla6ZyTQ-HbMc-88,4374 
-yt_dlp/extractor/utreon.py,sha256=UnZcOhNplMBAZy5lXJK2OBCt2aDxib4MPfLDWKfixlM,3514 -yt_dlp/extractor/varzesh3.py,sha256=IAIAObbtb2EkZzYhdKCJINakg68BRC60nMC68Yil8LM,3129 -yt_dlp/extractor/vbox7.py,sha256=vaeR4Zr1kHy0gP5ItE2txuA-gP3GN4EwTzPS7u7ToVo,3413 -yt_dlp/extractor/veehd.py,sha256=0phpRUSoldfAuGaYI5FFyXfqgUBv6r0zn1UgrpVoLno,4134 -yt_dlp/extractor/veo.py,sha256=hBlN8vYB-bnGsd6M8iUiC9qXs-HzlSlvMYXw6Ld9b6s,2358 -yt_dlp/extractor/veoh.py,sha256=PlrsuRyvP5QPgo1HUkR6tMUOmV61S7GhLuvaEtZoIUE,4790 -yt_dlp/extractor/vesti.py,sha256=N4thLQJ0fwtnog064AmL9dlOHCCB-p7-L08_tp-YDOI,4379 -yt_dlp/extractor/vevo.py,sha256=N3_x4ybBKlc7oekai50YxYik1xyqf56DsuikrzTPAA8,8856 -yt_dlp/extractor/vgtv.py,sha256=cSz3eNIY79w4A3XOzE3SO2MYO0VXLX1bmAjNr35zMIA,10830 -yt_dlp/extractor/vh1.py,sha256=MYB0OdpGm5VVOIo0rARO6moicB5hbyad2y5zrD5iR9o,1438 -yt_dlp/extractor/vice.py,sha256=_aXgqjYXWwjo2P5o5ICkaOdNfFUs9o7iHX7k1hl2iBo,12367 -yt_dlp/extractor/vidbit.py,sha256=43jtzjcpxfhPHJW2F12xqGKg_iIJwV5RCaLhSJQgD4M,2917 -yt_dlp/extractor/viddler.py,sha256=04_l79Q6kBHm18pX_tUR8D_yMJz3xn88Ui9INygXhfg,4876 -yt_dlp/extractor/videa.py,sha256=tmA1D1pLSDWEEqnIQmyqZBPUQCe2gWmlNC87fdNdN3c,6701 -yt_dlp/extractor/videodetective.py,sha256=fgoV7IFckQMOZiPHIoy6TEp17NcWti_YNz1ti1l1a5I,906 -yt_dlp/extractor/videofyme.py,sha256=d6T9_m7BcB0u6ZU5CsiyIFyBYzF_kh22HlcOsWsYsv8,1750 -yt_dlp/extractor/videomore.py,sha256=qA-Q5QcXE6X1MUJyYrEgNpnM8erc9_Xtqv2LMCj72nc,11649 -yt_dlp/extractor/videopress.py,sha256=l8ud1F4fg_LDnhtxtr3ymKtLSdjc1VVJV8oiHggG7ss,3359 -yt_dlp/extractor/vidio.py,sha256=sqlHupfd5z571mGc4fPaRVtUfUddN8DHvE9GKqRN8U0,12538 -yt_dlp/extractor/vidlii.py,sha256=z_RJk7NRShBOjwIlOdpFm5dUkGaGE_mXWogSqOY1Xnk,5831 -yt_dlp/extractor/vidzi.py,sha256=bwKHsEIf-KwvXE6tUuqWcdnOgXGuY2EzhnjLP4U71ys,2165 -yt_dlp/extractor/vier.py,sha256=Y8DT62_VorP50zG8EzCjvveXk8UBuC4xwVxlzkFoGVc,10000 -yt_dlp/extractor/viewlift.py,sha256=h5OKngr1JOc6bheyJBMsMLfnttWrcFLRoxT9tu0-ucM,14237 -yt_dlp/extractor/viidea.py,sha256=UA1RKYUPQ2LKrGw9xl8i4cxZQzX-Vc5YW1sXafG6o94,7442 -yt_dlp/extractor/viki.py,sha256=xwtjtw21LNN4lFDwgxU55WmbQ8AsQjcpd5Srt5X4Uoo,13686 -yt_dlp/extractor/vimeo.py,sha256=Bjz8Re1hbVeLDHkyt5BhpYLwwtKXRM5U6hG1bqLmsV8,54404 -yt_dlp/extractor/vimm.py,sha256=tSEQ-AuwqEd254ve-enPFmtc8vQkF3yaNJPTjEeVj_M,2242 -yt_dlp/extractor/vimple.py,sha256=dfP77hOY7EJURUoPRUycV02JRvYrryGZ4NmO6Y4FVFI,1968 -yt_dlp/extractor/vine.py,sha256=yt_PpFjAphNJ9up0ld7xUQF_TWEZZaTKyttvo8eUc0M,5378 -yt_dlp/extractor/viqeo.py,sha256=vlgfyQUeQJ5M7VgE0eZX6dBpoauCBaNnFHa5rcJdQ3E,3298 -yt_dlp/extractor/viu.py,sha256=sX9BDtaQVxvg7tPm4brODnz1rt9zJBr37BgVGJD30tk,14681 -yt_dlp/extractor/vk.py,sha256=mrNWwTD3F8QYfwTmC6p7D8C-f0TrQLm1dSxvdlhjg6Y,27164 -yt_dlp/extractor/vlive.py,sha256=n3rltRjbMTS9ltmKkwh2zV6Z8_1evBoIvNDBXFFkxNg,14160 -yt_dlp/extractor/vodlocker.py,sha256=-XT2R046SvkSiq8PUTmwMQFAJHqyIItvPLQff-l4PKw,2796 -yt_dlp/extractor/vodpl.py,sha256=e-_HzG-ClVQHIhNlZZLzlv0gm9eVhFGPHosVY_ZbeMc,1032 -yt_dlp/extractor/vodplatform.py,sha256=RgjUxzW-ZYQVs_eG8n17fMn121ZE6zhNVW9sbZV1aD8,1569 -yt_dlp/extractor/voicerepublic.py,sha256=WfeKnyHR4vz9SjK1FALi5xMG-i6NQqqQej8w3Wp9yF4,2302 -yt_dlp/extractor/voicy.py,sha256=HEc5Kls7hw6iFMYnTLhoy4-5eQnnOEp5_ZXqmaobDvk,5475 -yt_dlp/extractor/voot.py,sha256=MO4l-uo1AJGEx9wskFeHR02nEH7dzlMNrTJSnLp2Zm8,5954 -yt_dlp/extractor/voxmedia.py,sha256=kdShPoHkl_OXjfPpEppMl-z5bS7uPmL-FrKOrYiccU8,9844 -yt_dlp/extractor/vrak.py,sha256=C6F1rIjKRggflKqpaAwI9BtwxGjmi5ldOgVz6iKiIOc,2943 -yt_dlp/extractor/vrt.py,sha256=BmkqS4q48TOsgnLzn6_vX2HBatkTr0FDe2JGeReVk-M,3525 
-yt_dlp/extractor/vrv.py,sha256=rccFUKJfaG527jgUA7UY3eEwFeXjCqtdz7XCfVohu7U,11020 -yt_dlp/extractor/vshare.py,sha256=h29M1CnmGclJI90-s4hr8izNCFHq4fXvtW8CXr3E-cc,2281 -yt_dlp/extractor/vtm.py,sha256=7_nJ-e1OZlqO0jolp_VdCKU89MVNSSTu1irJZWVOy5w,1899 -yt_dlp/extractor/vube.py,sha256=sx11tAET9XZk3E5EnRlWKNOBQ5F-CKHGAqI7kVxoi_Y,6900 -yt_dlp/extractor/vuclip.py,sha256=ENGA1EbH1VfD-FKJFOTvlvYtE_PeHuopyswhy8bxH3k,2254 -yt_dlp/extractor/vupload.py,sha256=DFzid218NeqcMbvC2b68wxuDc0Q3tqpQ2TljXlpcPUY,2205 -yt_dlp/extractor/vvvvid.py,sha256=P3LbAREkYpWhSCDftzRiLzoyUU97i9A88CH3bRVxvFk,10292 -yt_dlp/extractor/vyborymos.py,sha256=8RcH7iYKWhpZ11Ot1E3kpjBt39aW6EMd8V3_TjmUbFc,2013 -yt_dlp/extractor/vzaar.py,sha256=l2GiRL_jGCl0H0Y0giZY6XQcJbD0HowJSR8tGJd6xdQ,3608 -yt_dlp/extractor/wakanim.py,sha256=QKARJCxFFg3__LdoH4wIZNWl8fb_I3Pj894wHnKrVkQ,2833 -yt_dlp/extractor/walla.py,sha256=skTPWb_-M-LmaxyrYUcJPh8j5tx2vZrb_3N28GV63QI,2813 -yt_dlp/extractor/washingtonpost.py,sha256=o9PQAmgYpmXaKO_tuJhVb3Q4vENkp0Jb3TbbDbT4JkM,5257 -yt_dlp/extractor/wat.py,sha256=iPUkU70k9a5QEgGAOnrpCKOuOT1_GsSR78ccMumuRW0,4300 -yt_dlp/extractor/watchbox.py,sha256=VG1mSjCJqP02eOrssWmnB1LQQYIclwJBVOb6r0PZAvc,5858 -yt_dlp/extractor/watchindianporn.py,sha256=fUvjtvChnVjLQKIViR7kLqIWYh2liieDlEjjv38DMV8,2293 -yt_dlp/extractor/wdr.py,sha256=H1-DD2_KjON7ROK0sUlu7GfUDlu_7xx6zDwcbvOzh4c,13551 -yt_dlp/extractor/webcaster.py,sha256=25k6gKI_RasQzrNU9W10ztJEP6Hh662dzIOSXNR-kI8,3777 -yt_dlp/extractor/webofstories.py,sha256=LPUtQWdHrE833OFSyAP1I3MKQFawar6kseO47BKKbFI,5550 -yt_dlp/extractor/weibo.py,sha256=EYrlrlb1sGrBEYh8CvL0JFk-FUd0CZiUQaeunD_9Kvs,4493 -yt_dlp/extractor/weiqitv.py,sha256=ghqlTyBeNl1ZGBOdBL2dqJ6ULa-uzsI0bR86-dLgXvs,1681 -yt_dlp/extractor/whowatch.py,sha256=8hWbTNtYiex5Ohup1VbYgj0J8xNmjAUkK4pUQ_ymCRM,4018 -yt_dlp/extractor/willow.py,sha256=pBnGfNnm0qKYV72JVfk34RwBHTk1lVQS3QzM96nf53g,2394 -yt_dlp/extractor/wimtv.py,sha256=wYDYqdD7VCdZizuPKialto2M8mfjPNKPzfbSdotTcoQ,5734 -yt_dlp/extractor/wistia.py,sha256=1W5mdXiUPEDWLHrwO0HGVKPqFZoJJidg3yYbDmTz5so,7227 -yt_dlp/extractor/worldstarhiphop.py,sha256=9XazCpluCOOXWEJg2ntt6l-zfZEVOohurSCwJ_EMbm4,1344 -yt_dlp/extractor/wppilot.py,sha256=kXiFJExy__S5ISD4aOlj6MDV7wEPcpZ5hIiBWtLkFlo,6203 -yt_dlp/extractor/wsj.py,sha256=97OiTsMSBeKh394rlle1At7lXeEAvHEXQSU8HbgotP0,4694 -yt_dlp/extractor/wwe.py,sha256=6TKA4KCG7Mo-TdIioHmirDBMPSarr9oaxXbYMPdjts0,4532 -yt_dlp/extractor/xbef.py,sha256=86k_CeL6cVVBgxA19jMbK_nNtcqDf4KeuYeoUSfUjDY,1447 -yt_dlp/extractor/xboxclips.py,sha256=-lIwJUAo_jPRq8gQIuIbe5yL2QL0-ozlgnnpEehMdYU,2372 -yt_dlp/extractor/xfileshare.py,sha256=bzpwXkBHULXi5gh9hlbBnebEfMwH72jftbnwsOiEZ1s,7377 -yt_dlp/extractor/xhamster.py,sha256=htQve-v7lZ2oiXRvX0gtQfARsqr6KLNYzI00FcEdroE,18932 -yt_dlp/extractor/xiami.py,sha256=0ER3KPS6J965XQC3m0HduPbY_si8kPUPzRgHvECJt7E,6816 -yt_dlp/extractor/ximalaya.py,sha256=_43O3EPfxTlbLzv3FkjMroYdaU3ddbyaeDXvx5a3WjM,9908 -yt_dlp/extractor/xminus.py,sha256=YBsGlieNPXF-GfDiUwXawbSFnp1VdE3wFP9msLG8wfc,2937 -yt_dlp/extractor/xnxx.py,sha256=kWUgyx72AXM8Hz7ynK1RP6eEMppnKu-h5nQhgym8p6Y,2886 -yt_dlp/extractor/xstream.py,sha256=u44Y1Rp7l4osmB1XPZFeLPVkYSeyaLJfIbFuJTzITRg,3974 -yt_dlp/extractor/xtube.py,sha256=eLE9oQa5vYAFlkFhpXtjcs9un3-iaOGRf0JPSYQZri0,7970 -yt_dlp/extractor/xuite.py,sha256=qGyahXGcz2Q5i49uM6v1WMdlGLcwvMjB01fSwWoB-Ac,5807 -yt_dlp/extractor/xvideos.py,sha256=eAvlLW3XuspGq4loRZcLO5qhVXENoB2pfxZhs4X3wNY,5985 -yt_dlp/extractor/xxxymovies.py,sha256=yPkxbXAdV01wQn6VXGxz-mjkHvqpWoFLsB1Z7mviTvY,2654 
-yt_dlp/extractor/yahoo.py,sha256=CqrDEyXcBUNdM5grOnR721mKpy4Jx3l6RPLlErgFhu0,23944 -yt_dlp/extractor/yandexdisk.py,sha256=YQ0XFS8A9fI_Lk4lHFZUEELwsnFkTj_Qy2-TYoh_FXE,5152 -yt_dlp/extractor/yandexmusic.py,sha256=GRfaY8qjgo5hYG7SJhxWB1iqvr2GMVC2gmiMiYKW4ic,17701 -yt_dlp/extractor/yandexvideo.py,sha256=yDp-Kj4irMGETx4dfSC1QtRDqPwRLOVsK0VL9Os3NXk,13386 -yt_dlp/extractor/yapfiles.py,sha256=3tGub_l6pl0X2HDpXBDigVwMQIQ2yPGEcd8XIyaznKw,3212 -yt_dlp/extractor/yesjapan.py,sha256=vKGITT87Ucy7hQ2jNUvgN1VZ7WZBiP6A93jq1LRJudg,2196 -yt_dlp/extractor/yinyuetai.py,sha256=WUkLfnoBatUPSoB2F5badCafa_fnLGgcul7vLkxT_Ig,1908 -yt_dlp/extractor/ynet.py,sha256=oJ1d-c6Ixy1GEXhdefyujGKDJOq3_KB7mXCEU7BsVTM,1807 -yt_dlp/extractor/youjizz.py,sha256=c4hN3FNSOAGMgUn5oMZvUMQI8Gn756awdXR6tXMw7P8,3112 -yt_dlp/extractor/youku.py,sha256=WT56nRk86tvPA7klJxyZullWIqnaLK_ccNRYAc29BWU,11391 -yt_dlp/extractor/younow.py,sha256=C45bWbtXeDMMnVWmmWrPbLI5HNLMgjmMFs8W8U-ftHo,7089 -yt_dlp/extractor/youporn.py,sha256=lZ8uEwSMl1pI_0_rB1jUDalctVCquMYK-9AIrg-_RnE,7290 -yt_dlp/extractor/yourporn.py,sha256=SpTte_H2TRaQ5YqCRUNb-1UDBeA3rpspzig2aNspLCM,2062 -yt_dlp/extractor/yourupload.py,sha256=5ZIEtNNgMJ7cnpIAoqymKA_WBxbwy-zFjD10TpN-hhI,1411 -yt_dlp/extractor/youtube.py,sha256=3FqKofRucD_Rc85Wkmij5xcRHJVROElH0Y1HUdTR0Ag,255036 -yt_dlp/extractor/zapiks.py,sha256=KHjDmba9mMC9SHRtv1lwR2A388IeBI6crIPRfH2O2II,3828 -yt_dlp/extractor/zaq1.py,sha256=oaAeg-ZzI9vDXmAXGWhnrTPVN4UiHuhnRxCPctR_lNw,3293 -yt_dlp/extractor/zattoo.py,sha256=L_0kyxBPcm5I30AzaNrJz5C1TulYGaYTcgi2qjRigic,14066 -yt_dlp/extractor/zdf.py,sha256=fVxKIyeTTO2cZHCe5Dw71-SKGV1iP2mDQkljzWfQTf8,15865 -yt_dlp/extractor/zee5.py,sha256=7MOYgwRolWrvPh8OZd9wrlO7roD9g7-0bGqerDc7ko0,10992 -yt_dlp/extractor/zhihu.py,sha256=AyVCjgIj9wNnDnpH80vKmJR1GIi9vqv8BoEXFsc37FM,2641 -yt_dlp/extractor/zingmp3.py,sha256=YtUmt3GlR-ROIhN6I0hYSxsXnL9zM1BbW6j1XjHBEGI,5552 -yt_dlp/extractor/zoom.py,sha256=L6L9JTphAIDOd0VJ0SNM2ZycLiwrLVmJzIl8HRjl324,2949 -yt_dlp/extractor/zype.py,sha256=Tx2SwLxSWUahj4ZKjAaYegHG9Q1oD3lQNIKGUaavBEo,5761 -yt_dlp/jsinterp.py,sha256=gmRaSOvCZqPIhWfmbPisee6Ax340BgUr0QIU-P_DvTA,21594 -yt_dlp/minicurses.py,sha256=xWEDlq6wrnrad2KYltvR8D8ZYKNnd5Fenb_Je1oiYt8,5241 -yt_dlp/options.py,sha256=5c1eGWFEVqG3ZVgqsdSq69HEpRF4qFZVDmQWJD8itZQ,84321 -yt_dlp/postprocessor/__init__.py,sha256=NF4ltYrH_1wqfOtULwQVyGDIxxlhSbvcOCoL24ZdDtw,1256 -yt_dlp/postprocessor/__pycache__/__init__.cpython-310.pyc,, -yt_dlp/postprocessor/__pycache__/common.cpython-310.pyc,, -yt_dlp/postprocessor/__pycache__/embedthumbnail.cpython-310.pyc,, -yt_dlp/postprocessor/__pycache__/exec.cpython-310.pyc,, -yt_dlp/postprocessor/__pycache__/ffmpeg.cpython-310.pyc,, -yt_dlp/postprocessor/__pycache__/metadataparser.cpython-310.pyc,, -yt_dlp/postprocessor/__pycache__/modify_chapters.cpython-310.pyc,, -yt_dlp/postprocessor/__pycache__/movefilesafterdownload.cpython-310.pyc,, -yt_dlp/postprocessor/__pycache__/sponskrub.cpython-310.pyc,, -yt_dlp/postprocessor/__pycache__/sponsorblock.cpython-310.pyc,, -yt_dlp/postprocessor/__pycache__/xattrpp.cpython-310.pyc,, -yt_dlp/postprocessor/common.py,sha256=Lz__95mhsXcBKZPV4A3-_uIXEwcP1fb5TwPxLP0GjFU,6510 -yt_dlp/postprocessor/embedthumbnail.py,sha256=JhAOizMuCdK_qoqlUn7ZPNCItqNEaabunnEyL_0uQmA,10591 -yt_dlp/postprocessor/exec.py,sha256=XCXzikjZs24t4jhxAeJXIPXexAXgj9F5qNyOSkykskU,1658 -yt_dlp/postprocessor/ffmpeg.py,sha256=4rnHseZEKa8MyrRSwjY1l6yxaT_2neNfQK9DsiE-gqc,48521 -yt_dlp/postprocessor/metadataparser.py,sha256=YqFIgVnqCEq1d8aFLIZAjDcWIKXgTbiLVdj-ZmAT2pE,4236 
-yt_dlp/postprocessor/modify_chapters.py,sha256=nUE42jNtTci5zF9IYGYcSsghqhi2e774Ei0vCjH4-Ps,17429 -yt_dlp/postprocessor/movefilesafterdownload.py,sha256=0nqxSRqjHTWDt3in8pRbo_UMMA8Dr4GFg4LKg33A5tw,2088 -yt_dlp/postprocessor/sponskrub.py,sha256=qbGy79cH1ZGp8SJYiSu6-39qWXCIJtpq5oys_nMvkIg,4253 -yt_dlp/postprocessor/sponsorblock.py,sha256=M1zn6G2YufuultUvIBCoerxXjDNcBr5w7ESHx8X5N6k,5014 -yt_dlp/postprocessor/xattrpp.py,sha256=5Ox8wwYm9iVMzwqc8v7OKrSk6HD75j6pWpjBFZT28zw,2870 -yt_dlp/socks.py,sha256=0GytSIZ-7ix5Kl6MO1DmUR9StnJBU7O6d8q-mTBzDOc,8861 -yt_dlp/update.py,sha256=toy_xDaQ86Msoymte9iImGv4GnI3wPuiv3TrXHZgNw8,10327 -yt_dlp/utils.py,sha256=GqXp2eGIg_D6295GRyQt-Jok7j_qWOd5WobnjgQPlc4,173935 -yt_dlp/version.py,sha256=5nDYPbSgswdISGzh930qgDpNYhUCS_apqYvZx2RSHhc,108 -yt_dlp/webvtt.py,sha256=nlyXcTEcxzfCDZGA4j__5Ky24L7v16xpx-lqeCiC7RA,11161 diff --git a/plugins/youtube_download/yt_dlp-2022.2.4.dist-info/REQUESTED b/plugins/youtube_download/yt_dlp-2022.2.4.dist-info/REQUESTED deleted file mode 100644 index e69de29..0000000 diff --git a/plugins/youtube_download/yt_dlp-2022.2.4.dist-info/WHEEL b/plugins/youtube_download/yt_dlp-2022.2.4.dist-info/WHEEL deleted file mode 100644 index 0b18a28..0000000 --- a/plugins/youtube_download/yt_dlp-2022.2.4.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.37.1) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/plugins/youtube_download/yt_dlp-2022.2.4.dist-info/entry_points.txt b/plugins/youtube_download/yt_dlp-2022.2.4.dist-info/entry_points.txt deleted file mode 100644 index 134eb21..0000000 --- a/plugins/youtube_download/yt_dlp-2022.2.4.dist-info/entry_points.txt +++ /dev/null @@ -1,3 +0,0 @@ -[console_scripts] -yt-dlp = yt_dlp:main - diff --git a/plugins/youtube_download/yt_dlp-2022.2.4.dist-info/top_level.txt b/plugins/youtube_download/yt_dlp-2022.2.4.dist-info/top_level.txt deleted file mode 100644 index a866577..0000000 --- a/plugins/youtube_download/yt_dlp-2022.2.4.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -yt_dlp diff --git a/plugins/youtube_download/yt_dlp/YoutubeDL.py b/plugins/youtube_download/yt_dlp/YoutubeDL.py index fd1584a..4891b3f 100644 --- a/plugins/youtube_download/yt_dlp/YoutubeDL.py +++ b/plugins/youtube_download/yt_dlp/YoutubeDL.py @@ -1,8 +1,3 @@ -#!/usr/bin/env python3 -# coding: utf-8 - -from __future__ import absolute_import, unicode_literals - import collections import contextlib import datetime @@ -15,7 +10,7 @@ import json import locale import operator import os -import platform +import random import re import shutil import subprocess @@ -24,127 +19,22 @@ import tempfile import time import tokenize import traceback -import random import unicodedata - -from enum import Enum +import urllib.request from string import ascii_letters -from .compat import ( - compat_basestring, - compat_get_terminal_size, - compat_kwargs, - compat_numeric_types, - compat_os_name, - compat_pycrypto_AES, - compat_shlex_quote, - compat_str, - compat_tokenize_tokenize, - compat_urllib_error, - compat_urllib_request, - compat_urllib_request_DataHandler, - windows_enable_vt_mode, -) -from .cookies import load_cookies -from .utils import ( - age_restricted, - args_to_str, - ContentTooShortError, - date_from_str, - DateRange, - DEFAULT_OUTTMPL, - determine_ext, - determine_protocol, - DownloadCancelled, - DownloadError, - encode_compat_str, - encodeFilename, - EntryNotInPlaylist, - error_to_compat_str, - ExistingVideoReached, - expand_path, - ExtractorError, - float_or_none, - 
format_bytes, - format_field, - format_decimal_suffix, - formatSeconds, - GeoRestrictedError, - get_domain, - HEADRequest, - InAdvancePagedList, - int_or_none, - iri_to_uri, - ISO3166Utils, - join_nonempty, - LazyList, - LINK_TEMPLATES, - locked_file, - make_dir, - make_HTTPS_handler, - MaxDownloadsReached, - network_exceptions, - number_of_digits, - orderedSet, - OUTTMPL_TYPES, - PagedList, - parse_filesize, - PerRequestProxyHandler, - platform_name, - Popen, - POSTPROCESS_WHEN, - PostProcessingError, - preferredencoding, - prepend_extension, - ReExtractInfo, - register_socks_protocols, - RejectedVideoReached, - remove_terminal_sequences, - render_table, - replace_extension, - SameFileError, - sanitize_filename, - sanitize_path, - sanitize_url, - sanitized_Request, - std_headers, - STR_FORMAT_RE_TMPL, - STR_FORMAT_TYPES, - str_or_none, - strftime_or_none, - subtitles_filename, - supports_terminal_sequences, - timetuple_from_msec, - to_high_limit_path, - traverse_obj, - try_get, - UnavailableVideoError, - url_basename, - variadic, - version_tuple, - write_json_file, - write_string, - YoutubeDLCookieProcessor, - YoutubeDLHandler, - YoutubeDLRedirectHandler, -) from .cache import Cache -from .minicurses import format_text -from .extractor import ( - gen_extractor_classes, - get_info_extractor, - _LAZY_LOADER, - _PLUGIN_CLASSES as plugin_extractors -) -from .extractor.openload import PhantomJSwrapper -from .downloader import ( - FFmpegFD, - get_suitable_downloader, - shorten_protocol_name -) +from .compat import compat_os_name, compat_shlex_quote +from .cookies import load_cookies +from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name from .downloader.rtmp import rtmpdump_version +from .extractor import gen_extractor_classes, get_info_extractor +from .extractor.common import UnsupportedURLIE +from .extractor.openload import PhantomJSwrapper +from .minicurses import format_text +from .plugins import directories as plugin_directories +from .postprocessor import _PLUGIN_CLASSES as plugin_pps from .postprocessor import ( - get_postprocessor, EmbedThumbnailPP, FFmpegFixupDuplicateMoovPP, FFmpegFixupDurationPP, @@ -154,17 +44,119 @@ from .postprocessor import ( FFmpegFixupTimestampPP, FFmpegMergerPP, FFmpegPostProcessor, + FFmpegVideoConvertorPP, MoveFilesAfterDownloadPP, - _PLUGIN_CLASSES as plugin_postprocessors + get_postprocessor, ) -from .update import detect_variant -from .version import __version__, RELEASE_GIT_HEAD +from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping +from .update import REPOSITORY, current_git_head, detect_variant +from .utils import ( + DEFAULT_OUTTMPL, + IDENTITY, + LINK_TEMPLATES, + MEDIA_EXTENSIONS, + NO_DEFAULT, + NUMBER_RE, + OUTTMPL_TYPES, + POSTPROCESS_WHEN, + STR_FORMAT_RE_TMPL, + STR_FORMAT_TYPES, + ContentTooShortError, + DateRange, + DownloadCancelled, + DownloadError, + EntryNotInPlaylist, + ExistingVideoReached, + ExtractorError, + FormatSorter, + GeoRestrictedError, + HEADRequest, + ISO3166Utils, + LazyList, + MaxDownloadsReached, + Namespace, + PagedList, + PerRequestProxyHandler, + PlaylistEntries, + Popen, + PostProcessingError, + ReExtractInfo, + RejectedVideoReached, + SameFileError, + UnavailableVideoError, + UserNotLive, + YoutubeDLCookieProcessor, + YoutubeDLHandler, + YoutubeDLRedirectHandler, + age_restricted, + args_to_str, + bug_reports_message, + date_from_str, + deprecation_warning, + determine_ext, + determine_protocol, + encode_compat_str, + encodeFilename, + error_to_compat_str, + 
escapeHTML, + expand_path, + filter_dict, + float_or_none, + format_bytes, + format_decimal_suffix, + format_field, + formatSeconds, + get_compatible_ext, + get_domain, + int_or_none, + iri_to_uri, + is_path_like, + join_nonempty, + locked_file, + make_archive_id, + make_dir, + make_HTTPS_handler, + merge_headers, + network_exceptions, + number_of_digits, + orderedSet, + orderedSet_from_options, + parse_filesize, + preferredencoding, + prepend_extension, + register_socks_protocols, + remove_terminal_sequences, + render_table, + replace_extension, + sanitize_filename, + sanitize_path, + sanitize_url, + sanitized_Request, + std_headers, + str_or_none, + strftime_or_none, + subtitles_filename, + supports_terminal_sequences, + system_identifier, + timetuple_from_msec, + to_high_limit_path, + traverse_obj, + try_call, + try_get, + url_basename, + variadic, + version_tuple, + windows_enable_vt_mode, + write_json_file, + write_string, +) +from .version import RELEASE_GIT_HEAD, VARIANT, __version__ if compat_os_name == 'nt': import ctypes -class YoutubeDL(object): +class YoutubeDL: """YoutubeDL class. YoutubeDL objects are the ones responsible of downloading the @@ -207,13 +199,6 @@ class YoutubeDL(object): For compatibility, a single list is also accepted print_to_file: A dict with keys WHEN (same as forceprint) mapped to a list of tuples with (template, filename) - forceurl: Force printing final URL. (Deprecated) - forcetitle: Force printing title. (Deprecated) - forceid: Force printing ID. (Deprecated) - forcethumbnail: Force printing thumbnail URL. (Deprecated) - forcedescription: Force printing description. (Deprecated) - forcefilename: Force printing final filename. (Deprecated) - forceduration: Force printing duration. (Deprecated) forcejson: Force printing info_dict as JSON. dump_single_json: Force printing the info_dict of the whole playlist (or video) as a single JSON line. @@ -233,6 +218,8 @@ class YoutubeDL(object): See "Sorting Formats" for more details. format_sort_force: Force the given format_sort. see "Sorting Formats" for more details. + prefer_free_formats: Whether to prefer video formats with free containers + over non-free ones of same quality. allow_multiple_video_streams: Allow multiple video streams to be merged into a single file allow_multiple_audio_streams: Allow multiple audio streams to be merged @@ -255,22 +242,20 @@ class YoutubeDL(object): Default is 'only_download' for CLI, but False for API skip_playlist_after_errors: Number of allowed failures until the rest of the playlist is skipped - force_generic_extractor: Force downloader to use the generic extractor + allowed_extractors: List of regexes to match against extractor names that are allowed overwrites: Overwrite all video and metadata files if True, overwrite only non-video files if None and don't overwrite any file if False For compatibility with youtube-dl, "nooverwrites" may also be used instead - playliststart: Playlist item to start at. - playlistend: Playlist item to end at. playlist_items: Specific indices of playlist to download. - playlistreverse: Download playlist items in reverse order. playlistrandom: Download playlist items in random order. + lazy_playlist: Process playlist entries as they are received. matchtitle: Download only matching titles. rejecttitle: Reject downloads for matching titles. logger: Log messages to a logging.Logger instance. - logtostderr: Log messages to stderr instead of stdout. - consoletitle: Display progress in console window's titlebar. 
+ logtostderr: Print everything to stderr instead of stdout. + consoletitle: Display progress in console window's titlebar. writedescription: Write the video description to a .description file writeinfojson: Write the video description to a .info.json file clean_infojson: Remove private fields from the infojson @@ -288,15 +273,12 @@ class YoutubeDL(object): writedesktoplink: Write a Linux internet shortcut file (.desktop) writesubtitles: Write the video subtitles to a file writeautomaticsub: Write the automatically generated subtitles to a file - allsubtitles: Deprecated - Use subtitleslangs = ['all'] - Downloads all the subtitles of the video - (requires writesubtitles or writeautomaticsub) listsubtitles: Lists all available subtitles for the video subtitlesformat: The format code for subtitles subtitleslangs: List of languages of the subtitles to download (can be regex). The list may contain "all" to refer to all the available subtitles. The language can be prefixed with a "-" to - exclude it from the requested languages. Eg: ['all', '-live_chat'] + exclude it from the requested languages, e.g. ['all', '-live_chat'] keepvideo: Keep the video file after post-processing daterange: A DateRange object, download only if the upload_date is in the range. skip_download: Skip the actual download of the video file @@ -314,24 +296,30 @@ class YoutubeDL(object): downloaded. Videos without view count information are always downloaded. None for no limit. - download_archive: File name of a file where all downloads are recorded. - Videos already present in the file are not downloaded - again. + download_archive: A set, or the name of a file where all downloads are recorded. + Videos already present in the file are not downloaded again. break_on_existing: Stop the download process after attempting to download a file that is in the archive. break_on_reject: Stop the download process when encountering a video that has been filtered out. break_per_url: Whether break_on_reject and break_on_existing should act on each input URL as opposed to for the entire queue - cookiefile: File name where cookies should be read from and dumped to + cookiefile: File name or text stream from where cookies should be read and dumped to cookiesfrombrowser: A tuple containing the name of the browser, the profile - name/pathfrom where cookies are loaded, and the name of the - keyring. Eg: ('chrome', ) or ('vivaldi', 'default', 'BASICTEXT') + name/path from where cookies are loaded, the name of the keyring, + and the container name, e.g. ('chrome', ) or + ('vivaldi', 'default', 'BASICTEXT') or ('firefox', 'default', None, 'Meta') legacyserverconnect: Explicitly allow HTTPS connection to servers that do not support RFC 5746 secure renegotiation nocheckcertificate: Do not verify SSL certificates + client_certificate: Path to client certificate file in PEM format. May include the private key + client_certificate_key: Path to private key file for client certificate + client_certificate_password: Password for client certificate private key, if encrypted. + If not provided and the key is encrypted, yt-dlp will ask interactively prefer_insecure: Use HTTP instead of HTTPS to retrieve information. - At the moment, this is only supported by YouTube. + (Only supported by some extractors) + enable_file_urls: Enable file:// URLs. This is disabled by default for security reasons. 
+ http_headers: A dictionary of custom headers to be used for all requests proxy: URL of the proxy server to use geo_verification_proxy: URL of the proxy to use for IP address verification on geo-restricted sites. @@ -339,13 +327,17 @@ class YoutubeDL(object): bidi_workaround: Work around buggy terminals without bidirectional text support, using fridibi debug_printtraffic:Print out sent and received HTTP traffic - include_ads: Download ads as well (deprecated) default_search: Prepend this string if an input url is not valid. 'auto' for elaborate guessing encoding: Use this encoding instead of the system-specified. - extract_flat: Do not resolve URLs, return the immediate result. - Pass in 'in_playlist' to only show this behavior for - playlist items. + extract_flat: Whether to resolve and process url_results further + * False: Always process (default) + * True: Never process + * 'in_playlist': Do not process inside playlist/multi_video + * 'discard': Always process, but don't return the result + from inside playlist/multi_video + * 'discard_in_playlist': Same as "discard", but only for + playlists (not multi_video) wait_for_video: If given, wait for scheduled streams to become available. The value should be a tuple containing the range (min_secs, max_secs) to wait between retries @@ -355,10 +347,6 @@ class YoutubeDL(object): * when: When to run the postprocessor. Allowed values are the entries of utils.POSTPROCESS_WHEN Assumed to be 'post_process' if not given - post_hooks: Deprecated - Register a custom postprocessor instead - A list of functions that get called as the final step - for each video file, after all postprocessors have been - called. The filename will be passed as the only argument. progress_hooks: A list of functions that get called on download progress, with a dictionary with the entries * status: One of "downloading", "error", or "finished". @@ -393,7 +381,7 @@ class YoutubeDL(object): Progress hooks are guaranteed to be called at least twice (with status "started" and "finished") if the processing is successful. - merge_output_format: Extension to use when merging formats. + merge_output_format: "/" separated list of extensions to use when merging formats. final_ext: Expected final extension; used to detect when the file was already downloaded and converted fixup: Automatically correct known faults of the file. @@ -403,8 +391,6 @@ class YoutubeDL(object): - "detect_or_warn": check whether we can do anything about it, warn otherwise (default) source_address: Client-side IP address to bind to. - call_home: Boolean, true iff we are allowed to contact the - yt-dlp servers for debugging. (BROKEN) sleep_interval_requests: Number of seconds to sleep between requests during extraction sleep_interval: Number of seconds to sleep before each download when @@ -420,10 +406,14 @@ class YoutubeDL(object): sleep_interval_subtitles: Number of seconds to sleep before each subtitle download listformats: Print an overview of available video formats and exit. list_thumbnails: Print a table of all thumbnails and exit. - match_filter: A function that gets called with the info_dict of - every video. - If it returns a message, the video is ignored. - If it returns None, the video is downloaded. + match_filter: A function that gets called for every video with the signature + (info_dict, *, incomplete: bool) -> Optional[str] + For backward compatibility with youtube-dl, the signature + (info_dict) -> Optional[str] is also allowed. + - If it returns a message, the video is ignored. 
     no_color:          Do not emit color codes in output.
     geo_bypass:        Bypass geographic restriction via faking X-Forwarded-For
@@ -435,17 +425,10 @@ class YoutubeDL(object):
     geo_bypass_ip_block:
                        IP range in CIDR notation that will be used similarly to
                        geo_bypass_country
-
-    The following options determine which downloader is picked:
     external_downloader: A dictionary of protocol keys and the executable of the
                        external downloader to use for it. The allowed protocols
                        are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
                        Set the value to 'native' to use the native downloader
-    hls_prefer_native: Deprecated - Use external_downloader = {'m3u8': 'native'}
-                       or {'m3u8': 'ffmpeg'} instead.
-                       Use the native HLS downloader instead of ffmpeg/avconv
-                       if True, otherwise use ffmpeg/avconv if False, otherwise
-                       use downloader suggested by extractor if None.
     compat_opts:       Compatibility options. See "Differences in default behavior".
                        The following options do not work when used through the API:
                        filename, abort-on-error, multistreams, no-live-chat, format-sort
@@ -455,17 +438,29 @@ class YoutubeDL(object):
                        Allowed keys are 'download', 'postprocess',
                        'download-title' (console title) and 'postprocess-title'.
                        The template is mapped on a dictionary with keys 'progress' and 'info'
+    retry_sleep_functions: Dictionary of functions that takes the number of attempts
+                       as argument and returns the time to sleep in seconds.
+                       Allowed keys are 'http', 'fragment', 'file_access'
+    download_ranges:   A callback function that gets called for every video with
+                       the signature (info_dict, ydl) -> Iterable[Section].
+                       Only the returned sections will be downloaded.
+                       Each Section is a dict with the following keys:
+                       * start_time: Start time of the section in seconds
+                       * end_time: End time of the section in seconds
+                       * title: Section title (Optional)
+                       * index: Section number (Optional)
+    force_keyframes_at_cuts: Re-encode the video when downloading ranges to get precise cuts
+    noprogress:        Do not print the progress bar
+    live_from_start:   Whether to download livestreams videos from the start

     The following parameters are not used by YoutubeDL itself, they are used by
     the downloader (see yt_dlp/downloader/common.py):
     nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
     max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
-    continuedl, noprogress, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
+    continuedl, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
     external_downloader_args, concurrent_fragment_downloads.

     The following options are used by the post processors:
-    prefer_ffmpeg:     If False, use avconv instead of ffmpeg if both are available,
-                       otherwise prefer ffmpeg. (avconv support is deprecated)
     ffmpeg_location:   Location of the ffmpeg/avconv binary; either the path
                        to the binary or its containing directory.
     postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
@@ -483,46 +478,90 @@ class YoutubeDL(object):
                        discontinuities such as ad breaks (default: False)
     extractor_args:    A dictionary of arguments to be passed to the extractors.
                        See "EXTRACTOR ARGUMENTS" for details.
-                       Eg: {'youtube': {'skip': ['dash', 'hls']}}
+                       E.g. {'youtube': {'skip': ['dash', 'hls']}}
     mark_watched:      Mark videos watched (even with --simulate).
                        Only for YouTube
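A sketch (not part of the patch) of the new callback-style options documented
above; the section boundaries and backoff curve are invented:

    from yt_dlp import YoutubeDL

    def first_minute(info_dict, ydl):
        # One Section dict per clip to download, keys as documented above
        yield {'start_time': 0, 'end_time': 60, 'title': 'intro', 'index': 1}

    ydl_opts = {
        'download_ranges': first_minute,
        'force_keyframes_at_cuts': True,                      # re-encode for precise cuts
        'retry_sleep_functions': {'http': lambda n: 2 ** n},  # seconds to sleep after n attempts
    }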
-    youtube_include_dash_manifest: Deprecated - Use extractor_args instead.
+
+    The following options are deprecated and may be removed in the future:
+
+    force_generic_extractor: Force downloader to use the generic extractor
+                       - Use allowed_extractors = ['generic', 'default']
+    playliststart:     - Use playlist_items
+                       Playlist item to start at.
+    playlistend:       - Use playlist_items
+                       Playlist item to end at.
+    playlistreverse:   - Use playlist_items
+                       Download playlist items in reverse order.
+    forceurl:          - Use forceprint
+                       Force printing final URL.
+    forcetitle:        - Use forceprint
+                       Force printing title.
+    forceid:           - Use forceprint
+                       Force printing ID.
+    forcethumbnail:    - Use forceprint
+                       Force printing thumbnail URL.
+    forcedescription:  - Use forceprint
+                       Force printing description.
+    forcefilename:     - Use forceprint
+                       Force printing final filename.
+    forceduration:     - Use forceprint
+                       Force printing duration.
+    allsubtitles:      - Use subtitleslangs = ['all']
+                       Downloads all the subtitles of the video
+                       (requires writesubtitles or writeautomaticsub)
+    include_ads:       - Doesn't work
+                       Download ads as well
+    call_home:         - Not implemented
+                       Boolean, true iff we are allowed to contact the
+                       yt-dlp servers for debugging.
+    post_hooks:        - Register a custom postprocessor
+                       A list of functions that get called as the final step
+                       for each video file, after all postprocessors have been
+                       called. The filename will be passed as the only argument.
+    hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
+                       Use the native HLS downloader instead of ffmpeg/avconv
+                       if True, otherwise use ffmpeg/avconv if False, otherwise
+                       use downloader suggested by extractor if None.
+    prefer_ffmpeg:     - avconv support is deprecated
+                       If False, use avconv instead of ffmpeg if both are available,
+                       otherwise prefer ffmpeg.
+    youtube_include_dash_manifest: - Use extractor_args
                        If True (default), DASH manifests and related data will be downloaded and
                        processed by extractor. You can reduce network I/O by disabling it if you
                        don't care about DASH. (only for youtube)
-    youtube_include_hls_manifest: Deprecated - Use extractor_args instead.
+    youtube_include_hls_manifest: - Use extractor_args
                        If True (default), HLS manifests and related data will be downloaded and
                        processed by extractor. You can reduce network I/O by disabling it if you
                        don't care about HLS.
                        (only for youtube)
     """
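A sketch (not part of the patch) of migrating off the deprecated options, using
the replacements suggested above; template strings and the item range are examples:

    # Deprecated style, still accepted for now:
    ydl_opts = {'forcetitle': True, 'forceurl': True, 'playliststart': 1, 'playlistend': 10}

    # Preferred replacements:
    ydl_opts = {
        'forceprint': {'video': ['%(title)s', '%(url)s']},
        'playlist_items': '1:10',
    }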

-    _NUMERIC_FIELDS = set((
-        'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
+    _NUMERIC_FIELDS = {
+        'width', 'height', 'asr', 'audio_channels', 'fps',
+        'tbr', 'abr', 'vbr', 'filesize', 'filesize_approx',
         'timestamp', 'release_timestamp',
         'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
         'average_rating', 'comment_count', 'age_limit',
         'start_time', 'end_time',
         'chapter_number', 'season_number', 'episode_number',
         'track_number', 'disc_number', 'release_year',
-    ))
-
-    _format_selection_exts = {
-        'audio': {'m4a', 'mp3', 'ogg', 'aac'},
-        'video': {'mp4', 'flv', 'webm', '3gp'},
-        'storyboards': {'mhtml'},
     }

-    params = None
-    _ies = {}
-    _pps = {k: [] for k in POSTPROCESS_WHEN}
-    _printed_messages = set()
-    _first_webpage_request = True
-    _download_retcode = None
-    _num_downloads = None
-    _playlist_level = 0
-    _playlist_urls = set()
-    _screen_file = None
+    _format_fields = {
+        # NB: Keep in sync with the docstring of extractor/common.py
+        'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
+        'width', 'height', 'aspect_ratio', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels',
+        'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx', 'rows', 'columns',
+        'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
+        'preference', 'language', 'language_preference', 'quality', 'source_preference',
+        'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'extra_param_to_segment_url', 'hls_aes', 'downloader_options',
+        'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
+    }
+    _format_selection_exts = {
+        'audio': set(MEDIA_EXTENSIONS.common_audio),
+        'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
+        'storyboards': set(MEDIA_EXTENSIONS.storyboards),
+    }

     def __init__(self, params=None, auto_init=True):
         """Create a FileDownloader object with the given options.
@@ -531,6 +570,7 @@ class YoutubeDL(object):
         """
         if params is None:
             params = {}
+        self.params = params
         self._ies = {}
         self._ies_instances = {}
         self._pps = {k: [] for k in POSTPROCESS_WHEN}
@@ -542,20 +582,40 @@ class YoutubeDL(object):
         self._download_retcode = 0
         self._num_downloads = 0
         self._num_videos = 0
-        self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
-        self._err_file = sys.stderr
-        self.params = params
+        self._playlist_level = 0
+        self._playlist_urls = set()
         self.cache = Cache(self)

-        windows_enable_vt_mode()
-        self._allow_colors = {
-            'screen': not self.params.get('no_color') and supports_terminal_sequences(self._screen_file),
-            'err': not self.params.get('no_color') and supports_terminal_sequences(self._err_file),
-        }
+        stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
+        self._out_files = Namespace(
+            out=stdout,
+            error=sys.stderr,
+            screen=sys.stderr if self.params.get('quiet') else stdout,
+            console=None if compat_os_name == 'nt' else next(
+                filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None)
+        )

-        if sys.version_info < (3, 6):
-            self.report_warning(
-                'Python version %d.%d is not supported! Please update to Python 3.6 or above' % sys.version_info[:2])
+        try:
+            windows_enable_vt_mode()
+        except Exception as e:
+            self.write_debug(f'Failed to enable VT mode: {e}')
+
+        self._allow_colors = Namespace(**{
+            type_: not self.params.get('no_color') and supports_terminal_sequences(stream)
+            for type_, stream in self._out_files.items_ if type_ != 'console'
+        })
+
+        # The code is left like this to be reused for future deprecations
+        MIN_SUPPORTED, MIN_RECOMMENDED = (3, 7), (3, 7)
+        current_version = sys.version_info[:2]
+        if current_version < MIN_RECOMMENDED:
+            msg = ('Support for Python version %d.%d has been deprecated. '
+                   'See https://github.com/yt-dlp/yt-dlp/issues/3764 for more details.'
+                   '\n                    You will no longer receive updates on this version')
+            if current_version < MIN_SUPPORTED:
+                msg = 'Python version %d.%d is no longer supported'
+            self.deprecation_warning(
+                f'{msg}! Please update to Python %d.%d or above' % (*current_version, *MIN_RECOMMENDED))

         if self.params.get('allow_unplayable_formats'):
             self.report_warning(
@@ -564,9 +624,33 @@ class YoutubeDL(object):
                 ' If you experience any issues while using this option, '
                 f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')

+        if self.params.get('bidi_workaround', False):
+            try:
+                import pty
+                master, slave = pty.openpty()
+                width = shutil.get_terminal_size().columns
+                width_args = [] if width is None else ['-w', str(width)]
+                sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
+                try:
+                    self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
+                except OSError:
+                    self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
+                self._output_channel = os.fdopen(master, 'rb')
+            except OSError as ose:
+                if ose.errno == errno.ENOENT:
+                    self.report_warning(
+                        'Could not find fribidi executable, ignoring --bidi-workaround. '
+                        'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
+                else:
+                    raise
+
+        self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
+        if auto_init and auto_init != 'no_verbose_header':
+            self.print_debug_header()
+
         def check_deprecated(param, option, suggestion):
             if self.params.get(param) is not None:
-                self.report_warning('%s is deprecated. Use %s instead' % (option, suggestion))
+                self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
                 return True
             return False
@@ -581,9 +665,9 @@ class YoutubeDL(object):

         for msg in self.params.get('_warnings', []):
             self.report_warning(msg)
         for msg in self.params.get('_deprecation_warnings', []):
-            self.deprecation_warning(msg)
+            self.deprecated_feature(msg)

-        if 'list-formats' in self.params.get('compat_opts', []):
+        if 'list-formats' in self.params['compat_opts']:
             self.params['listformats_table'] = False

         if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
@@ -596,6 +680,13 @@ class YoutubeDL(object):
         else:
             self.params['nooverwrites'] = not self.params['overwrites']

+        if self.params.get('simulate') is None and any((
+            self.params.get('list_thumbnails'),
+            self.params.get('listformats'),
+            self.params.get('listsubtitles'),
+        )):
+            self.params['simulate'] = 'list_only'
+
         self.params.setdefault('forceprint', {})
         self.params.setdefault('print_to_file', {})
@@ -603,31 +694,8 @@ class YoutubeDL(object):
         if not isinstance(params['forceprint'], dict):
             self.params['forceprint'] = {'video': params['forceprint']}

-        if self.params.get('bidi_workaround', False):
-            try:
-                import pty
-                master, slave = pty.openpty()
-                width = compat_get_terminal_size().columns
-                if width is None:
-                    width_args = []
-                else:
-                    width_args = ['-w', str(width)]
-                sp_kwargs = dict(
-                    stdin=subprocess.PIPE,
-                    stdout=slave,
-                    stderr=self._err_file)
-                try:
-                    self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
-                except OSError:
-                    self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
-                self._output_channel = os.fdopen(master, 'rb')
-            except OSError as ose:
-                if ose.errno == errno.ENOENT:
-                    self.report_warning(
-                        'Could not find fribidi executable, ignoring --bidi-workaround. '
-                        'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
-                else:
-                    raise
+        if auto_init:
+            self.add_default_info_extractors()

         if (sys.platform != 'win32'
                 and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
@@ -639,7 +707,7 @@ class YoutubeDL(object):
                 'Set the LC_ALL environment variable to fix this.')
             self.params['restrictfilenames'] = True

-        self.outtmpl_dict = self.parse_outtmpl()
+        self._parse_outtmpl()

         # Creating format selector here allows us to catch syntax errors before the extraction
         self.format_selector = (
             None if self.params.get('format') is None
             else self.params['format'] if callable(self.params['format'])
             else self.build_format_selector(self.params['format']))

-        self._setup_opener()
-
-        if auto_init:
-            if auto_init != 'no_verbose_header':
-                self.print_debug_header()
-            self.add_default_info_extractors()
+        # Set http_headers defaults according to std_headers
+        self.params['http_headers'] = merge_headers(std_headers, self.params.get('http_headers', {}))

         hooks = {
             'post_hooks': self.add_post_hook,
@@ -667,28 +731,31 @@ class YoutubeDL(object):
             pp_def = dict(pp_def_raw)
             when = pp_def.pop('when', 'post_process')
             self.add_post_processor(
-                get_postprocessor(pp_def.pop('key'))(self, **compat_kwargs(pp_def)),
+                get_postprocessor(pp_def.pop('key'))(self, **pp_def),
                 when=when)

+        self._setup_opener()
         register_socks_protocols()

         def preload_download_archive(fn):
             """Preload the archive, if any is specified"""
+            archive = set()
             if fn is None:
-                return False
+                return archive
+            elif not is_path_like(fn):
+                return fn
+
+            self.write_debug(f'Loading archive file {fn!r}')
             try:
                 with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                     for line in archive_file:
-                        self.archive.add(line.strip())
-            except IOError as ioe:
+                        archive.add(line.strip())
+            except OSError as ioe:
                 if ioe.errno != errno.ENOENT:
                     raise
-                return False
-            return True
+            return archive

-        self.archive = set()
-        preload_download_archive(self.params.get('download_archive'))
+        self.archive = preload_download_archive(self.params.get('download_archive'))

     def warn_if_short_id(self, argv):
         # short YouTube ID starting with dash?
@@ -714,13 +781,6 @@ class YoutubeDL(object):
         self._ies_instances[ie_key] = ie
         ie.set_downloader(self)

-    def _get_info_extractor_class(self, ie_key):
-        ie = self._ies.get(ie_key)
-        if ie is None:
-            ie = get_info_extractor(ie_key)
-            self.add_info_extractor(ie)
-        return ie
-
     def get_info_extractor(self, ie_key):
         """
         Get an instance of an IE with name ie_key, it will try to get one from
@@ -737,11 +797,23 @@ class YoutubeDL(object):
         """
         Add the InfoExtractors returned by gen_extractors to the end of the list
         """
-        for ie in gen_extractor_classes():
-            self.add_info_extractor(ie)
+        all_ies = {ie.IE_NAME.lower(): ie for ie in gen_extractor_classes()}
+        all_ies['end'] = UnsupportedURLIE()
+        try:
+            ie_names = orderedSet_from_options(
+                self.params.get('allowed_extractors', ['default']), {
+                    'all': list(all_ies),
+                    'default': [name for name, ie in all_ies.items() if ie._ENABLED],
+                }, use_regex=True)
+        except re.error as e:
+            raise ValueError(f'Wrong regex for allowed_extractors: {e.pattern}')
+        for name in ie_names:
+            self.add_info_extractor(all_ies[name])
+        self.write_debug(f'Loaded {len(ie_names)} extractors')
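A sketch (not part of the patch) of the new allowed_extractors option wired up
above; note use_regex=True, so the pattern below is a regex and an invalid
pattern raises ValueError:

    from yt_dlp import YoutubeDL

    # Load only extractors whose lowercased IE_NAME matches, e.g. 'youtube', 'youtube:tab', ...
    ydl = YoutubeDL({'allowed_extractors': ['youtube.*']})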
' + 'Use "YoutubeDL.to_screen" instead') + self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out) + + def to_screen(self, message, skip_eol=False, quiet=None, only_once=False): + """Print message to screen if not in quiet mode""" if self.params.get('logger'): self.params['logger'].debug(message) - elif not quiet or self.params.get('verbose'): - self._write_string( - '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')), - self._err_file if quiet else self._screen_file) + return + if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'): + return + self._write_string( + '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')), + self._out_files.screen, only_once=only_once) def to_stderr(self, message, only_once=False): """Print message to stderr""" - assert isinstance(message, compat_str) + assert isinstance(message, str) if self.params.get('logger'): self.params['logger'].error(message) else: - self._write_string('%s\n' % self._bidi_workaround(message), self._err_file, only_once=only_once) + self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once) + + def _send_console_code(self, code): + if compat_os_name == 'nt' or not self._out_files.console: + return + self._write_string(code, self._out_files.console) def to_console_title(self, message): if not self.params.get('consoletitle', False): @@ -806,26 +895,18 @@ class YoutubeDL(object): # c_wchar_p() might not be necessary if `message` is # already of type unicode() ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message)) - elif 'TERM' in os.environ: - self._write_string('\033]0;%s\007' % message, self._screen_file) + else: + self._send_console_code(f'\033]0;{message}\007') def save_console_title(self): - if not self.params.get('consoletitle', False): + if not self.params.get('consoletitle') or self.params.get('simulate'): return - if self.params.get('simulate'): - return - if compat_os_name != 'nt' and 'TERM' in os.environ: - # Save the title on stack - self._write_string('\033[22;0t', self._screen_file) + self._send_console_code('\033[22;0t') # Save the title on stack def restore_console_title(self): - if not self.params.get('consoletitle', False): + if not self.params.get('consoletitle') or self.params.get('simulate'): return - if self.params.get('simulate'): - return - if compat_os_name != 'nt' and 'TERM' in os.environ: - # Restore the title from stack - self._write_string('\033[23;0t', self._screen_file) + self._send_console_code('\033[23;0t') # Restore the title from stack def __enter__(self): self.save_console_title() @@ -871,38 +952,36 @@ class YoutubeDL(object): raise DownloadError(message, exc_info) self._download_retcode = 1 - def to_screen(self, message, skip_eol=False): - """Print message to stdout if not in quiet mode""" - self.to_stdout( - message, skip_eol, quiet=self.params.get('quiet', False)) - - class Styles(Enum): - HEADERS = 'yellow' - EMPHASIS = 'light blue' - ID = 'green' - DELIM = 'blue' - ERROR = 'red' - WARNING = 'yellow' - SUPPRESS = 'light black' + Styles = Namespace( + HEADERS='yellow', + EMPHASIS='light blue', + FILENAME='green', + ID='green', + DELIM='blue', + ERROR='red', + WARNING='yellow', + SUPPRESS='light black', + ) def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False): + text = str(text) if test_encoding: original_text = text - encoding = self.params.get('encoding') or getattr(handle, 'encoding', 'ascii') + # handle.encoding 
can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711 + encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii' text = text.encode(encoding, 'ignore').decode(encoding) if fallback is not None and text != original_text: text = fallback - if isinstance(f, self.Styles): - f = f.value return format_text(text, f) if allow_colors else text if fallback is None else fallback + def _format_out(self, *args, **kwargs): + return self._format_text(self._out_files.out, self._allow_colors.out, *args, **kwargs) + def _format_screen(self, *args, **kwargs): - return self._format_text( - self._screen_file, self._allow_colors['screen'], *args, **kwargs) + return self._format_text(self._out_files.screen, self._allow_colors.screen, *args, **kwargs) def _format_err(self, *args, **kwargs): - return self._format_text( - self._err_file, self._allow_colors['err'], *args, **kwargs) + return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs) def report_warning(self, message, only_once=False): ''' @@ -916,11 +995,14 @@ class YoutubeDL(object): return self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once) - def deprecation_warning(self, message): + def deprecation_warning(self, message, *, stacklevel=0): + deprecation_warning( + message, stacklevel=stacklevel + 1, printer=self.report_error, is_error=False) + + def deprecated_feature(self, message): if self.params.get('logger') is not None: - self.params['logger'].warning('DeprecationWarning: {message}') - else: - self.to_stderr(f'{self._format_err("DeprecationWarning:", self.Styles.ERROR)} {message}', True) + self.params['logger'].warning(f'Deprecated Feature: {message}') + self.to_stderr(f'{self._format_err("Deprecated Feature:", self.Styles.ERROR)} {message}', True) def report_error(self, message, *args, **kwargs): ''' @@ -933,7 +1015,7 @@ class YoutubeDL(object): '''Log debug message or Print message to stderr''' if not self.params.get('verbose', False): return - message = '[debug] %s' % message + message = f'[debug] {message}' if self.params.get('logger'): self.params['logger'].debug(message) else: @@ -953,48 +1035,38 @@ class YoutubeDL(object): except UnicodeEncodeError: self.to_screen('Deleting existing file') - def raise_no_formats(self, info, forced=False): - has_drm = info.get('__has_drm') - msg = 'This video is DRM protected' if has_drm else 'No video formats found!' - expected = self.params.get('ignore_no_formats_error') - if forced or not expected: + def raise_no_formats(self, info, forced=False, *, msg=None): + has_drm = info.get('_has_drm') + ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg) + msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!' 
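The printing helpers above all defer to a user-supplied logger when one is
configured; a minimal sketch (not part of the patch, logger name arbitrary):

    import logging
    from yt_dlp import YoutubeDL

    logging.basicConfig(level=logging.DEBUG)
    # write_debug/report_warning/report_error are routed here instead of the console
    ydl = YoutubeDL({'logger': logging.getLogger('ydl')})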

-    def raise_no_formats(self, info, forced=False):
-        has_drm = info.get('__has_drm')
-        msg = 'This video is DRM protected' if has_drm else 'No video formats found!'
-        expected = self.params.get('ignore_no_formats_error')
-        if forced or not expected:
+    def raise_no_formats(self, info, forced=False, *, msg=None):
+        has_drm = info.get('_has_drm')
+        ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
+        msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
+        if forced or not ignored:
             raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
-                                 expected=has_drm or expected)
+                                 expected=has_drm or ignored or expected)
         else:
             self.report_warning(msg)

     def parse_outtmpl(self):
-        outtmpl_dict = self.params.get('outtmpl', {})
-        if not isinstance(outtmpl_dict, dict):
-            outtmpl_dict = {'default': outtmpl_dict}
-        # Remove spaces in the default template
-        if self.params.get('restrictfilenames'):
+        self.deprecation_warning('"YoutubeDL.parse_outtmpl" is deprecated and may be removed in a future version')
+        self._parse_outtmpl()
+        return self.params['outtmpl']
+
+    def _parse_outtmpl(self):
+        sanitize = IDENTITY
+        if self.params.get('restrictfilenames'):  # Remove spaces in the default template
             sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')
-        else:
-            sanitize = lambda x: x
-        outtmpl_dict.update({
-            k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items()
-            if outtmpl_dict.get(k) is None})
-        for key, val in outtmpl_dict.items():
-            if isinstance(val, bytes):
-                self.report_warning(
-                    'Parameter outtmpl is bytes, but should be a unicode string. '
-                    'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')
-        return outtmpl_dict
+
+        outtmpl = self.params.setdefault('outtmpl', {})
+        if not isinstance(outtmpl, dict):
+            self.params['outtmpl'] = outtmpl = {'default': outtmpl}
+        outtmpl.update({k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items() if outtmpl.get(k) is None})

     def get_output_path(self, dir_type='', filename=None):
         paths = self.params.get('paths', {})
-        assert isinstance(paths, dict)
+        assert isinstance(paths, dict), '"paths" parameter must be a dictionary'
         path = os.path.join(
             expand_path(paths.get('home', '').strip()),
             expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
             filename or '')
-
-        # Temporary fix for #4787
-        # 'Treat' all problem characters by passing filename through preferredencoding
-        # to workaround encoding issues with subprocess on python2 @ Windows
-        if sys.version_info < (3, 0) and sys.platform == 'win32':
-            path = encodeFilename(path, True).decode(preferredencoding())
         return sanitize_path(path, force=self.params.get('windowsfilenames'))

     @staticmethod
@@ -1003,12 +1075,12 @@ class YoutubeDL(object):
         # correspondingly that is not what we want since we need to keep
         # '%%' intact for template dict substitution step. Working around
         # with boundary-alike separator hack.
-        sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
-        outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))
+        sep = ''.join(random.choices(ascii_letters, k=32))
+        outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')

         # outtmpl should be expand_path'ed before template dict substitution
         # because meta fields may contain env variables we don't want to
-        # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
+        # be expanded. E.g. for outtmpl "%(title)s.%(ext)s" and
         # title "Hello $PATH", we don't want `$PATH` to be expanded.
         return expand_path(outtmpl).replace(sep, '')
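_parse_outtmpl above normalizes outtmpl into a dict of template types; a sketch
(not part of the patch) of the two accepted shapes, with example paths:

    ydl_opts = {'outtmpl': '%(title)s [%(id)s].%(ext)s'}  # becomes {'default': ...}

    ydl_opts = {
        'outtmpl': {'default': '%(title)s [%(id)s].%(ext)s',
                    'infojson': 'meta/%(id)s.%(ext)s'},
        'paths': {'home': '~/Videos', 'temp': '/tmp/ydl'},  # joined by get_output_path()
    }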
@@ -1024,7 +1096,7 @@ class YoutubeDL(object):
     def validate_outtmpl(cls, outtmpl):
         ''' @return None or Exception object '''
         outtmpl = re.sub(
-            STR_FORMAT_RE_TMPL.format('[^)]*', '[ljqBUDS]'),
+            STR_FORMAT_RE_TMPL.format('[^)]*', '[ljhqBUDS]'),
             lambda mobj: f'{mobj.group(0)[:-1]}s',
             cls._outtmpl_expandpath(outtmpl))
         try:
@@ -1036,8 +1108,8 @@ class YoutubeDL(object):
     @staticmethod
     def _copy_infodict(info_dict):
         info_dict = dict(info_dict)
-        for key in ('__original_infodict', '__postprocessors'):
-            info_dict.pop(key, None)
+        info_dict.pop('__postprocessors', None)
+        info_dict.pop('__pending_error', None)
         return info_dict

     def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
@@ -1053,7 +1125,7 @@ class YoutubeDL(object):
             formatSeconds(info_dict['duration'], '-' if sanitize else ':')
             if info_dict.get('duration', None) is not None
             else None)
-        info_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads
+        info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
         info_dict['video_autonumber'] = self._num_videos
         if info_dict.get('resolution') is None:
             info_dict['resolution'] = self.format_resolution(info_dict, default=None)
@@ -1061,37 +1133,51 @@ class YoutubeDL(object):
         # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
         # of %(field)s to %(field)0Nd for backward compatibility
         field_size_compat_map = {
-            'playlist_index': number_of_digits(info_dict.get('_last_playlist_index') or 0),
+            'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
             'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
             'autonumber': self.params.get('autonumber_size') or 5,
         }

         TMPL_DICT = {}
-        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljqBUDS]'))
+        EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljhqBUDS]'))
         MATH_FUNCTIONS = {
             '+': float.__add__,
             '-': float.__sub__,
         }
         # Field is of the form key1.key2...
-        # where keys (except first) can be string, int or slice
-        FIELD_RE = r'\w*(?:\.(?:\w+|{num}|{num}?(?::{num}?){{1,2}}))*'.format(num=r'(?:-?\d+)')
-        MATH_FIELD_RE = r'''(?:{field}|{num})'''.format(field=FIELD_RE, num=r'-?\d+(?:.\d+)?')
+        # where keys (except first) can be string, int, slice or "{field, ...}"
+        FIELD_INNER_RE = r'(?:\w+|%(num)s|%(num)s?(?::%(num)s?){1,2})' % {'num': r'(?:-?\d+)'}
+        FIELD_RE = r'\w*(?:\.(?:%(inner)s|{%(field)s(?:,%(field)s)*}))*' % {
+            'inner': FIELD_INNER_RE,
+            'field': rf'\w*(?:\.{FIELD_INNER_RE})*'
+        }
+        MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
         MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
-        INTERNAL_FORMAT_RE = re.compile(r'''(?x)
+        INTERNAL_FORMAT_RE = re.compile(rf'''(?x)
             (?P<negate>-)?
-            (?P<fields>{field})
-            (?P<maths>(?:{math_op}{math_field})*)
+            (?P<fields>{FIELD_RE})
+            (?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
             (?:>(?P<strf_format>.+?))?
-            (?P<alternate>(?<!\\),[^|&)]+)?(?:&(?P<replacement>.*?))?
-            (?:\|(?P<default>.*?))?
-            $'''.format(field=FIELD_RE, math_op=MATH_OPERATORS_RE, math_field=MATH_FIELD_RE))
+            (?P<remaining>
+                (?P<alternate>(?<!\\),[^{{}}|&)]+)?(?:&(?P<replacement>.*?))?
+                (?:\|(?P<default>.*?))?
+            )$''')

-        def _traverse_infodict(k):
-            k = k.split('.')
-            if k[0] == '':
-                k.pop(0)
-            return traverse_obj(info_dict, k, is_user_input=True, traverse_string=True)
+        def _traverse_infodict(fields):
+            fields = [f for x in re.split(r'\.({.+?})\.?', fields)
+                      for f in ([x] if x.startswith('{') else x.split('.'))]
+            for i in (0, -1):
+                if fields and not fields[i]:
+                    fields.pop(i)
+
+            for i, f in enumerate(fields):
+                if not f.startswith('{'):
+                    continue
+                assert f.endswith('}'), f'No closing brace for {f} in {fields}'
+                fields[i] = {k: k.split('.') for k in f[1:-1].split(',')}
+
+            return traverse_obj(info_dict, fields, is_user_input=True, traverse_string=True)

         def get_value(mdict):
             # Object traversal
@@ -1127,13 +1213,18 @@ class YoutubeDL(object):
             if mdict['strf_format']:
                 value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))

+            # XXX: Workaround for https://github.com/yt-dlp/yt-dlp/issues/4485
+            if sanitize and value == '':
+                value = None
             return value

         na = self.params.get('outtmpl_na_placeholder', 'NA')

         def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
-            return sanitize_filename(str(value), restricted=restricted,
-                                     is_id=re.search(r'(^|[_.])id(\.|$)', key))
+            return sanitize_filename(str(value), restricted=restricted, is_id=(
+                bool(re.search(r'(^|[_.])id(\.|$)', key))
+                if 'filename-sanitization' in self.params['compat_opts']
+                else NO_DEFAULT))

         sanitizer = sanitize if callable(sanitize) else filename_sanitizer
         sanitize = bool(sanitize)
@@ -1156,13 +1247,13 @@ class YoutubeDL(object):
                 value = get_value(mobj)
                 replacement = mobj['replacement']
                 if value is None and mobj['alternate']:
-                    mobj = re.match(INTERNAL_FORMAT_RE, mobj['alternate'][1:])
+                    mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
                 else:
                     break

             fmt = outer_mobj.group('format')
             if fmt == 's' and value is not None and key in field_size_compat_map.keys():
-                fmt = '0{:d}d'.format(field_size_compat_map[key])
+                fmt = f'0{field_size_compat_map[key]:d}d'

             value = default if value is None else value if replacement is None else replacement

@@ -1172,12 +1263,16 @@ class YoutubeDL(object):
                 delim = '\n' if '#' in flags else ', '
                 value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
             elif fmt[-1] == 'j':  # json
-                value, fmt = json.dumps(value, default=_dumpjson_default, indent=4 if '#' in flags else None), str_fmt
+                value, fmt = json.dumps(
+                    value, default=_dumpjson_default,
+                    indent=4 if '#' in flags else None, ensure_ascii='+' not in flags), str_fmt
+            elif fmt[-1] == 'h':  # html
+                value, fmt = escapeHTML(str(value)), str_fmt
             elif fmt[-1] == 'q':  # quoted
                 value = map(str, variadic(value) if '#' in flags else [value])
                 value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
             elif fmt[-1] == 'B':  # bytes
-                value = f'%{str_fmt}'.encode('utf-8') % str(value).encode('utf-8')
+                value = f'%{str_fmt}'.encode() % str(value).encode()
                 value, fmt = value.decode('utf-8', 'ignore'), 's'
             elif fmt[-1] == 'U':  # unicode normalized
                 value, fmt = unicodedata.normalize(
@@ -1218,18 +1313,21 @@ class YoutubeDL(object):
         outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
         return self.escape_outtmpl(outtmpl) % info_dict

-    def _prepare_filename(self, info_dict, tmpl_type='default'):
+    def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
+        assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
+        if outtmpl is None:
+            outtmpl = self.params['outtmpl'].get(tmpl_type or 'default', self.params['outtmpl']['default'])
         try:
-            outtmpl = self._outtmpl_expandpath(self.outtmpl_dict.get(tmpl_type, self.outtmpl_dict['default']))
+            outtmpl = self._outtmpl_expandpath(outtmpl)
             filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
             if not filename:
                 return None

-            if tmpl_type in ('default', 'temp'):
+            if tmpl_type in ('', 'temp'):
                 final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
                 if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
                     filename = replace_extension(filename, ext, final_ext)
-            else:
+            elif tmpl_type:
                 force_ext = OUTTMPL_TYPES[tmpl_type]
                 if force_ext:
                     filename = replace_extension(filename, force_ext, info_dict.get('ext'))
@@ -1245,10 +1343,12 @@ class YoutubeDL(object):
             self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
             return None

-    def prepare_filename(self, info_dict, dir_type='', warn=False):
-        """Generate the output filename."""
-
-        filename = self._prepare_filename(info_dict, dir_type or 'default')
+    def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
+        """Generate the output filename"""
+        if outtmpl:
+            assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
+            dir_type = None
+        filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
         if not filename and dir_type not in ('', 'temp'):
             return ''

@@ -1265,11 +1365,19 @@ class YoutubeDL(object):
         return self.get_output_path(dir_type, filename)
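prepare_filename can now evaluate a one-off template through the new outtmpl
keyword instead of a configured template type; a sketch (not part of the patch,
URL and template illustrative):

    from yt_dlp import YoutubeDL

    with YoutubeDL() as ydl:
        info = ydl.extract_info('https://example.com/watch?v=placeholder', download=False)
        name = ydl.prepare_filename(info, outtmpl='%(uploader|unknown)s - %(title).50s.%(ext)s')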

     def _match_entry(self, info_dict, incomplete=False, silent=False):
-        """ Returns None if the file should be downloaded """
+        """Returns None if the file should be downloaded"""
+        _type = info_dict.get('_type', 'video')
+        assert incomplete or _type == 'video', 'Only video result can be considered complete'

-        video_title = info_dict.get('title', info_dict.get('id', 'video'))
+        video_title = info_dict.get('title', info_dict.get('id', 'entry'))

         def check_filter():
+            if _type in ('playlist', 'multi_video'):
+                return
+            elif _type in ('url', 'url_transparent') and not try_call(
+                    lambda: self.get_info_extractor(info_dict['ie_key']).is_single_video(info_dict['url'])):
+                return
+
             if 'title' in info_dict:
                 # This can happen when we're just evaluating the playlist
                 title = info_dict['title']
@@ -1281,11 +1389,12 @@ class YoutubeDL(object):
             if rejecttitle:
                 if re.search(rejecttitle, title, re.IGNORECASE):
                     return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
+
             date = info_dict.get('upload_date')
             if date is not None:
                 dateRange = self.params.get('daterange', DateRange())
                 if date not in dateRange:
-                    return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
+                    return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
             view_count = info_dict.get('view_count')
             if view_count is not None:
                 min_views = self.params.get('min_views')
@@ -1304,7 +1413,16 @@ class YoutubeDL(object):
                 except TypeError:
                     # For backward compatibility
                     ret = None if incomplete else match_filter(info_dict)
-            if ret is not None:
+            if ret is NO_DEFAULT:
+                while True:
+                    filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
+                    reply = input(self._format_screen(
+                        f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
+                    if reply in {'y', ''}:
+                        return None
+                    elif reply == 'n':
+                        return f'Skipping {video_title}'
+            elif ret is not None:
                 return ret
             return None
@@ -1330,18 +1448,19 @@ class YoutubeDL(object):
     def extract_info(self, url, download=True, ie_key=None, extra_info=None,
                      process=True, force_generic_extractor=False):
         """
-        Return a list with a dictionary for each video extracted.
+        Extract and return the information dictionary of the URL

         Arguments:
-        url -- URL to extract
+        @param url          URL to extract

         Keyword arguments:
-        download -- whether to download videos during extraction
-        ie_key -- extractor key hint
-        extra_info -- dictionary containing the extra values to add to each result
-        process -- whether to resolve all unresolved references (URLs, playlist items),
-                   must be True for download to work.
-        force_generic_extractor -- force using the generic extractor
+        @param download     Whether to download videos
+        @param process      Whether to resolve all unresolved references (URLs, playlist items).
+                            Must be True for download to work
+        @param ie_key       Use only the extractor with this key
+
+        @param extra_info   Dictionary containing the extra values to add to the info (For internal use only)
+        @force_generic_extractor  Force using the generic extractor (Deprecated; use ie_key='Generic')
         """

         if extra_info is None:
             extra_info = {}

@@ -1351,11 +1470,11 @@ class YoutubeDL(object):
             ie_key = 'Generic'

         if ie_key:
-            ies = {ie_key: self._get_info_extractor_class(ie_key)}
+            ies = {ie_key: self._ies[ie_key]} if ie_key in self._ies else {}
         else:
             ies = self._ies

-        for ie_key, ie in ies.items():
+        for key, ie in ies.items():
             if not ie.suitable(url):
                 continue

@@ -1364,16 +1483,18 @@ class YoutubeDL(object):
                                     'and will probably not work.')

             temp_id = ie.get_temp_id(url)
-            if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
-                self.to_screen(f'[{ie_key}] {temp_id}: has already been recorded in the archive')
+            if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': key}):
+                self.to_screen(f'[{key}] {temp_id}: has already been recorded in the archive')
                 if self.params.get('break_on_existing', False):
                     raise ExistingVideoReached()
                 break
-            return self.__extract_info(url, self.get_info_extractor(ie_key), download, extra_info, process)
+            return self.__extract_info(url, self.get_info_extractor(key), download, extra_info, process)
         else:
-            self.report_error('no suitable InfoExtractor for URL %s' % url)
+            extractors_restricted = self.params.get('allowed_extractors') not in (None, ['default'])
+            self.report_error(f'No suitable extractor{format_field(ie_key, None, " (%s)")} found for URL {url}',
                              tb=False if extractors_restricted else None)
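The reworked extract_info signature in use; a sketch (not part of the patch,
URL is a placeholder):

    from yt_dlp import YoutubeDL

    with YoutubeDL() as ydl:
        # Metadata only; ie_key pins the lookup to a single extractor
        info = ydl.extract_info('https://www.youtube.com/watch?v=placeholder',
                                download=False, ie_key='Youtube')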

-    def __handle_extraction_exceptions(func):
+    def _handle_extraction_exceptions(func):
         @functools.wraps(func)
         def wrapper(self, *args, **kwargs):
             while True:
@@ -1405,7 +1526,7 @@ class YoutubeDL(object):
                 break
         return wrapper

-    def _wait_for_video(self, ie_result):
+    def _wait_for_video(self, ie_result={}):
         if (not self.params.get('wait_for_video')
                 or ie_result.get('_type', 'video') != 'video'
                 or ie_result.get('formats') or ie_result.get('url')):
@@ -1416,15 +1537,20 @@ class YoutubeDL(object):

         def progress(msg):
             nonlocal last_msg
-            self.to_screen(msg + ' ' * (len(last_msg) - len(msg)) + '\r', skip_eol=True)
+            full_msg = f'{msg}\n'
+            if not self.params.get('noprogress'):
+                full_msg = msg + ' ' * (len(last_msg) - len(msg)) + '\r'
+            elif last_msg:
+                return
+            self.to_screen(full_msg, skip_eol=True)
             last_msg = msg

         min_wait, max_wait = self.params.get('wait_for_video')
         diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
         if diff is None and ie_result.get('live_status') == 'is_upcoming':
-            diff = random.randrange(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait)
+            diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
             self.report_warning('Release time of video is not known')
-        elif (diff or 0) <= 0:
+        elif ie_result and (diff or 0) <= 0:
             self.report_warning('Video should already be available according to extracted info')
         diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
         self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')
@@ -1446,10 +1572,18 @@ class YoutubeDL(object):
                 self.to_screen('')
             raise

-    @__handle_extraction_exceptions
+    @_handle_extraction_exceptions
     def __extract_info(self, url, ie, download, extra_info, process):
-        ie_result = ie.extract(url)
+        try:
+            ie_result = ie.extract(url)
+        except UserNotLive as e:
+            if process:
+                if self.params.get('wait_for_video'):
+                    self.report_warning(e)
+                self._wait_for_video()
+            raise
         if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
+            self.report_warning(f'Extractor {ie.IE_NAME} returned nothing{bug_reports_message()}')
             return
         if isinstance(ie_result, list):
             # Backwards compatibility: old IE result format
@@ -1471,8 +1605,12 @@ class YoutubeDL(object):
         self.add_extra_info(ie_result, {
             'webpage_url': url,
             'original_url': url,
-            'webpage_url_basename': url_basename(url),
-            'webpage_url_domain': get_domain(url),
+        })
+        webpage_url = ie_result.get('webpage_url')
+        if webpage_url:
+            self.add_extra_info(ie_result, {
+                'webpage_url_basename': url_basename(webpage_url),
+                'webpage_url_domain': get_domain(webpage_url),
             })
         if ie is not None:
             self.add_extra_info(ie_result, {
@@ -1493,9 +1631,10 @@ class YoutubeDL(object):

         result_type = ie_result.get('_type', 'video')
         if result_type in ('url', 'url_transparent'):
-            ie_result['url'] = sanitize_url(ie_result['url'])
-            if ie_result.get('original_url'):
-                extra_info.setdefault('original_url', ie_result['original_url'])
+            ie_result['url'] = sanitize_url(
+                ie_result['url'], scheme='http' if self.params.get('prefer_insecure') else 'https')
+            if ie_result.get('original_url') and not extra_info.get('original_url'):
+                extra_info = {'original_url': ie_result['original_url'], **extra_info}

             extract_flat = self.params.get('extract_flat', False)
             if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
@@ -1507,7 +1646,9 @@ class YoutubeDL(object):
                 self.add_default_extra_info(info_copy, ie, ie_result['url'])
                 self.add_extra_info(info_copy, extra_info)
                 info_copy, _ = self.pre_process(info_copy)
+                self._fill_common_fields(info_copy, False)
                 self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
+                self._raise_pending_errors(info_copy)
                 if self.params.get('force_write_download_archive', False):
                     self.record_download_archive(info_copy)
                 return ie_result

         if result_type == 'video':
             self.add_extra_info(ie_result, extra_info)
             ie_result = self.process_video_result(ie_result, download=download)
+            self._raise_pending_errors(ie_result)
             additional_urls = (ie_result or {}).get('additional_urls')
             if additional_urls:
                 # TODO: Improve MetadataParserPP to allow setting a list
-                if isinstance(additional_urls, compat_str):
+                if isinstance(additional_urls, str):
                     additional_urls = [additional_urls]
                 self.to_screen(
                     '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
@@ -1549,13 +1691,13 @@ class YoutubeDL(object):
         if not info:
             return info

-        force_properties = dict(
-            (k, v) for k, v in ie_result.items() if v is not None)
-        for f in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'):
-            if f in force_properties:
-                del force_properties[f]
+        exempted_fields = {'_type', 'url', 'ie_key'}
+        if not ie_result.get('section_end') and ie_result.get('section_start') is None:
+            # For video clips, the id etc of the clip extractor should be used
+            exempted_fields |= {'id', 'extractor', 'extractor_key'}
+
         new_result = info.copy()
-        new_result.update(force_properties)
+        new_result.update(filter_dict(ie_result, lambda k, v: v is not None and k not in exempted_fields))

         # Extracted info may not be a video result (i.e.
         # info.get('_type', 'video') != video) but rather an url or
@@ -1571,8 +1713,8 @@ class YoutubeDL(object):
         elif result_type in ('playlist', 'multi_video'):
             # Protect from infinite recursion due to recursively nested playlists
             # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
-            webpage_url = ie_result['webpage_url']
-            if webpage_url in self._playlist_urls:
+            webpage_url = ie_result.get('webpage_url')  # Playlists maynot have webpage_url
+            if webpage_url and webpage_url in self._playlist_urls:
                 self.to_screen(
                     '[download] Skipping already downloaded playlist: %s'
                     % ie_result.get('title') or ie_result.get('id'))
@@ -1580,6 +1722,7 @@ class YoutubeDL(object):

             self._playlist_level += 1
             self._playlist_urls.add(webpage_url)
+            self._fill_common_fields(ie_result, False)
             self._sanitize_thumbnails(ie_result)
             try:
                 return self.__process_playlist(ie_result, download)
@@ -1613,124 +1756,65 @@ class YoutubeDL(object):
         return make_dir(path, self.report_error)

     @staticmethod
-    def _playlist_infodict(ie_result, **kwargs):
-        return {
-            **ie_result,
+    def _playlist_infodict(ie_result, strict=False, **kwargs):
+        info = {
+            'playlist_count': ie_result.get('playlist_count'),
             'playlist': ie_result.get('title') or ie_result.get('id'),
             'playlist_id': ie_result.get('id'),
             'playlist_title': ie_result.get('title'),
             'playlist_uploader': ie_result.get('uploader'),
             'playlist_uploader_id': ie_result.get('uploader_id'),
-            'playlist_index': 0,
             **kwargs,
         }
+        if strict:
+            return info
+        if ie_result.get('webpage_url'):
+            info.update({
+                'webpage_url': ie_result['webpage_url'],
+                'webpage_url_basename': url_basename(ie_result['webpage_url']),
+                'webpage_url_domain': get_domain(ie_result['webpage_url']),
+            })
+        return {
+            **info,
+            'playlist_index': 0,
+            '__last_playlist_index': max(ie_result.get('requested_entries') or (0, 0)),
+            'extractor': ie_result['extractor'],
+            'extractor_key': ie_result['extractor_key'],
+        }

     def __process_playlist(self, ie_result, download):
-        # We process each entry in the playlist
-        playlist = ie_result.get('title') or ie_result.get('id')
-        self.to_screen('[download] Downloading playlist: %s' % playlist)
+        """Process each entry in the playlist"""
+        assert ie_result['_type'] in ('playlist', 'multi_video')

-        if 'entries' not in ie_result:
-            raise EntryNotInPlaylist('There are no entries')
+        common_info = self._playlist_infodict(ie_result, strict=True)
+        title = common_info.get('playlist') or ''
+        if self._match_entry(common_info, incomplete=True) is not None:
+            return
+        self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')

-        MissingEntry = object()
-        incomplete_entries = bool(ie_result.get('requested_entries'))
-        if incomplete_entries:
-            def fill_missing_entries(entries, indices):
-                ret = [MissingEntry] * max(indices)
-                for i, entry in zip(indices, entries):
-                    ret[i - 1] = entry
-                return ret
-            ie_result['entries'] = fill_missing_entries(ie_result['entries'], ie_result['requested_entries'])
+        all_entries = PlaylistEntries(self, ie_result)
+        entries = orderedSet(all_entries.get_requested_items(), lazy=True)

-        playlist_results = []
-
-        playliststart = self.params.get('playliststart', 1)
-        playlistend = self.params.get('playlistend')
-        # For backwards compatibility, interpret -1 as whole list
-        if playlistend == -1:
-            playlistend = None
-
-        playlistitems_str = self.params.get('playlist_items')
-        playlistitems = None
-        if playlistitems_str is not None:
-            def iter_playlistitems(format):
-                for string_segment in format.split(','):
-                    if '-' in string_segment:
-                        start, end = string_segment.split('-')
-                        for item in range(int(start), int(end) + 1):
-                            yield int(item)
-                    else:
-                        yield int(string_segment)
-            playlistitems = orderedSet(iter_playlistitems(playlistitems_str))
-
-        ie_entries = ie_result['entries']
-        if isinstance(ie_entries, list):
-            playlist_count = len(ie_entries)
-            msg = f'Collected {playlist_count} videos; downloading %d of them'
-            ie_result['playlist_count'] = ie_result.get('playlist_count') or playlist_count
-
-            def get_entry(i):
-                return ie_entries[i - 1]
+        lazy = self.params.get('lazy_playlist')
+        if lazy:
+            resolved_entries, n_entries = [], 'N/A'
+            ie_result['requested_entries'], ie_result['entries'] = None, None
         else:
-            msg = 'Downloading %d videos'
-            if not isinstance(ie_entries, (PagedList, LazyList)):
-                ie_entries = LazyList(ie_entries)
-            elif isinstance(ie_entries, InAdvancePagedList):
-                if ie_entries._pagesize == 1:
-                    playlist_count = ie_entries._pagecount
+            entries = resolved_entries = list(entries)
+            n_entries = len(resolved_entries)
+            ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])
+        if not ie_result.get('playlist_count'):
+            # Better to do this after potentially exhausting entries
+            ie_result['playlist_count'] = all_entries.get_full_count()

-            def get_entry(i):
-                return YoutubeDL.__handle_extraction_exceptions(
-                    lambda self, i: ie_entries[i - 1]
-                )(self, i)
-
-        entries, broken = [], False
-        items = playlistitems if playlistitems is not None else itertools.count(playliststart)
-        for i in items:
-            if i == 0:
-                continue
-            if playlistitems is None and playlistend is not None and playlistend < i:
-                break
-            entry = None
-            try:
-                entry = get_entry(i)
-                if entry is MissingEntry:
-                    raise EntryNotInPlaylist()
-            except (IndexError, EntryNotInPlaylist):
-                if incomplete_entries:
-                    raise EntryNotInPlaylist(f'Entry {i} cannot be found')
-                elif not playlistitems:
-                    break
-            entries.append(entry)
-            try:
-                if entry is not None:
-                    self._match_entry(entry, incomplete=True, silent=True)
-            except (ExistingVideoReached, RejectedVideoReached):
-                broken = True
-                break
-        ie_result['entries'] = entries
-
-        # Save playlist_index before re-ordering
-        entries = [
-            ((playlistitems[i - 1] if playlistitems else i + playliststart - 1), entry)
-            for i, entry in enumerate(entries, 1)
-            if entry is not None]
-        n_entries = len(entries)
-
-        if not (ie_result.get('playlist_count') or broken or playlistitems or playlistend):
-            ie_result['playlist_count'] = n_entries
-
-        if not playlistitems and (playliststart != 1 or playlistend):
-            playlistitems = list(range(playliststart, playliststart + n_entries))
-        ie_result['requested_entries'] = playlistitems
+        extra = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
+        ie_copy = collections.ChainMap(ie_result, extra)

         _infojson_written = False
         write_playlist_files = self.params.get('allow_playlist_files', True)
         if write_playlist_files and self.params.get('list_thumbnails'):
             self.list_thumbnails(ie_result)
         if write_playlist_files and not self.params.get('simulate'):
-            ie_copy = self._playlist_infodict(ie_result, n_entries=n_entries)
             _infojson_written = self._write_info_json(
                 'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
             if _infojson_written is None:
@@ -1739,69 +1823,83 @@ class YoutubeDL(object):
                     self.prepare_filename(ie_copy, 'pl_description')) is None:
                 return
             # TODO: This should be passed to ThumbnailsConvertor if necessary
-            self._write_thumbnails('playlist', ie_copy, self.prepare_filename(ie_copy, 'pl_thumbnail'))
+            self._write_thumbnails('playlist', ie_result, self.prepare_filename(ie_copy, 'pl_thumbnail'))

-        if self.params.get('playlistreverse', False):
-            entries = entries[::-1]
-        if self.params.get('playlistrandom', False):
+        if lazy:
+            if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
+                self.report_warning('playlistreverse and playlistrandom are not supported with lazy_playlist', only_once=True)
+        elif self.params.get('playlistreverse'):
+            entries.reverse()
+        elif self.params.get('playlistrandom'):
             random.shuffle(entries)

-        x_forwarded_for = ie_result.get('__x_forwarded_for_ip')
+        self.to_screen(f'[{ie_result["extractor"]}] Playlist {title}: Downloading {n_entries} items'
+                       f'{format_field(ie_result, "playlist_count", " of %s")}')
+
+        keep_resolved_entries = self.params.get('extract_flat') != 'discard'
+        if self.params.get('extract_flat') == 'discard_in_playlist':
+            keep_resolved_entries = ie_result['_type'] != 'playlist'
+        if keep_resolved_entries:
+            self.write_debug('The information of all playlist entries will be held in memory')

-        self.to_screen('[%s] playlist %s: %s' % (ie_result['extractor'], playlist, msg % n_entries))
         failures = 0
         max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
-        for i, entry_tuple in enumerate(entries, 1):
-            playlist_index, entry = entry_tuple
-            if 'playlist-index' in self.params.get('compat_opts', []):
-                playlist_index = playlistitems[i - 1] if playlistitems else i + playliststart - 1
-            self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
-            # This __x_forwarded_for_ip thing is a bit ugly but requires
-            # minimal changes
-            if x_forwarded_for:
-                entry['__x_forwarded_for_ip'] = x_forwarded_for
-            extra = {
-                'n_entries': n_entries,
-                '_last_playlist_index': max(playlistitems) if playlistitems else (playlistend or n_entries),
-                'playlist_count': ie_result.get('playlist_count'),
-                'playlist_index': playlist_index,
-                'playlist_autonumber': i,
-                'playlist': playlist,
-                'playlist_id': ie_result.get('id'),
-                'playlist_title': ie_result.get('title'),
-                'playlist_uploader': ie_result.get('uploader'),
-                'playlist_uploader_id': ie_result.get('uploader_id'),
-                'extractor': ie_result['extractor'],
-                'webpage_url': ie_result['webpage_url'],
-                'webpage_url_basename': url_basename(ie_result['webpage_url']),
-                'webpage_url_domain': get_domain(ie_result['webpage_url']),
-                'extractor_key': ie_result['extractor_key'],
-            }
-
-            if self._match_entry(entry, incomplete=True) is not None:
+        for i, (playlist_index, entry) in enumerate(entries):
+            if lazy:
+                resolved_entries.append((playlist_index, entry))
+            if not entry:
                 continue
-            entry_result = self.__process_iterable_entry(entry, download, extra)
+
+            entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
+            if not lazy and 'playlist-index' in self.params.get('compat_opts', []):
+                playlist_index = ie_result['requested_entries'][i]
+
+            entry_copy = collections.ChainMap(entry, {
+                **common_info,
+                'n_entries': int_or_none(n_entries),
+                'playlist_index': playlist_index,
+                'playlist_autonumber': i + 1,
+            })
+
+            if self._match_entry(entry_copy, incomplete=True) is not None:
+                # For compatabilty with youtube-dl. See https://github.com/yt-dlp/yt-dlp/issues/4369
+                resolved_entries[i] = (playlist_index, NO_DEFAULT)
+                continue
+
+            self.to_screen('[download] Downloading item %s of %s' % (
+                self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))
+
+            entry_result = self.__process_iterable_entry(entry, download, collections.ChainMap({
+                'playlist_index': playlist_index,
+                'playlist_autonumber': i + 1,
+            }, extra))
             if not entry_result:
                 failures += 1
             if failures >= max_failures:
                 self.report_error(
-                    'Skipping the remaining entries in playlist "%s" since %d items failed extraction' % (playlist, failures))
+                    f'Skipping the remaining entries in playlist "{title}" since {failures} items failed extraction')
                 break
-            playlist_results.append(entry_result)
-        ie_result['entries'] = playlist_results
+            if keep_resolved_entries:
+                resolved_entries[i] = (playlist_index, entry_result)
+
+        # Update with processed data
+        ie_result['entries'] = [e for _, e in resolved_entries if e is not NO_DEFAULT]
+        ie_result['requested_entries'] = [i for i, e in resolved_entries if e is not NO_DEFAULT]
+        if ie_result['requested_entries'] == try_call(lambda: list(range(1, ie_result['playlist_count'] + 1))):
+            # Do not set for full playlist
+            ie_result.pop('requested_entries')

         # Write the updated info to json
-        if _infojson_written and self._write_info_json(
+        if _infojson_written is True and self._write_info_json(
                 'updated playlist', ie_result,
                 self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
             return

         ie_result = self.run_all_pps('playlist', ie_result)
-        self.to_screen(f'[download] Finished downloading playlist: {playlist}')
+        self.to_screen(f'[download] Finished downloading playlist: {title}')
         return ie_result

-    @__handle_extraction_exceptions
+    @_handle_extraction_exceptions
     def __process_iterable_entry(self, entry, download, extra_info):
         return self.process_ie_result(
             entry, download=download, extra_info=extra_info)
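A sketch (not part of the patch) of the params that drive the rewritten playlist
path; values are illustrative. lazy_playlist trades playlistreverse/playlistrandom
support for streaming extraction:

    ydl_opts = {
        'lazy_playlist': True,                  # process entries as they are extracted
        'playlist_items': '1:10',               # replaces playliststart/playlistend
        'extract_flat': 'discard_in_playlist',  # don't keep processed playlist entries in memory
    }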
+ (?P(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+)) + (?(quote)(?P=quote))\s* ''' % '|'.join(map(re.escape, STR_OPERATORS.keys()))) m = str_operator_rex.fullmatch(filter_spec) if m: - comparison_value = m.group('value') + if m.group('op') == '~=': + comparison_value = re.compile(m.group('value')) + else: + comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value')) str_op = STR_OPERATORS[m.group('op')] if m.group('negation'): op = lambda attr, value: not str_op(attr, value) @@ -1877,7 +1981,7 @@ class YoutubeDL(object): temp_file.close() try: success, _ = self.dl(temp_file.name, f, test=True) - except (DownloadError, IOError, OSError, ValueError) + network_exceptions: + except (DownloadError, OSError, ValueError) + network_exceptions: success = False finally: if os.path.exists(temp_file.name): @@ -1901,12 +2005,12 @@ class YoutubeDL(object): and download and ( not can_merge() - or info_dict.get('is_live', False) - or self.outtmpl_dict['default'] == '-')) + or info_dict.get('is_live') and not self.params.get('live_from_start') + or self.params['outtmpl']['default'] == '-')) compat = ( prefer_best or self.params.get('allow_multiple_audio_streams', False) - or 'format-spec' in self.params.get('compat_opts', [])) + or 'format-spec' in self.params['compat_opts']) return ( 'best/bestvideo+bestaudio' if prefer_best @@ -1917,7 +2021,7 @@ class YoutubeDL(object): def syntax_error(note, start): message = ( 'Invalid format specification: ' - '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1])) + '{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1])) return SyntaxError(message) PICKFIRST = 'PICKFIRST' @@ -1940,8 +2044,8 @@ class YoutubeDL(object): filter_parts.append(string) def _remove_unused_ops(tokens): - # Remove operators that we don't use and join them with the surrounding strings - # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9' + # Remove operators that we don't use and join them with the surrounding strings. + # E.g. 
'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9' ALLOWED_OPS = ('/', '+', ',', '(', ')') last_string, last_start, last_end, last_line = None, None, None, None for type, string, start, end, line in tokens: @@ -2021,7 +2125,7 @@ class YoutubeDL(object): raise syntax_error('Expected a selector', start) current_selector = FormatSelector(MERGE, (selector_1, selector_2), []) else: - raise syntax_error('Operator not recognized: "{0}"'.format(string), start) + raise syntax_error(f'Operator not recognized: "{string}"', start) elif type == tokenize.ENDMARKER: break if current_selector: @@ -2057,14 +2161,13 @@ class YoutubeDL(object): the_only_video = video_fmts[0] if len(video_fmts) == 1 else None the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None - output_ext = self.params.get('merge_output_format') - if not output_ext: - if the_only_video: - output_ext = the_only_video['ext'] - elif the_only_audio and not video_fmts: - output_ext = the_only_audio['ext'] - else: - output_ext = 'mkv' + output_ext = get_compatible_ext( + vcodecs=[f.get('vcodec') for f in video_fmts], + acodecs=[f.get('acodec') for f in audio_fmts], + vexts=[f['ext'] for f in video_fmts], + aexts=[f['ext'] for f in audio_fmts], + preferences=(try_call(lambda: self.params['merge_output_format'].split('/')) + or self.params.get('prefer_free_formats') and ('webm', 'mkv'))) filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info)) @@ -2090,6 +2193,7 @@ class YoutubeDL(object): 'vcodec': the_only_video.get('vcodec'), 'vbr': the_only_video.get('vbr'), 'stretched_ratio': the_only_video.get('stretched_ratio'), + 'aspect_ratio': the_only_video.get('aspect_ratio'), }) if the_only_audio: @@ -2097,6 +2201,7 @@ class YoutubeDL(object): 'acodec': the_only_audio.get('acodec'), 'abr': the_only_audio.get('abr'), 'asr': the_only_audio.get('asr'), + 'audio_channels': the_only_audio.get('audio_channels') }) return new_dict @@ -2145,7 +2250,8 @@ class YoutubeDL(object): yield from _check_formats(ctx['formats'][::-1]) elif format_spec == 'mergeall': def selector_function(ctx): - formats = list(_check_formats(ctx['formats'])) + formats = list(_check_formats( + f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none')) if not formats: return merged_format = formats[-1] @@ -2154,7 +2260,7 @@ class YoutubeDL(object): yield merged_format else: - format_fallback, format_reverse, format_idx = False, True, 1 + format_fallback, seperate_fallback, format_reverse, format_idx = False, None, True, 1 mobj = re.match( r'(?Pbest|worst|b|w)(?Pvideo|audio|v|a)?(?P\*)?(?:\.(?P[1-9]\d*))?$', format_spec) @@ -2181,6 +2287,7 @@ class YoutubeDL(object): filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' elif format_spec in self._format_selection_exts['video']: filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none' + seperate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none' elif format_spec in self._format_selection_exts['storyboards']: filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none' else: @@ -2189,15 +2296,19 @@ class YoutubeDL(object): def selector_function(ctx): formats = list(ctx['formats']) matches = list(filter(filter_f, formats)) if filter_f is not None else formats - if format_fallback and ctx['incomplete_formats'] and not matches: - # for extractors with incomplete formats (audio only (soundcloud) - # or video 
only (imgur)) best/worst will fallback to - # best/worst {video,audio}-only format - matches = formats + if not matches: + if format_fallback and ctx['incomplete_formats']: + # for extractors with incomplete formats (audio only (soundcloud) + # or video only (imgur)) best/worst will fallback to + # best/worst {video,audio}-only format + matches = formats + elif seperate_fallback and not ctx['has_merged_format']: + # for compatibility with youtube-dl when there is no pre-merged format + matches = list(filter(seperate_fallback, formats)) matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1])) try: yield matches[format_idx - 1] - except IndexError: + except LazyList.IndexError: return filters = [self._build_format_filter(f) for f in selector.filters] @@ -2209,13 +2320,13 @@ class YoutubeDL(object): return selector_function(ctx_copy) return final_selector - stream = io.BytesIO(format_spec.encode('utf-8')) + stream = io.BytesIO(format_spec.encode()) try: - tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline))) + tokens = list(_remove_unused_ops(tokenize.tokenize(stream.readline))) except tokenize.TokenError: raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec))) - class TokenIterator(object): + class TokenIterator: def __init__(self, tokens): self.tokens = tokens self.counter = 0 @@ -2239,10 +2350,9 @@ class YoutubeDL(object): return _build_selector_function(parsed_selector) def _calc_headers(self, info_dict): - res = std_headers.copy() - res.update(info_dict.get('http_headers') or {}) + res = merge_headers(self.params['http_headers'], info_dict.get('http_headers') or {}) - cookies = self._calc_cookies(info_dict) + cookies = self._calc_cookies(info_dict['url']) if cookies: res['Cookie'] = cookies @@ -2253,8 +2363,8 @@ class YoutubeDL(object): return res - def _calc_cookies(self, info_dict): - pr = sanitized_Request(info_dict['url']) + def _calc_cookies(self, url): + pr = sanitized_Request(url) self.cookiejar.add_cookie_header(pr) return pr.get_header('Cookie') @@ -2298,6 +2408,67 @@ class YoutubeDL(object): else: info_dict['thumbnails'] = thumbnails + def _fill_common_fields(self, info_dict, final=True): + # TODO: move sanitization here + if final: + title = info_dict['fulltitle'] = info_dict.get('title') + if not title: + if title == '': + self.write_debug('Extractor gave empty title. Creating a generic title') + else: + self.report_warning('Extractor failed to obtain "title". Creating a generic title instead') + info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}' + + if info_dict.get('duration') is not None: + info_dict['duration_string'] = formatSeconds(info_dict['duration']) + + for ts_key, date_key in ( + ('timestamp', 'upload_date'), + ('release_timestamp', 'release_date'), + ('modified_timestamp', 'modified_date'), + ): + if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None: + # Working around out-of-range timestamp values (e.g. 
negative ones on Windows, + # see http://bugs.python.org/issue1646728) + with contextlib.suppress(ValueError, OverflowError, OSError): + upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key]) + info_dict[date_key] = upload_date.strftime('%Y%m%d') + + live_keys = ('is_live', 'was_live') + live_status = info_dict.get('live_status') + if live_status is None: + for key in live_keys: + if info_dict.get(key) is False: + continue + if info_dict.get(key): + live_status = key + break + if all(info_dict.get(key) is False for key in live_keys): + live_status = 'not_live' + if live_status: + info_dict['live_status'] = live_status + for key in live_keys: + if info_dict.get(key) is None: + info_dict[key] = (live_status == key) + if live_status == 'post_live': + info_dict['was_live'] = True + + # Auto generate title fields corresponding to the *_number fields when missing + # in order to always have clean titles. This is very common for TV series. + for field in ('chapter', 'season', 'episode'): + if final and info_dict.get('%s_number' % field) is not None and not info_dict.get(field): + info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field]) + + def _raise_pending_errors(self, info): + err = info.pop('__pending_error', None) + if err: + self.report_error(err, tb=False) + + def sort_formats(self, info_dict): + formats = self._get_formats(info_dict) + formats.sort(key=FormatSorter( + self, info_dict.get('_format_sort_fields') or []).calculate_preference) + def process_video_result(self, info_dict, download=True): assert info_dict.get('_type', 'video') == 'video' self._num_videos += 1 @@ -2307,14 +2478,6 @@ class YoutubeDL(object): elif not info_dict.get('id'): raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor']) - info_dict['fulltitle'] = info_dict.get('title') - if 'title' not in info_dict: - raise ExtractorError('Missing "title" field in extractor result', - video_id=info_dict['id'], ie=info_dict['extractor']) - elif not info_dict.get('title'): - self.report_warning('Extractor failed to obtain "title". 
Creating a generic title instead')
-            info_dict['title'] = f'{info_dict["extractor"]} video #{info_dict["id"]}'
-
         def report_force_conversion(field, field_not, conversion):
             self.report_warning(
                 '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
@@ -2322,21 +2485,39 @@ class YoutubeDL(object):

         def sanitize_string_field(info, string_field):
             field = info.get(string_field)
-            if field is None or isinstance(field, compat_str):
+            if field is None or isinstance(field, str):
                 return
             report_force_conversion(string_field, 'a string', 'string')
-            info[string_field] = compat_str(field)
+            info[string_field] = str(field)

         def sanitize_numeric_fields(info):
             for numeric_field in self._NUMERIC_FIELDS:
                 field = info.get(numeric_field)
-                if field is None or isinstance(field, compat_numeric_types):
+                if field is None or isinstance(field, (int, float)):
                     continue
                 report_force_conversion(numeric_field, 'numeric', 'int')
                 info[numeric_field] = int_or_none(field)

         sanitize_string_field(info_dict, 'id')
         sanitize_numeric_fields(info_dict)
+        if info_dict.get('section_end') and info_dict.get('section_start') is not None:
+            info_dict['duration'] = round(info_dict['section_end'] - info_dict['section_start'], 3)
+        if (info_dict.get('duration') or 0) <= 0 and info_dict.pop('duration', None):
+            self.report_warning('"duration" field is negative, there is an error in extractor')
+
+        chapters = info_dict.get('chapters') or []
+        if chapters and chapters[0].get('start_time'):
+            chapters.insert(0, {'start_time': 0})
+
+        dummy_chapter = {'end_time': 0, 'start_time': info_dict.get('duration')}
+        for idx, (prev, current, next_) in enumerate(zip(
+                (dummy_chapter, *chapters), chapters, (*chapters[1:], dummy_chapter)), 1):
+            if current.get('start_time') is None:
+                current['start_time'] = prev.get('end_time')
+            if not current.get('end_time'):
+                current['end_time'] = next_.get('start_time')
+            if not current.get('title'):
+                current['title'] = f'<Untitled Chapter {idx}>'

         if 'playlist' not in info_dict:
             # It isn't part of a playlist
@@ -2355,45 +2536,7 @@ class YoutubeDL(object):
         if info_dict.get('display_id') is None and 'id' in info_dict:
             info_dict['display_id'] = info_dict['id']

-        if info_dict.get('duration') is not None:
-            info_dict['duration_string'] = formatSeconds(info_dict['duration'])
-
-        for ts_key, date_key in (
-            ('timestamp', 'upload_date'),
-            ('release_timestamp', 'release_date'),
-            ('modified_timestamp', 'modified_date'),
-        ):
-            if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
-                # Working around out-of-range timestamp values (e.g. negative ones on Windows,
-                # see http://bugs.python.org/issue1646728)
-                try:
-                    upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
-                    info_dict[date_key] = upload_date.strftime('%Y%m%d')
-                except (ValueError, OverflowError, OSError):
-                    pass
-
-        live_keys = ('is_live', 'was_live')
-        live_status = info_dict.get('live_status')
-        if live_status is None:
-            for key in live_keys:
-                if info_dict.get(key) is False:
-                    continue
-                if info_dict.get(key):
-                    live_status = key
-                    break
-            if all(info_dict.get(key) is False for key in live_keys):
-                live_status = 'not_live'
-        if live_status:
-            info_dict['live_status'] = live_status
-            for key in live_keys:
-                if info_dict.get(key) is None:
-                    info_dict[key] = (live_status == key)
-
-        # Auto generate title fields corresponding to the *_number fields when missing
-        # in order to always have clean titles. This is very common for TV series. 
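# Illustration of the auto-title behaviour described in the comment above
# (hypothetical inputs, derived from the loop below):
#     info_dict = {'series': 'Some Show', 'episode_number': 3}
#         => info_dict['episode'] == 'Episode 3'
#     info_dict = {'season_number': 2}
#         => info_dict['season'] == 'Season 2'
# An explicitly extracted 'chapter'/'season'/'episode' value is left untouched.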
- for field in ('chapter', 'season', 'episode'): - if info_dict.get('%s_number' % field) is not None and not info_dict.get(field): - info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field]) + self._fill_common_fields(info_dict) for cc_kind in ('subtitles', 'automatic_captions'): cc = info_dict.get(cc_kind) @@ -2411,24 +2554,32 @@ class YoutubeDL(object): info_dict['requested_subtitles'] = self.process_subtitles( info_dict['id'], subtitles, automatic_captions) - if info_dict.get('formats') is None: - # There's only one format available - formats = [info_dict] - else: - formats = info_dict['formats'] + formats = self._get_formats(info_dict) - info_dict['__has_drm'] = any(f.get('has_drm') for f in formats) + # Backward compatibility with InfoExtractor._sort_formats + field_preference = (formats or [{}])[0].pop('__sort_fields', None) + if field_preference: + info_dict['_format_sort_fields'] = field_preference + + # or None ensures --clean-infojson removes it + info_dict['_has_drm'] = any(f.get('has_drm') for f in formats) or None if not self.params.get('allow_unplayable_formats'): formats = [f for f in formats if not f.get('has_drm')] - if info_dict.get('is_live'): - get_from_start = bool(self.params.get('live_from_start')) - formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start] - if not get_from_start: - info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M') + if formats and all(f.get('acodec') == f.get('vcodec') == 'none' for f in formats): + self.report_warning( + f'{"This video is DRM protected and " if info_dict["_has_drm"] else ""}' + 'only images are available for download. Use --list-formats to see them'.capitalize()) - if not formats: - self.raise_no_formats(info_dict) + get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start')) + if not get_from_start: + info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M') + if info_dict.get('is_live') and formats: + formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start] + if get_from_start and not formats: + self.raise_no_formats(info_dict, msg=( + '--live-from-start is passed, but there are no formats that can be downloaded from the start. 
' + 'If you want to download from the current time, use --no-live-from-start')) def is_wellformed(f): url = f.get('url') @@ -2442,24 +2593,48 @@ class YoutubeDL(object): return True # Filter out malformed formats for better extraction robustness - formats = list(filter(is_wellformed, formats)) + formats = list(filter(is_wellformed, formats or [])) - formats_dict = {} + if not formats: + self.raise_no_formats(info_dict) - # We check that all the formats have the format and format_id fields - for i, format in enumerate(formats): + for format in formats: sanitize_string_field(format, 'format_id') sanitize_numeric_fields(format) format['url'] = sanitize_url(format['url']) + if format.get('ext') is None: + format['ext'] = determine_ext(format['url']).lower() + if format.get('protocol') is None: + format['protocol'] = determine_protocol(format) + if format.get('resolution') is None: + format['resolution'] = self.format_resolution(format, default=None) + if format.get('dynamic_range') is None and format.get('vcodec') != 'none': + format['dynamic_range'] = 'SDR' + if format.get('aspect_ratio') is None: + format['aspect_ratio'] = try_call(lambda: round(format['width'] / format['height'], 2)) + if (info_dict.get('duration') and format.get('tbr') + and not format.get('filesize') and not format.get('filesize_approx')): + format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8)) + format['http_headers'] = self._calc_headers(collections.ChainMap(format, info_dict)) + + # This is copied to http_headers by the above _calc_headers and can now be removed + if '__x_forwarded_for_ip' in info_dict: + del info_dict['__x_forwarded_for_ip'] + + self.sort_formats({ + 'formats': formats, + '_format_sort_fields': info_dict.get('_format_sort_fields') + }) + + # Sanitize and group by format_id + formats_dict = {} + for i, format in enumerate(formats): if not format.get('format_id'): - format['format_id'] = compat_str(i) + format['format_id'] = str(i) else: # Sanitize format_id from characters used in format selector expression format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id']) - format_id = format['format_id'] - if format_id not in formats_dict: - formats_dict[format_id] = [] - formats_dict[format_id].append(format) + formats_dict.setdefault(format['format_id'], []).append(format) # Make sure all formats have unique format_id common_exts = set(itertools.chain(*self._format_selection_exts.values())) @@ -2468,40 +2643,17 @@ class YoutubeDL(object): for i, format in enumerate(ambiguous_formats): if ambigious_id: format['format_id'] = '%s-%d' % (format_id, i) - if format.get('ext') is None: - format['ext'] = determine_ext(format['url']).lower() # Ensure there is no conflict between id and ext in format selection # See https://github.com/yt-dlp/yt-dlp/issues/1282 if format['format_id'] != format['ext'] and format['format_id'] in common_exts: format['format_id'] = 'f%s' % format['format_id'] - for i, format in enumerate(formats): - if format.get('format') is None: - format['format'] = '{id} - {res}{note}'.format( - id=format['format_id'], - res=self.format_resolution(format), - note=format_field(format, 'format_note', ' (%s)'), - ) - if format.get('protocol') is None: - format['protocol'] = determine_protocol(format) - if format.get('resolution') is None: - format['resolution'] = self.format_resolution(format, default=None) - if format.get('dynamic_range') is None and format.get('vcodec') != 'none': - format['dynamic_range'] = 'SDR' - if (info_dict.get('duration') and 
format.get('tbr') - and not format.get('filesize') and not format.get('filesize_approx')): - format['filesize_approx'] = info_dict['duration'] * format['tbr'] * (1024 / 8) - - # Add HTTP headers, so that external programs can use them from the - # json output - full_format_info = info_dict.copy() - full_format_info.update(format) - format['http_headers'] = self._calc_headers(full_format_info) - # Remove private housekeeping stuff - if '__x_forwarded_for_ip' in info_dict: - del info_dict['__x_forwarded_for_ip'] - - # TODO Central sorting goes here + if format.get('format') is None: + format['format'] = '{id} - {res}{note}'.format( + id=format['format_id'], + res=self.format_resolution(format), + note=format_field(format, 'format_note', ' (%s)'), + ) if self.params.get('check_formats') is True: formats = LazyList(self._check_formats(formats[::-1]), reverse=True) @@ -2515,11 +2667,16 @@ class YoutubeDL(object): info_dict, _ = self.pre_process(info_dict) - # The pre-processors may have modified the formats - formats = info_dict.get('formats', [info_dict]) + if self._match_entry(info_dict, incomplete=self._format_fields) is not None: + return info_dict - list_only = self.params.get('simulate') is None and ( - self.params.get('list_thumbnails') or self.params.get('listformats') or self.params.get('listsubtitles')) + self.post_extract(info_dict) + info_dict, _ = self.pre_process(info_dict, 'after_filter') + + # The pre-processors may have modified the formats + formats = self._get_formats(info_dict) + + list_only = self.params.get('simulate') == 'list_only' interactive_format_selection = not list_only and self.format_selector == '-' if self.params.get('list_thumbnails'): self.list_thumbnails(info_dict) @@ -2533,7 +2690,7 @@ class YoutubeDL(object): if list_only: # Without this printing, -F --print-json will not work self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True) - return + return info_dict format_selector = self.format_selector if format_selector is None: @@ -2551,33 +2708,15 @@ class YoutubeDL(object): self.report_error(err, tb=False, is_error=False) continue - # While in format selection we may need to have an access to the original - # format set in order to calculate some metrics or do some processing. - # For now we need to be able to guess whether original formats provided - # by extractor are incomplete or not (i.e. whether extractor provides only - # video-only or audio-only formats) for proper formats selection for - # extractors with such incomplete formats (see - # https://github.com/ytdl-org/youtube-dl/pull/5556). - # Since formats may be filtered during format selection and may not match - # the original formats the results may be incorrect. Thus original formats - # or pre-calculated metrics should be passed to format selection routines - # as well. - # We will pass a context object containing all necessary additional data - # instead of just formats. - # This fixes incorrect format selection issue (see - # https://github.com/ytdl-org/youtube-dl/issues/10083). 
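# Illustration of the selector context described above (a rough sketch only;
# 'formats' stands for a hypothetical list of format dicts in which the value
# 'none' marks a missing audio or video stream):
#     ctx = {
#         'formats': formats,
#         'incomplete_formats': (
#             all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
#             or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)),
#     }
#     requested = list(format_selector(ctx))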
- incomplete_formats = ( - # All formats are video-only or - all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats) - # all formats are audio-only - or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)) - - ctx = { + formats_to_download = list(format_selector({ 'formats': formats, - 'incomplete_formats': incomplete_formats, - } - - formats_to_download = list(format_selector(ctx)) + 'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats), + 'incomplete_formats': ( + # All formats are video-only or + all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats) + # all formats are audio-only + or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)), + })) if interactive_format_selection and not formats_to_download: self.report_error('Requested format is not available', tb=False, is_error=False) continue @@ -2585,29 +2724,46 @@ class YoutubeDL(object): if not formats_to_download: if not self.params.get('ignore_no_formats_error'): - raise ExtractorError('Requested format is not available', expected=True, - video_id=info_dict['id'], ie=info_dict['extractor']) + raise ExtractorError( + 'Requested format is not available. Use --list-formats for a list of available formats', + expected=True, video_id=info_dict['id'], ie=info_dict['extractor']) self.report_warning('Requested format is not available') # Process what we can, even without any available formats. formats_to_download = [{}] - best_format = formats_to_download[-1] + requested_ranges = tuple(self.params.get('download_ranges', lambda *_: [{}])(info_dict, self)) + best_format, downloaded_formats = formats_to_download[-1], [] if download: - if best_format: - self.to_screen( - f'[info] {info_dict["id"]}: Downloading {len(formats_to_download)} format(s): ' - + ', '.join([f['format_id'] for f in formats_to_download])) + if best_format and requested_ranges: + def to_screen(*msg): + self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}') + + to_screen(f'Downloading {len(formats_to_download)} format(s):', + (f['format_id'] for f in formats_to_download)) + if requested_ranges != ({}, ): + to_screen(f'Downloading {len(requested_ranges)} time ranges:', + (f'{c["start_time"]:.1f}-{c["end_time"]:.1f}' for c in requested_ranges)) max_downloads_reached = False - for i, fmt in enumerate(formats_to_download): - formats_to_download[i] = new_info = dict(info_dict) - # Save a reference to the original info_dict so that it can be modified in process_info if needed + + for fmt, chapter in itertools.product(formats_to_download, requested_ranges): + new_info = self._copy_infodict(info_dict) new_info.update(fmt) - new_info['__original_infodict'] = info_dict + offset, duration = info_dict.get('section_start') or 0, info_dict.get('duration') or float('inf') + end_time = offset + min(chapter.get('end_time', duration), duration) + if chapter or offset: + new_info.update({ + 'section_start': offset + chapter.get('start_time', 0), + # duration may not be accurate. 
So allow deviations <1sec + 'section_end': end_time if end_time <= offset + duration + 1 else None, + 'section_title': chapter.get('title'), + 'section_number': chapter.get('index'), + }) + downloaded_formats.append(new_info) try: self.process_info(new_info) except MaxDownloadsReached: max_downloads_reached = True - new_info.pop('__original_infodict') + self._raise_pending_errors(new_info) # Remove copied info for key, val in tuple(new_info.items()): if info_dict.get(key) == val: @@ -2615,12 +2771,12 @@ class YoutubeDL(object): if max_downloads_reached: break - write_archive = set(f.get('__write_download_archive', False) for f in formats_to_download) + write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats} assert write_archive.issubset({True, False, 'ignore'}) if True in write_archive and False not in write_archive: self.record_download_archive(info_dict) - info_dict['requested_downloads'] = formats_to_download + info_dict['requested_downloads'] = downloaded_formats info_dict = self.run_all_pps('after_video', info_dict) if max_downloads_reached: raise MaxDownloadsReached() @@ -2631,47 +2787,35 @@ class YoutubeDL(object): def process_subtitles(self, video_id, normal_subtitles, automatic_captions): """Select the requested subtitles and their format""" - available_subs = {} + available_subs, normal_sub_langs = {}, [] if normal_subtitles and self.params.get('writesubtitles'): available_subs.update(normal_subtitles) + normal_sub_langs = tuple(normal_subtitles.keys()) if automatic_captions and self.params.get('writeautomaticsub'): for lang, cap_info in automatic_captions.items(): if lang not in available_subs: available_subs[lang] = cap_info - if (not self.params.get('writesubtitles') and not - self.params.get('writeautomaticsub') or not - available_subs): + if not available_subs or ( + not self.params.get('writesubtitles') + and not self.params.get('writeautomaticsub')): return None - all_sub_langs = available_subs.keys() + all_sub_langs = tuple(available_subs.keys()) if self.params.get('allsubtitles', False): requested_langs = all_sub_langs elif self.params.get('subtitleslangs', False): - # A list is used so that the order of languages will be the same as - # given in subtitleslangs. 
See https://github.com/yt-dlp/yt-dlp/issues/1041
-            requested_langs = []
-            for lang_re in self.params.get('subtitleslangs'):
-                if lang_re == 'all':
-                    requested_langs.extend(all_sub_langs)
-                    continue
-                discard = lang_re[0] == '-'
-                if discard:
-                    lang_re = lang_re[1:]
-                current_langs = filter(re.compile(lang_re + '$').match, all_sub_langs)
-                if discard:
-                    for lang in current_langs:
-                        while lang in requested_langs:
-                            requested_langs.remove(lang)
-                else:
-                    requested_langs.extend(current_langs)
-            requested_langs = orderedSet(requested_langs)
-        elif 'en' in available_subs:
-            requested_langs = ['en']
+            try:
+                requested_langs = orderedSet_from_options(
+                    self.params.get('subtitleslangs'), {'all': all_sub_langs}, use_regex=True)
+            except re.error as e:
+                raise ValueError(f'Wrong regex for subtitlelangs: {e.pattern}')
+        elif normal_sub_langs:
+            requested_langs = ['en'] if 'en' in normal_sub_langs else normal_sub_langs[:1]
         else:
-            requested_langs = [list(all_sub_langs)[0]]
+            requested_langs = ['en'] if 'en' in all_sub_langs else all_sub_langs[:1]
         if requested_langs:
-            self.write_debug('Downloading subtitles: %s' % ', '.join(requested_langs))
+            self.to_screen(f'[info] {video_id}: Downloading subtitles: {", ".join(requested_langs)}')

         formats_query = self.params.get('subtitlesformat', 'best')
         formats_preference = formats_query.split('/') if formats_query else []
@@ -2679,7 +2823,7 @@ class YoutubeDL(object):
         for lang in requested_langs:
             formats = available_subs.get(lang)
             if formats is None:
-                self.report_warning('%s subtitles not available for %s' % (lang, video_id))
+                self.report_warning(f'{lang} subtitles not available for {video_id}')
                 continue
             for ext in formats_preference:
                 if ext == 'best':
@@ -2707,22 +2851,27 @@ class YoutubeDL(object):
             info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))

         def format_tmpl(tmpl):
-            mobj = re.match(r'\w+(=?)$', tmpl)
-            if mobj and mobj.group(1):
-                return f'{tmpl[:-1]} = %({tmpl[:-1]})r'
-            elif mobj:
-                return f'%({tmpl})s'
-            return tmpl
+            mobj = re.fullmatch(r'([\w.:,]|-\d|(?P<dict>{([\w.:,]|-\d)+}))+=?', tmpl)
+            if not mobj:
+                return tmpl
+
+            fmt = '%({})s'
+            if tmpl.startswith('{'):
+                tmpl = f'.{tmpl}'
+            if tmpl.endswith('='):
+                tmpl, fmt = tmpl[:-1], '{0} = %({0})#j'
+            return '\n'.join(map(fmt.format, [tmpl] if mobj.group('dict') else tmpl.split(',')))

         for tmpl in self.params['forceprint'].get(key, []):
             self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))

         for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
-            filename = self.evaluate_outtmpl(file_tmpl, info_dict)
+            filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
             tmpl = format_tmpl(tmpl)
             self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
-            with io.open(filename, 'a', encoding='utf-8') as f:
-                f.write(self.evaluate_outtmpl(tmpl, info_copy) + '\n')
+            if self._ensure_dir_exists(filename):
+                with open(filename, 'a', encoding='utf-8') as f:
+                    f.write(self.evaluate_outtmpl(tmpl, info_copy) + '\n')

     def __forced_printings(self, info_dict, filename, incomplete):
         def print_mandatory(field, actual_field=None):
@@ -2743,7 +2892,7 @@ class YoutubeDL(object):
         if info_dict.get('requested_formats') is not None:
             # For RTMP URLs, also include the playpath
             info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
-        elif 'url' in info_dict:
+        elif info_dict.get('url'):
             info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')

         if (self.params.get('forcejson')
@@ 
-2791,7 +2940,7 @@ class YoutubeDL(object): urls = '", "'.join( (f['url'].split(',')[0] + ',' if f['url'].startswith('data:') else f['url']) for f in info.get('requested_formats', []) or [info]) - self.write_debug('Invoking downloader on "%s"' % urls) + self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"') # Note: Ideally info should be a deep-copied so that hooks cannot modify it. # But it may contain objects that are not deep-copyable @@ -2811,7 +2960,7 @@ class YoutubeDL(object): return None def process_info(self, info_dict): - """Process a single resolved IE result. (Modified it in-place)""" + """Process a single resolved IE result. (Modifies it in-place)""" assert info_dict.get('_type', 'video') == 'video' original_infodict = info_dict @@ -2823,7 +2972,18 @@ class YoutubeDL(object): info_dict['__write_download_archive'] = 'ignore' return + # Does nothing under normal operation - for backward compatibility of process_info self.post_extract(info_dict) + + def replace_info_dict(new_info): + nonlocal info_dict + if new_info == info_dict: + return + info_dict.clear() + info_dict.update(new_info) + + new_info, _ = self.pre_process(info_dict, 'video') + replace_info_dict(new_info) self._num_downloads += 1 # info_dict['_filename'] needs to be set for backward compatibility @@ -2834,8 +2994,13 @@ class YoutubeDL(object): # Forced printings self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict)) + def check_max_downloads(): + if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'): + raise MaxDownloadsReached() + if self.params.get('simulate'): info_dict['__write_download_archive'] = self.params.get('force_write_download_archive') + check_max_downloads() return if full_filename is None: @@ -2883,19 +3048,21 @@ class YoutubeDL(object): else: try: self.to_screen('[info] Writing video annotations to: ' + annofn) - with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile: + with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile: annofile.write(info_dict['annotations']) except (KeyError, TypeError): self.report_warning('There are no annotations to write.') - except (OSError, IOError): + except OSError: self.report_error('Cannot write annotations file: ' + annofn) return # Write internet shortcut files def _write_link_file(link_type): - if 'webpage_url' not in info_dict: - self.report_error('Cannot write internet shortcut file because the "webpage_url" field is missing in the media information') - return False + url = try_get(info_dict['webpage_url'], iri_to_uri) + if not url: + self.report_warning( + f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown') + return True linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext')) if not self._ensure_dir_exists(encodeFilename(linkfn)): return False @@ -2904,13 +3071,13 @@ class YoutubeDL(object): return True try: self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}') - with io.open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8', - newline='\r\n' if link_type == 'url' else '\n') as linkfile: - template_vars = {'url': iri_to_uri(info_dict['webpage_url'])} + with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8', + newline='\r\n' if link_type == 'url' else '\n') as linkfile: + template_vars = {'url': url} if link_type == 'desktop': template_vars['filename'] = linkfn[:-(len(link_type) + 1)] 
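# Illustration (a sketch, assuming the stock LINK_TEMPLATES definitions): for
# link_type == 'url', the template written below expands to a standard Windows
# internet shortcut, roughly
#     [InternetShortcut]
#     URL=https://www.youtube.com/watch?v=...
# which is why the file is opened with CRLF newlines in the open() call above.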
linkfile.write(LINK_TEMPLATES[link_type] % template_vars) - except (OSError, IOError): + except OSError: self.report_error(f'Cannot write internet shortcut {linkfn}') return False return True @@ -2930,19 +3097,8 @@ class YoutubeDL(object): for link_type, should_write in write_links.items()): return - def replace_info_dict(new_info): - nonlocal info_dict - if new_info == info_dict: - return - info_dict.clear() - info_dict.update(new_info) - - try: - new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move) - replace_info_dict(new_info) - except PostProcessingError as err: - self.report_error('Preprocessing: %s' % str(err)) - return + new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move) + replace_info_dict(new_info) if self.params.get('skip_download'): info_dict['filepath'] = temp_filename @@ -2964,40 +3120,25 @@ class YoutubeDL(object): info_dict['ext'] = os.path.splitext(file)[1][1:] return file - success = True + fd, success = None, True + if info_dict.get('protocol') or info_dict.get('url'): + fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-') + if fd is not FFmpegFD and 'no-direct-merge' not in self.params['compat_opts'] and ( + info_dict.get('section_start') or info_dict.get('section_end')): + msg = ('This format cannot be partially downloaded' if FFmpegFD.available() + else 'You have requested downloading the video partially, but ffmpeg is not installed') + self.report_error(f'{msg}. Aborting') + return + if info_dict.get('requested_formats') is not None: - - def compatible_formats(formats): - # TODO: some formats actually allow this (mkv, webm, ogg, mp4), but not all of them. - video_formats = [format for format in formats if format.get('vcodec') != 'none'] - audio_formats = [format for format in formats if format.get('acodec') != 'none'] - if len(video_formats) > 2 or len(audio_formats) > 2: - return False - - # Check extension - exts = set(format.get('ext') for format in formats) - COMPATIBLE_EXTS = ( - set(('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma')), - set(('webm',)), - ) - for ext_sets in COMPATIBLE_EXTS: - if ext_sets.issuperset(exts): - return True - # TODO: Check acodec/vcodec - return False - requested_formats = info_dict['requested_formats'] old_ext = info_dict['ext'] if self.params.get('merge_output_format') is None: - if not compatible_formats(requested_formats): - info_dict['ext'] = 'mkv' - self.report_warning( - 'Requested formats are incompatible for merge and will be merged into mkv') if (info_dict['ext'] == 'webm' and info_dict.get('thumbnails') # check with type instead of pp_key, __name__, or isinstance # since we dont want any custom PPs to trigger this - and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])): + and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])): # noqa: E721 info_dict['ext'] = 'mkv' self.report_warning( 'webm doesn\'t support embedding a thumbnail, mkv will be used') @@ -3011,7 +3152,7 @@ class YoutubeDL(object): os.path.splitext(filename)[0] if filename_real_ext in (old_ext, new_ext) else filename) - return '%s.%s' % (filename_wo_ext, ext) + return f'{filename_wo_ext}.{ext}' # Ensure filename always has a correct extension for successful merge full_filename = correct_ext(full_filename) @@ -3019,10 +3160,8 @@ class YoutubeDL(object): dl_filename = existing_video_file(full_filename, temp_filename) info_dict['__real_download'] = False - downloaded = [] merger = FFmpegMergerPP(self) - - fd = 
get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-') + downloaded = [] if dl_filename is not None: self.report_file_already_downloaded(dl_filename) elif fd: @@ -3041,9 +3180,11 @@ class YoutubeDL(object): 'while also allowing unplayable formats to be downloaded. ' 'The formats won\'t be merged to prevent data corruption.') elif not merger.available: - self.report_warning( - 'You have requested merging of multiple formats but ffmpeg is not installed. ' - 'The formats won\'t be merged.') + msg = 'You have requested merging of multiple formats but ffmpeg is not installed' + if not self.params.get('ignoreerrors'): + self.report_error(f'{msg}. Aborting due to --abort-on-error') + return + self.report_warning(f'{msg}. The formats won\'t be merged') if temp_filename == '-': reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params) @@ -3094,12 +3235,13 @@ class YoutubeDL(object): except network_exceptions as err: self.report_error('unable to download video data: %s' % error_to_compat_str(err)) return - except (OSError, IOError) as err: + except OSError as err: raise UnavailableVideoError(err) except (ContentTooShortError, ) as err: - self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded)) + self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})') return + self._raise_pending_errors(info_dict) if success and full_filename != '-': def fixup(): @@ -3110,16 +3252,16 @@ class YoutubeDL(object): if fixup_policy in ('ignore', 'never'): return elif fixup_policy == 'warn': - do_fixup = False + do_fixup = 'warn' elif fixup_policy != 'force': assert fixup_policy in ('detect_or_warn', None) if not info_dict.get('__real_download'): do_fixup = False def ffmpeg_fixup(cndn, msg, cls): - if not cndn: + if not (do_fixup and cndn): return - if not do_fixup: + elif do_fixup == 'warn': self.report_warning(f'{vid}: {msg}') return pp = cls(self) @@ -3129,30 +3271,32 @@ class YoutubeDL(object): self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically') stretched_ratio = info_dict.get('stretched_ratio') - ffmpeg_fixup( - stretched_ratio not in (1, None), - f'Non-uniform pixel ratio {stretched_ratio}', - FFmpegFixupStretchedPP) - - ffmpeg_fixup( - (info_dict.get('requested_formats') is None - and info_dict.get('container') == 'm4a_dash' - and info_dict.get('ext') == 'm4a'), - 'writing DASH m4a. Only some players support this container', - FFmpegFixupM4aPP) + ffmpeg_fixup(stretched_ratio not in (1, None), + f'Non-uniform pixel ratio {stretched_ratio}', + FFmpegFixupStretchedPP) downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None - downloader = downloader.__name__ if downloader else None + downloader = downloader.FD_NAME if downloader else None - if info_dict.get('requested_formats') is None: # Not necessary if doing merger - ffmpeg_fixup(downloader == 'HlsFD', + ext = info_dict.get('ext') + postprocessed_by_ffmpeg = info_dict.get('requested_formats') or any(( + isinstance(pp, FFmpegVideoConvertorPP) + and resolve_recode_mapping(ext, pp.mapping)[0] not in (ext, None) + ) for pp in self._pps['post_process']) + + if not postprocessed_by_ffmpeg: + ffmpeg_fixup(ext == 'm4a' and info_dict.get('container') == 'm4a_dash', + 'writing DASH m4a. 
Only some players support this container', + FFmpegFixupM4aPP) + ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts') + or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None, 'Possible MPEG-TS in MP4 container or malformed AAC timestamps', FFmpegFixupM3u8PP) ffmpeg_fixup(info_dict.get('is_live') and downloader == 'DashSegmentsFD', 'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP) - ffmpeg_fixup(downloader == 'WebSocketFragmentFD', 'Malformed timestamps detected', FFmpegFixupTimestampPP) - ffmpeg_fixup(downloader == 'WebSocketFragmentFD', 'Malformed duration detected', FFmpegFixupDurationPP) + ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP) + ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP) fixup() try: @@ -3168,15 +3312,10 @@ class YoutubeDL(object): return info_dict['__write_download_archive'] = True + assert info_dict is original_infodict # Make sure the info_dict was modified in-place if self.params.get('force_write_download_archive'): info_dict['__write_download_archive'] = True - - # Make sure the info_dict was modified in-place - assert info_dict is original_infodict - - max_downloads = self.params.get('max_downloads') - if max_downloads is not None and self._num_downloads >= int(max_downloads): - raise MaxDownloadsReached() + check_max_downloads() def __download_wrapper(self, func): @functools.wraps(func) @@ -3185,13 +3324,11 @@ class YoutubeDL(object): res = func(*args, **kwargs) except UnavailableVideoError as e: self.report_error(e) - except MaxDownloadsReached as e: - self.to_screen(f'[info] {e}') - raise except DownloadCancelled as e: self.to_screen(f'[info] {e}') if not self.params.get('break_per_url'): raise + self._num_downloads = 0 else: if self.params.get('dump_single_json', False): self.post_extract(res) @@ -3201,7 +3338,7 @@ class YoutubeDL(object): def download(self, url_list): """Download a given list of URLs.""" url_list = variadic(url_list) # Passing a single URL is a common mistake - outtmpl = self.outtmpl_dict['default'] + outtmpl = self.params['outtmpl']['default'] if (len(url_list) > 1 and outtmpl != '-' and '%' not in outtmpl @@ -3240,17 +3377,21 @@ class YoutubeDL(object): return info_dict info_dict.setdefault('epoch', int(time.time())) info_dict.setdefault('_type', 'video') - remove_keys = {'__original_infodict'} # Always remove this since this may contain a copy of the entire dict - keep_keys = ['_type'] # Always keep this to facilitate load-info-json + info_dict.setdefault('_version', { + 'version': __version__, + 'current_git_head': current_git_head(), + 'release_git_head': RELEASE_GIT_HEAD, + 'repository': REPOSITORY, + }) + if remove_private_keys: - remove_keys |= { + reject = lambda k, v: v is None or k.startswith('__') or k in { 'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries', - 'entries', 'filepath', 'infojson_filename', 'original_url', 'playlist_autonumber', + 'entries', 'filepath', '_filename', 'infojson_filename', 'original_url', 'playlist_autonumber', + '_format_sort_fields', } - reject = lambda k, v: k not in keep_keys and ( - k.startswith('_') or k in remove_keys or v is None) else: - reject = lambda k, v: k in remove_keys + reject = lambda k, v: False def filter_fn(obj): if isinstance(obj, dict): @@ -3269,6 +3410,17 @@ class YoutubeDL(object): ''' Alias of sanitize_info for backward compatibility ''' return 
YoutubeDL.sanitize_info(info_dict, actually_filter) + def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None): + for filename in set(filter(None, files_to_delete)): + if msg: + self.to_screen(msg % filename) + try: + os.remove(filename) + except OSError: + self.report_warning(f'Unable to delete file {filename}') + if filename in info.get('__files_to_move', []): # NB: Delete even if None + del info['__files_to_move'][filename] + @staticmethod def post_extract(info_dict): def actual_post_extract(info_dict): @@ -3277,14 +3429,8 @@ class YoutubeDL(object): actual_post_extract(video_dict or {}) return - post_extractor = info_dict.get('__post_extractor') or (lambda: {}) - extra = post_extractor().items() - info_dict.update(extra) - info_dict.pop('__post_extractor', None) - - original_infodict = info_dict.get('__original_infodict') or {} - original_infodict.update(extra) - original_infodict.pop('__post_extractor', None) + post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {}) + info_dict.update(post_extractor()) actual_post_extract(info_dict or {}) @@ -3307,18 +3453,13 @@ class YoutubeDL(object): for f in files_to_delete: infodict['__files_to_move'].setdefault(f, '') else: - for old_filename in set(files_to_delete): - self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename) - try: - os.remove(encodeFilename(old_filename)) - except (IOError, OSError): - self.report_warning('Unable to remove downloaded original file') - if old_filename in infodict['__files_to_move']: - del infodict['__files_to_move'][old_filename] + self._delete_downloaded_files( + *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)') return infodict def run_all_pps(self, key, info, *, additional_pps=None): - self._forceprint(key, info) + if key != 'video': + self._forceprint(key, info) for pp in (additional_pps or []) + self._pps[key]: info = self.run_pp(pp, info) return info @@ -3326,7 +3467,12 @@ class YoutubeDL(object): def pre_process(self, ie_info, key='pre_process', files_to_move=None): info = dict(ie_info) info['__files_to_move'] = files_to_move or {} - info = self.run_all_pps(key, info) + try: + info = self.run_all_pps(key, info) + except PostProcessingError as err: + msg = f'Preprocessing: {err}' + info.setdefault('__pending_error', msg) + self.report_error(msg, is_error=False) return info, info.pop('__files_to_move', None) def post_process(self, filename, info, files_to_move=None): @@ -3356,18 +3502,15 @@ class YoutubeDL(object): break else: return - return '%s %s' % (extractor.lower(), video_id) + return make_archive_id(extractor, video_id) def in_download_archive(self, info_dict): - fn = self.params.get('download_archive') - if fn is None: + if not self.archive: return False - vid_id = self._make_archive_id(info_dict) - if not vid_id: - return False # Incomplete video information - - return vid_id in self.archive + vid_ids = [self._make_archive_id(info_dict)] + vid_ids.extend(info_dict.get('_old_archive_ids') or []) + return any(id_ in self.archive for id_ in vid_ids) def record_download_archive(self, info_dict): fn = self.params.get('download_archive') @@ -3375,9 +3518,11 @@ class YoutubeDL(object): return vid_id = self._make_archive_id(info_dict) assert vid_id + self.write_debug(f'Adding to archive: {vid_id}') - with locked_file(fn, 'a', encoding='utf-8') as archive_file: - archive_file.write(vid_id + '\n') + if is_path_like(fn): + with locked_file(fn, 'a', encoding='utf-8') as archive_file: + archive_file.write(vid_id + '\n') 
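# Illustration: archive entries are plain '<extractor> <id>' lines, e.g.
# make_archive_id('Youtube', 'dQw4w9WgXcQ') -> 'youtube dQw4w9WgXcQ'.
# in_download_archive() above then simply tests set membership of such
# strings, including any _old_archive_ids kept for backward compatibility.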
self.archive.add(vid_id) @staticmethod @@ -3396,7 +3541,7 @@ class YoutubeDL(object): def _list_format_headers(self, *headers): if self.params.get('listformats_table', True) is not False: - return [self._format_screen(header, self.Styles.HEADERS) for header in headers] + return [self._format_out(header, self.Styles.HEADERS) for header in headers] return headers def _format_note(self, fdict): @@ -3459,11 +3604,17 @@ class YoutubeDL(object): res += '~' + format_bytes(fdict['filesize_approx']) return res - def render_formats_table(self, info_dict): - if not info_dict.get('formats') and not info_dict.get('url'): - return None + def _get_formats(self, info_dict): + if info_dict.get('formats') is None: + if info_dict.get('url') and info_dict.get('_type', 'video') == 'video': + return [info_dict] + return [] + return info_dict['formats'] - formats = info_dict.get('formats', [info_dict]) + def render_formats_table(self, info_dict): + formats = self._get_formats(info_dict) + if not formats: + return if not self.params.get('listformats_table', True) is not False: table = [ [ @@ -3471,33 +3622,45 @@ class YoutubeDL(object): format_field(f, 'ext'), self.format_resolution(f), self._format_note(f) - ] for f in formats if f.get('preference') is None or f['preference'] >= -1000] + ] for f in formats if (f.get('preference') or 0) >= -1000] return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1) - delim = self._format_screen('\u2502', self.Styles.DELIM, '|', test_encoding=True) + def simplified_codec(f, field): + assert field in ('acodec', 'vcodec') + codec = f.get(field, 'unknown') + if not codec: + return 'unknown' + elif codec != 'none': + return '.'.join(codec.split('.')[:4]) + + if field == 'vcodec' and f.get('acodec') == 'none': + return 'images' + elif field == 'acodec' and f.get('vcodec') == 'none': + return '' + return self._format_out('audio only' if field == 'vcodec' else 'video only', + self.Styles.SUPPRESS) + + delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True) table = [ [ - self._format_screen(format_field(f, 'format_id'), self.Styles.ID), + self._format_out(format_field(f, 'format_id'), self.Styles.ID), format_field(f, 'ext'), format_field(f, func=self.format_resolution, ignore=('audio only', 'images')), - format_field(f, 'fps', '\t%d'), + format_field(f, 'fps', '\t%d', func=round), format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''), + format_field(f, 'audio_channels', '\t%s'), delim, format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes), - format_field(f, 'tbr', '\t%dk'), + format_field(f, 'tbr', '\t%dk', func=round), shorten_protocol_name(f.get('protocol', '')), delim, - format_field(f, 'vcodec', default='unknown').replace( - 'none', 'images' if f.get('acodec') == 'none' - else self._format_screen('audio only', self.Styles.SUPPRESS)), - format_field(f, 'vbr', '\t%dk'), - format_field(f, 'acodec', default='unknown').replace( - 'none', '' if f.get('vcodec') == 'none' - else self._format_screen('video only', self.Styles.SUPPRESS)), - format_field(f, 'abr', '\t%dk'), - format_field(f, 'asr', '\t%dHz'), + simplified_codec(f, 'vcodec'), + format_field(f, 'vbr', '\t%dk', func=round), + simplified_codec(f, 'acodec'), + format_field(f, 'abr', '\t%dk', func=round), + format_field(f, 'asr', '\t%s', func=format_decimal_suffix), join_nonempty( - self._format_screen('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None, + 
self._format_out('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None, format_field(f, 'language', '[%s]'), join_nonempty(format_field(f, 'format_note'), format_field(f, 'container', ignore=(None, f.get('ext'))), @@ -3505,12 +3668,12 @@ class YoutubeDL(object): delim=' '), ] for f in formats if f.get('preference') is None or f['preference'] >= -1000] header_line = self._list_format_headers( - 'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', delim, '\tFILESIZE', '\tTBR', 'PROTO', + 'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', 'CH', delim, '\tFILESIZE', '\tTBR', 'PROTO', delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO') return render_table( header_line, table, hide_empty=True, - delim=self._format_screen('\u2500', self.Styles.DELIM, '-', test_encoding=True)) + delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True)) def render_thumbnails_table(self, info_dict): thumbnails = list(info_dict.get('thumbnails') or []) @@ -3518,7 +3681,7 @@ class YoutubeDL(object): return None return render_table( self._list_format_headers('ID', 'Width', 'Height', 'URL'), - [[t.get('id'), t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]) + [[t.get('id'), t.get('width') or 'unknown', t.get('height') or 'unknown', t['url']] for t in thumbnails]) def render_subtitles_table(self, video_id, subtitles): def _row(lang, formats): @@ -3553,7 +3716,7 @@ class YoutubeDL(object): def urlopen(self, req): """ Start an HTTP download """ - if isinstance(req, compat_basestring): + if isinstance(req, str): req = sanitized_Request(req) return self._opener.open(req, timeout=self._socket_timeout) @@ -3561,18 +3724,30 @@ class YoutubeDL(object): if not self.params.get('verbose'): return + from . import _IN_CLI # Must be delayed import + + # These imports can be slow. 
So import them only as needed
+        from .extractor.extractors import _LAZY_LOADER
+        from .extractor.extractors import (
+            _PLUGIN_CLASSES as plugin_ies,
+            _PLUGIN_OVERRIDES as plugin_ie_overrides
+        )
+
         def get_encoding(stream):
-            ret = getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__)
+            ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
             if not supports_terminal_sequences(stream):
-                from .compat import WINDOWS_VT_MODE
+                from .utils import WINDOWS_VT_MODE  # Must be imported locally
                 ret += ' (No VT)' if WINDOWS_VT_MODE is False else ' (No ANSI)'
             return ret

-        encoding_str = 'Encodings: locale %s, fs %s, out %s, err %s, pref %s' % (
+        encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
             locale.getpreferredencoding(),
             sys.getfilesystemencoding(),
-            get_encoding(self._screen_file), get_encoding(self._err_file),
-            self.get_encoding())
+            self.get_encoding(),
+            ', '.join(
+                f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
+                if stream is not None and key != 'console')
+        )

         logger = self.params.get('logger')
         if logger:
@@ -3583,55 +3758,37 @@ class YoutubeDL(object):
             write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')

         source = detect_variant()
+        if VARIANT not in (None, 'pip'):
+            source += '*'
         write_debug(join_nonempty(
-            'yt-dlp version', __version__,
+            f'{"yt-dlp" if REPOSITORY == "yt-dlp/yt-dlp" else REPOSITORY} version',
+            __version__,
             f'[{RELEASE_GIT_HEAD}]' if RELEASE_GIT_HEAD else '',
             '' if source == 'unknown' else f'({source})',
+            '' if _IN_CLI else 'API',
             delim=' '))
+
+        if not _IN_CLI:
+            write_debug(f'params: {self.params}')
+
+        write_debug('** This build is an unofficial daily build, provided for ease of use.')
+        write_debug('** Please do not ask for any support.')
         if not _LAZY_LOADER:
             if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
                 write_debug('Lazy loading extractors is forcibly disabled')
             else:
                 write_debug('Lazy loading extractors is disabled')
-        if plugin_extractors or plugin_postprocessors:
-            write_debug('Plugins: %s' % [
-                '%s%s' % (klass.__name__, '' if klass.__name__ == name else f' as {name}')
-                for name, klass in itertools.chain(plugin_extractors.items(), plugin_postprocessors.items())])
-        if self.params.get('compat_opts'):
-            write_debug('Compatibility options: %s' % ', '.join(self.params.get('compat_opts')))
+        if self.params['compat_opts']:
+            write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))

-        if source == 'source':
-            try:
-                sp = Popen(
-                    ['git', 'rev-parse', '--short', 'HEAD'],
-                    stdout=subprocess.PIPE, stderr=subprocess.PIPE,
-                    cwd=os.path.dirname(os.path.abspath(__file__)))
-                out, err = sp.communicate_or_kill()
-                out = out.decode().strip()
-                if re.match('[0-9a-f]+', out):
-                    write_debug('Git HEAD: %s' % out)
-            except Exception:
-                try:
-                    sys.exc_clear()
-                except Exception:
-                    pass
-
-        def python_implementation():
-            impl_name = platform.python_implementation()
-            if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
-                return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
-            return impl_name
-
-        write_debug('Python version %s (%s %s) - %s' % (
-            platform.python_version(),
-            python_implementation(),
-            platform.architecture()[0],
-            platform_name()))
+        if current_git_head():
+            write_debug(f'Git HEAD: {current_git_head()}')
+        write_debug(system_identifier())

         exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
         ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
         if ffmpeg_features:
- 
exe_versions['ffmpeg'] += ' (%s)' % ','.join(ffmpeg_features) + exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features)) exe_versions['rtmpdump'] = rtmpdump_version() exe_versions['phantomjs'] = PhantomJSwrapper._version() @@ -3640,31 +3797,41 @@ class YoutubeDL(object): ) or 'none' write_debug('exe versions: %s' % exe_str) - from .downloader.websocket import has_websockets - from .postprocessor.embedthumbnail import has_mutagen - from .cookies import SQLITE_AVAILABLE, SECRETSTORAGE_AVAILABLE + from .compat.compat_utils import get_package_info + from .dependencies import available_dependencies - lib_str = join_nonempty( - compat_pycrypto_AES and compat_pycrypto_AES.__name__.split('.')[0], - SECRETSTORAGE_AVAILABLE and 'secretstorage', - has_mutagen and 'mutagen', - SQLITE_AVAILABLE and 'sqlite', - has_websockets and 'websockets', - delim=', ') or 'none' - write_debug('Optional libraries: %s' % lib_str) + write_debug('Optional libraries: %s' % (', '.join(sorted({ + join_nonempty(*get_package_info(m)) for m in available_dependencies.values() + })) or 'none')) + self._setup_opener() proxy_map = {} for handler in self._opener.handlers: if hasattr(handler, 'proxies'): proxy_map.update(handler.proxies) write_debug(f'Proxy map: {proxy_map}') + for plugin_type, plugins in {'Extractor': plugin_ies, 'Post-Processor': plugin_pps}.items(): + display_list = ['%s%s' % ( + klass.__name__, '' if klass.__name__ == name else f' as {name}') + for name, klass in plugins.items()] + if plugin_type == 'Extractor': + display_list.extend(f'{plugins[-1].IE_NAME.partition("+")[2]} ({parent.__name__})' + for parent, plugins in plugin_ie_overrides.items()) + if not display_list: + continue + write_debug(f'{plugin_type} Plugins: {", ".join(sorted(display_list))}') + + plugin_dirs = plugin_directories() + if plugin_dirs: + write_debug(f'Plugin directories: {plugin_dirs}') + # Not implemented if False and self.params.get('call_home'): - ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8') + ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode() write_debug('Public IP address: %s' % ipaddr) latest_version = self.urlopen( - 'https://yt-dl.org/latest/version').read().decode('utf-8') + 'https://yt-dl.org/latest/version').read().decode() if version_tuple(latest_version) > version_tuple(__version__): self.report_warning( 'You are using an outdated version (newest version: %s)! 
' @@ -3672,6 +3839,8 @@ class YoutubeDL(object): latest_version) def _setup_opener(self): + if hasattr(self, '_opener'): + return timeout_val = self.params.get('socket_timeout') self._socket_timeout = 20 if timeout_val is None else float(timeout_val) @@ -3688,7 +3857,7 @@ class YoutubeDL(object): else: proxies = {'http': opts_proxy, 'https': opts_proxy} else: - proxies = compat_urllib_request.getproxies() + proxies = urllib.request.getproxies() # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805) if 'http' in proxies and 'https' not in proxies: proxies['https'] = proxies['http'] @@ -3698,19 +3867,22 @@ class YoutubeDL(object): https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel) ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel) redirect_handler = YoutubeDLRedirectHandler() - data_handler = compat_urllib_request_DataHandler() + data_handler = urllib.request.DataHandler() # When passing our own FileHandler instance, build_opener won't add the # default FileHandler and allows us to disable the file protocol, which # can be used for malicious purposes (see # https://github.com/ytdl-org/youtube-dl/issues/8227) - file_handler = compat_urllib_request.FileHandler() + file_handler = urllib.request.FileHandler() - def file_open(*args, **kwargs): - raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons') - file_handler.file_open = file_open + if not self.params.get('enable_file_urls'): + def file_open(*args, **kwargs): + raise urllib.error.URLError( + 'file:// URLs are explicitly disabled in yt-dlp for security reasons. ' + 'Use --enable-file-urls to enable at your own risk.') + file_handler.file_open = file_open - opener = compat_urllib_request.build_opener( + opener = urllib.request.build_opener( proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler) # Delete the default user-agent header, which would otherwise apply in @@ -3736,7 +3908,7 @@ class YoutubeDL(object): return encoding def _write_info_json(self, label, ie_result, infofn, overwrite=None): - ''' Write infojson and returns True = written, False = skip, None = error ''' + ''' Write infojson and returns True = written, 'exists' = Already exists, False = skip, None = error ''' if overwrite is None: overwrite = self.params.get('overwrites', True) if not self.params.get('writeinfojson'): @@ -3748,14 +3920,15 @@ class YoutubeDL(object): return None elif not overwrite and os.path.exists(infofn): self.to_screen(f'[info] {label.title()} metadata is already present') - else: - self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}') - try: - write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn) - except (OSError, IOError): - self.report_error(f'Cannot write {label} metadata to JSON file {infofn}') - return None - return True + return 'exists' + + self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}') + try: + write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn) + return True + except OSError: + self.report_error(f'Cannot write {label} metadata to JSON file {infofn}') + return None def _write_description(self, label, ie_result, descfn): ''' Write description and returns True = written, False = skip, None = error ''' @@ -3769,14 +3942,14 @@ class YoutubeDL(object): elif not self.params.get('overwrites', True) and os.path.exists(descfn): self.to_screen(f'[info] {label.title()} 
description is already present') elif ie_result.get('description') is None: - self.report_warning(f'There\'s no {label} description to write') + self.to_screen(f'[info] There\'s no {label} description to write') return False else: try: self.to_screen(f'[info] Writing {label} description to: {descfn}') - with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile: + with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile: descfile.write(ie_result['description']) - except (OSError, IOError): + except OSError: self.report_error(f'Cannot write {label} description file {descfn}') return None return True @@ -3785,15 +3958,18 @@ class YoutubeDL(object): ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error''' ret = [] subtitles = info_dict.get('requested_subtitles') - if not subtitles or not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')): + if not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')): # subtitles download errors are already managed as troubles in relevant IE # that way it will silently go on when used with unsupporting IE return ret - + elif not subtitles: + self.to_screen('[info] There are no subtitles for the requested languages') + return ret sub_filename_base = self.prepare_filename(info_dict, 'subtitle') if not sub_filename_base: self.to_screen('[info] Skipping writing video subtitles') return ret + for sub_lang, sub_info in subtitles.items(): sub_format = sub_info['ext'] sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext')) @@ -3810,12 +3986,12 @@ class YoutubeDL(object): try: # Use newline='' to prevent conversion of newline characters # See https://github.com/ytdl-org/youtube-dl/issues/10268 - with io.open(sub_filename, 'w', encoding='utf-8', newline='') as subfile: + with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile: subfile.write(sub_info['data']) sub_info['filepath'] = sub_filename ret.append((sub_filename, sub_filename_final)) continue - except (OSError, IOError): + except OSError: self.report_error(f'Cannot write video subtitles file {sub_filename}') return None @@ -3826,9 +4002,12 @@ class YoutubeDL(object): sub_info['filepath'] = sub_filename ret.append((sub_filename, sub_filename_final)) except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err: + msg = f'Unable to download video subtitles for {sub_lang!r}: {err}' if self.params.get('ignoreerrors') is not True: # False or 'only_download' - raise DownloadError(f'Unable to download video subtitles for {sub_lang!r}: {err}', err) - self.report_warning(f'Unable to download video subtitles for {sub_lang!r}: {err}') + if not self.params.get('ignoreerrors'): + self.report_error(msg) + raise DownloadError(msg) + self.report_warning(msg) return ret def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None): @@ -3837,6 +4016,9 @@ class YoutubeDL(object): thumbnails, ret = [], [] if write_all or self.params.get('writethumbnail', False): thumbnails = info_dict.get('thumbnails') or [] + if not thumbnails: + self.to_screen(f'[info] There are no {label} thumbnails to download') + return ret multiple = write_all and len(thumbnails) > 1 if thumb_filename_base is None: @@ -3860,7 +4042,7 @@ class YoutubeDL(object): else: self.to_screen(f'[info] Downloading {thumb_display_id} ...') try: - uf = self.urlopen(t['url']) + uf = self.urlopen(sanitized_Request(t['url'], headers=t.get('http_headers', {}))) 
self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}') with open(encodeFilename(thumb_filename), 'wb') as thumbf: shutil.copyfileobj(uf, thumbf) diff --git a/plugins/youtube_download/yt_dlp/__init__.py b/plugins/youtube_download/yt_dlp/__init__.py index b93f47e..255b317 100644 --- a/plugins/youtube_download/yt_dlp/__init__.py +++ b/plugins/youtube_download/yt_dlp/__init__.py @@ -1,57 +1,29 @@ -#!/usr/bin/env python3 -# coding: utf-8 - -f'You are using an unsupported version of Python. Only Python versions 3.6 and above are supported by yt-dlp' # noqa: F541 +try: + import contextvars # noqa: F401 +except Exception: + raise Exception( + f'You are using an unsupported version of Python. Only Python versions 3.7 and above are supported by yt-dlp') # noqa: F541 __license__ = 'Public Domain' -import codecs -import io +import collections +import getpass import itertools +import optparse import os -import random import re import sys -from .options import ( - parseOpts, -) -from .compat import ( - compat_getpass, - compat_os_name, - compat_shlex_quote, - workaround_optparse_bug9161, -) +from .compat import compat_shlex_quote from .cookies import SUPPORTED_BROWSERS, SUPPORTED_KEYRINGS -from .utils import ( - DateRange, - decodeOption, - DownloadCancelled, - DownloadError, - error_to_compat_str, - expand_path, - GeoUtils, - float_or_none, - int_or_none, - match_filter_func, - parse_duration, - preferredencoding, - read_batch_urls, - render_table, - SameFileError, - setproctitle, - std_headers, - write_string, -) -from .update import run_update -from .downloader import ( - FileDownloader, -) -from .extractor import gen_extractors, list_extractors -from .extractor.common import InfoExtractor +from .downloader.external import get_external_downloader +from .extractor import list_extractor_classes from .extractor.adobepass import MSO_INFO +from .options import parseOpts from .postprocessor import ( FFmpegExtractAudioPP, + FFmpegMergerPP, + FFmpegPostProcessor, FFmpegSubtitlesConvertorPP, FFmpegThumbnailsConvertorPP, FFmpegVideoConvertorPP, @@ -59,256 +31,106 @@ from .postprocessor import ( MetadataFromFieldPP, MetadataParserPP, ) +from .update import Updater +from .utils import ( + NO_DEFAULT, + POSTPROCESS_WHEN, + DateRange, + DownloadCancelled, + DownloadError, + FormatSorter, + GeoUtils, + PlaylistEntries, + SameFileError, + decodeOption, + download_range_func, + expand_path, + float_or_none, + format_field, + int_or_none, + match_filter_func, + parse_bytes, + parse_duration, + preferredencoding, + read_batch_urls, + read_stdin, + render_table, + setproctitle, + std_headers, + traverse_obj, + variadic, + write_string, +) from .YoutubeDL import YoutubeDL +_IN_CLI = False -def _real_main(argv=None): - # Compatibility fixes for Windows - if sys.platform == 'win32': - # https://github.com/ytdl-org/youtube-dl/issues/820 - codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None) - workaround_optparse_bug9161() +def _exit(status=0, *args): + for msg in args: + sys.stderr.write(msg) + raise SystemExit(status) - setproctitle('yt-dlp') - - parser, opts, args = parseOpts(argv) - warnings, deprecation_warnings = [], [] - - # Set user agent - if opts.user_agent is not None: - std_headers['User-Agent'] = opts.user_agent - - # Set referer - if opts.referer is not None: - std_headers['Referer'] = opts.referer - - # Custom HTTP headers - std_headers.update(opts.headers) - - # Dump user agent - if opts.dump_user_agent: - write_string(std_headers['User-Agent'] + '\n', 
out=sys.stdout) - sys.exit(0) +def get_urls(urls, batchfile, verbose): # Batch file verification batch_urls = [] - if opts.batchfile is not None: + if batchfile is not None: try: - if opts.batchfile == '-': - write_string('Reading URLs from stdin - EOF (%s) to end:\n' % ( - 'Ctrl+Z' if compat_os_name == 'nt' else 'Ctrl+D')) - batchfd = sys.stdin - else: - batchfd = io.open( - expand_path(opts.batchfile), - 'r', encoding='utf-8', errors='ignore') - batch_urls = read_batch_urls(batchfd) - if opts.verbose: + batch_urls = read_batch_urls( + read_stdin('URLs') if batchfile == '-' + else open(expand_path(batchfile), encoding='utf-8', errors='ignore')) + if verbose: write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n') - except IOError: - sys.exit('ERROR: batch file %s could not be read' % opts.batchfile) - all_urls = batch_urls + [url.strip() for url in args] # batch_urls are already striped in read_batch_urls + except OSError: + _exit(f'ERROR: batch file {batchfile} could not be read') _enc = preferredencoding() - all_urls = [url.decode(_enc, 'ignore') if isinstance(url, bytes) else url for url in all_urls] + return [ + url.strip().decode(_enc, 'ignore') if isinstance(url, bytes) else url.strip() + for url in batch_urls + urls] + +def print_extractor_information(opts, urls): + out = '' if opts.list_extractors: - for ie in list_extractors(opts.age_limit): - write_string(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie.working() else '') + '\n', out=sys.stdout) - matchedUrls = [url for url in all_urls if ie.suitable(url)] - for mu in matchedUrls: - write_string(' ' + mu + '\n', out=sys.stdout) - sys.exit(0) - if opts.list_extractor_descriptions: - for ie in list_extractors(opts.age_limit): - if not ie.working(): - continue - desc = getattr(ie, 'IE_DESC', ie.IE_NAME) - if desc is False: - continue - if getattr(ie, 'SEARCH_KEY', None) is not None: - _SEARCHES = ('cute kittens', 'slithering pythons', 'falling cat', 'angry poodle', 'purple fish', 'running tortoise', 'sleeping bunny', 'burping cow') - _COUNTS = ('', '5', '10', 'all') - desc += f'; "{ie.SEARCH_KEY}:" prefix (Example: "{ie.SEARCH_KEY}{random.choice(_COUNTS)}:{random.choice(_SEARCHES)}")' - write_string(desc + '\n', out=sys.stdout) - sys.exit(0) - if opts.ap_list_mso: - table = [[mso_id, mso_info['name']] for mso_id, mso_info in MSO_INFO.items()] - write_string('Supported TV Providers:\n' + render_table(['mso', 'mso name'], table) + '\n', out=sys.stdout) - sys.exit(0) + # Importing GenericIE is currently slow since it imports YoutubeIE + from .extractor.generic import GenericIE - # Conflicting, missing and erroneous options - if opts.format == 'best': - warnings.append('.\n '.join(( - '"-f best" selects the best pre-merged format which is often not the best option', - 'To let yt-dlp download and merge the best available formats, simply do not pass any format selection', - 'If you know what you are doing and want only the best pre-merged format, use "-f b" instead to suppress this warning'))) - if opts.exec_cmd.get('before_dl') and opts.exec_before_dl_cmd: - parser.error('using "--exec-before-download" conflicts with "--exec before_dl:"') - if opts.usenetrc and (opts.username is not None or opts.password is not None): - parser.error('using .netrc conflicts with giving username/password') - if opts.password is not None and opts.username is None: - parser.error('account username missing\n') - if opts.ap_password is not None and opts.ap_username is None: - parser.error('TV Provider account username missing\n') - if 
opts.autonumber_size is not None: - if opts.autonumber_size <= 0: - parser.error('auto number size must be positive') - if opts.autonumber_start is not None: - if opts.autonumber_start < 0: - parser.error('auto number start must be positive or 0') - if opts.username is not None and opts.password is None: - opts.password = compat_getpass('Type account password and press [Return]: ') - if opts.ap_username is not None and opts.ap_password is None: - opts.ap_password = compat_getpass('Type TV provider account password and press [Return]: ') - if opts.ratelimit is not None: - numeric_limit = FileDownloader.parse_bytes(opts.ratelimit) - if numeric_limit is None: - parser.error('invalid rate limit specified') - opts.ratelimit = numeric_limit - if opts.throttledratelimit is not None: - numeric_limit = FileDownloader.parse_bytes(opts.throttledratelimit) - if numeric_limit is None: - parser.error('invalid rate limit specified') - opts.throttledratelimit = numeric_limit - if opts.min_filesize is not None: - numeric_limit = FileDownloader.parse_bytes(opts.min_filesize) - if numeric_limit is None: - parser.error('invalid min_filesize specified') - opts.min_filesize = numeric_limit - if opts.max_filesize is not None: - numeric_limit = FileDownloader.parse_bytes(opts.max_filesize) - if numeric_limit is None: - parser.error('invalid max_filesize specified') - opts.max_filesize = numeric_limit - if opts.sleep_interval is not None: - if opts.sleep_interval < 0: - parser.error('sleep interval must be positive or 0') - if opts.max_sleep_interval is not None: - if opts.max_sleep_interval < 0: - parser.error('max sleep interval must be positive or 0') - if opts.sleep_interval is None: - parser.error('min sleep interval must be specified, use --min-sleep-interval') - if opts.max_sleep_interval < opts.sleep_interval: - parser.error('max sleep interval must be greater than or equal to min sleep interval') + urls = dict.fromkeys(urls, False) + for ie in list_extractor_classes(opts.age_limit): + out += ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie.working() else '') + '\n' + if ie == GenericIE: + matched_urls = [url for url, matched in urls.items() if not matched] + else: + matched_urls = tuple(filter(ie.suitable, urls.keys())) + urls.update(dict.fromkeys(matched_urls, True)) + out += ''.join(f' {url}\n' for url in matched_urls) + elif opts.list_extractor_descriptions: + _SEARCHES = ('cute kittens', 'slithering pythons', 'falling cat', 'angry poodle', 'purple fish', 'running tortoise', 'sleeping bunny', 'burping cow') + out = '\n'.join( + ie.description(markdown=False, search_examples=_SEARCHES) + for ie in list_extractor_classes(opts.age_limit) if ie.working() and ie.IE_DESC is not False) + elif opts.ap_list_mso: + out = 'Supported TV Providers:\n%s\n' % render_table( + ['mso', 'mso name'], + [[mso_id, mso_info['name']] for mso_id, mso_info in MSO_INFO.items()]) else: - opts.max_sleep_interval = opts.sleep_interval - if opts.sleep_interval_subtitles is not None: - if opts.sleep_interval_subtitles < 0: - parser.error('subtitles sleep interval must be positive or 0') - if opts.sleep_interval_requests is not None: - if opts.sleep_interval_requests < 0: - parser.error('requests sleep interval must be positive or 0') - if opts.ap_mso and opts.ap_mso not in MSO_INFO: - parser.error('Unsupported TV Provider, use --ap-list-mso to get a list of supported TV Providers') - if opts.overwrites: # --yes-overwrites implies --no-continue - opts.continue_dl = False - if opts.concurrent_fragment_downloads <= 0: - 
parser.error('Concurrent fragments must be positive') - if opts.wait_for_video is not None: - min_wait, max_wait, *_ = map(parse_duration, opts.wait_for_video.split('-', 1) + [None]) - if min_wait is None or (max_wait is None and '-' in opts.wait_for_video): - parser.error('Invalid time range to wait') - elif max_wait is not None and max_wait < min_wait: - parser.error('Minimum time range to wait must not be longer than the maximum') - opts.wait_for_video = (min_wait, max_wait) + return False + write_string(out, out=sys.stdout) + return True - def parse_retries(retries, name=''): - if retries in ('inf', 'infinite'): - parsed_retries = float('inf') - else: - try: - parsed_retries = int(retries) - except (TypeError, ValueError): - parser.error('invalid %sretry count specified' % name) - return parsed_retries - if opts.retries is not None: - opts.retries = parse_retries(opts.retries) - if opts.file_access_retries is not None: - opts.file_access_retries = parse_retries(opts.file_access_retries, 'file access ') - if opts.fragment_retries is not None: - opts.fragment_retries = parse_retries(opts.fragment_retries, 'fragment ') - if opts.extractor_retries is not None: - opts.extractor_retries = parse_retries(opts.extractor_retries, 'extractor ') - if opts.buffersize is not None: - numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize) - if numeric_buffersize is None: - parser.error('invalid buffer size specified') - opts.buffersize = numeric_buffersize - if opts.http_chunk_size is not None: - numeric_chunksize = FileDownloader.parse_bytes(opts.http_chunk_size) - if not numeric_chunksize: - parser.error('invalid http chunk size specified') - opts.http_chunk_size = numeric_chunksize - if opts.playliststart <= 0: - raise parser.error('Playlist start must be positive') - if opts.playlistend not in (-1, None) and opts.playlistend < opts.playliststart: - raise parser.error('Playlist end must be greater than playlist start') - if opts.extractaudio: - opts.audioformat = opts.audioformat.lower() - if opts.audioformat not in ['best'] + list(FFmpegExtractAudioPP.SUPPORTED_EXTS): - parser.error('invalid audio format specified') - if opts.audioquality: - opts.audioquality = opts.audioquality.strip('k').strip('K') - audioquality = int_or_none(float_or_none(opts.audioquality)) # int_or_none prevents inf, nan - if audioquality is None or audioquality < 0: - parser.error('invalid audio quality specified') - if opts.recodevideo is not None: - opts.recodevideo = opts.recodevideo.replace(' ', '') - if not re.match(FFmpegVideoConvertorPP.FORMAT_RE, opts.recodevideo): - parser.error('invalid video remux format specified') - if opts.remuxvideo is not None: - opts.remuxvideo = opts.remuxvideo.replace(' ', '') - if not re.match(FFmpegVideoRemuxerPP.FORMAT_RE, opts.remuxvideo): - parser.error('invalid video remux format specified') - if opts.convertsubtitles is not None: - if opts.convertsubtitles not in FFmpegSubtitlesConvertorPP.SUPPORTED_EXTS: - parser.error('invalid subtitle format specified') - if opts.convertthumbnails is not None: - if opts.convertthumbnails not in FFmpegThumbnailsConvertorPP.SUPPORTED_EXTS: - parser.error('invalid thumbnail format specified') - if opts.cookiesfrombrowser is not None: - mobj = re.match(r'(?P<name>[^+:]+)(\s*\+\s*(?P<keyring>[^:]+))?(\s*:(?P<profile>.+))?', opts.cookiesfrombrowser) - if mobj is None: - parser.error(f'invalid cookies from browser arguments: {opts.cookiesfrombrowser}') - browser_name, keyring, profile = mobj.group('name', 'keyring', 'profile') - browser_name = browser_name.lower() - 
if browser_name not in SUPPORTED_BROWSERS: - parser.error(f'unsupported browser specified for cookies: "{browser_name}". ' - f'Supported browsers are: {", ".join(sorted(SUPPORTED_BROWSERS))}') - if keyring is not None: - keyring = keyring.upper() - if keyring not in SUPPORTED_KEYRINGS: - parser.error(f'unsupported keyring specified for cookies: "{keyring}". ' - f'Supported keyrings are: {", ".join(sorted(SUPPORTED_KEYRINGS))}') - opts.cookiesfrombrowser = (browser_name, profile, keyring) - geo_bypass_code = opts.geo_bypass_ip_block or opts.geo_bypass_country - if geo_bypass_code is not None: - try: - GeoUtils.random_ipv4(geo_bypass_code) - except Exception: - parser.error('unsupported geo-bypass country or ip-block') - - if opts.date is not None: - date = DateRange.day(opts.date) - else: - date = DateRange(opts.dateafter, opts.datebefore) - - compat_opts = opts.compat_opts - - def report_conflict(arg1, arg2): - warnings.append(f'{arg2} is ignored since {arg1} was given') +def set_compat_opts(opts): def _unused_compat_opt(name): - if name not in compat_opts: + if name not in opts.compat_opts: return False - compat_opts.discard(name) - compat_opts.update(['*%s' % name]) + opts.compat_opts.discard(name) + opts.compat_opts.update(['*%s' % name]) return True def set_default_compat(compat_name, opt_name, default=True, remove_compat=True): attr = getattr(opts, opt_name) - if compat_name in compat_opts: + if compat_name in opts.compat_opts: if attr is None: setattr(opts, opt_name, not default) return True @@ -323,36 +145,160 @@ def _real_main(argv=None): set_default_compat('abort-on-error', 'ignoreerrors', 'only_download') set_default_compat('no-playlist-metafiles', 'allow_playlist_files') set_default_compat('no-clean-infojson', 'clean_infojson') - if 'no-attach-info-json' in compat_opts: + if 'no-attach-info-json' in opts.compat_opts: if opts.embed_infojson: _unused_compat_opt('no-attach-info-json') else: opts.embed_infojson = False - if 'format-sort' in compat_opts: - opts.format_sort.extend(InfoExtractor.FormatSort.ytdl_default) + if 'format-sort' in opts.compat_opts: + opts.format_sort.extend(FormatSorter.ytdl_default) _video_multistreams_set = set_default_compat('multistreams', 'allow_multiple_video_streams', False, remove_compat=False) _audio_multistreams_set = set_default_compat('multistreams', 'allow_multiple_audio_streams', False, remove_compat=False) if _video_multistreams_set is False and _audio_multistreams_set is False: _unused_compat_opt('multistreams') - outtmpl_default = opts.outtmpl.get('default') - if outtmpl_default == '': - outtmpl_default, opts.skip_download = None, True - del opts.outtmpl['default'] - if opts.useid: - if outtmpl_default is None: - outtmpl_default = opts.outtmpl['default'] = '%(id)s.%(ext)s' - else: - report_conflict('--output', '--id') - if 'filename' in compat_opts: - if outtmpl_default is None: - outtmpl_default = opts.outtmpl['default'] = '%(title)s-%(id)s.%(ext)s' + if 'filename' in opts.compat_opts: + if opts.outtmpl.get('default') is None: + opts.outtmpl.update({'default': '%(title)s-%(id)s.%(ext)s'}) else: _unused_compat_opt('filename') + +def validate_options(opts): + def validate(cndn, name, value=None, msg=None): + if cndn: + return True + raise ValueError((msg or 'invalid {name} "{value}" given').format(name=name, value=value)) + + def validate_in(name, value, items, msg=None): + return validate(value is None or value in items, name, value, msg) + + def validate_regex(name, value, regex): + return validate(value is None or re.match(regex, value), 
name, value) + + def validate_positive(name, value, strict=False): + return validate(value is None or value > 0 or (not strict and value == 0), + name, value, '{name} "{value}" must be positive' + ('' if strict else ' or 0')) + + def validate_minmax(min_val, max_val, min_name, max_name=None): + if max_val is None or min_val is None or max_val >= min_val: + return + if not max_name: + min_name, max_name = f'min {min_name}', f'max {min_name}' + raise ValueError(f'{max_name} "{max_val}" must be greater than or equal to {min_name} "{min_val}"') + + # Usernames and passwords + validate(not opts.usenetrc or (opts.username is None and opts.password is None), + '.netrc', msg='using {name} conflicts with giving username/password') + validate(opts.password is None or opts.username is not None, 'account username', msg='{name} missing') + validate(opts.ap_password is None or opts.ap_username is not None, + 'TV Provider account username', msg='{name} missing') + validate_in('TV Provider', opts.ap_mso, MSO_INFO, + 'Unsupported {name} "{value}", use --ap-list-mso to get a list of supported TV Providers') + + # Numbers + validate_positive('autonumber start', opts.autonumber_start) + validate_positive('autonumber size', opts.autonumber_size, True) + validate_positive('concurrent fragments', opts.concurrent_fragment_downloads, True) + validate_positive('playlist start', opts.playliststart, True) + if opts.playlistend != -1: + validate_minmax(opts.playliststart, opts.playlistend, 'playlist start', 'playlist end') + + # Time ranges + validate_positive('subtitles sleep interval', opts.sleep_interval_subtitles) + validate_positive('requests sleep interval', opts.sleep_interval_requests) + validate_positive('sleep interval', opts.sleep_interval) + validate_positive('max sleep interval', opts.max_sleep_interval) + if opts.sleep_interval is None: + validate( + opts.max_sleep_interval is None, 'min sleep interval', + msg='{name} must be specified; use --min-sleep-interval') + elif opts.max_sleep_interval is None: + opts.max_sleep_interval = opts.sleep_interval + else: + validate_minmax(opts.sleep_interval, opts.max_sleep_interval, 'sleep interval') + + if opts.wait_for_video is not None: + min_wait, max_wait, *_ = map(parse_duration, opts.wait_for_video.split('-', 1) + [None]) + validate(min_wait is not None and not (max_wait is None and '-' in opts.wait_for_video), + 'time range to wait for video', opts.wait_for_video) + validate_minmax(min_wait, max_wait, 'time range to wait for video') + opts.wait_for_video = (min_wait, max_wait) + + # Format sort + for f in opts.format_sort: + validate_regex('format sorting', f, FormatSorter.regex) + + # Postprocessor formats + validate_regex('merge output format', opts.merge_output_format, + r'({0})(/({0}))*'.format('|'.join(map(re.escape, FFmpegMergerPP.SUPPORTED_EXTS)))) + validate_regex('audio format', opts.audioformat, FFmpegExtractAudioPP.FORMAT_RE) + validate_in('subtitle format', opts.convertsubtitles, FFmpegSubtitlesConvertorPP.SUPPORTED_EXTS) + validate_regex('thumbnail format', opts.convertthumbnails, FFmpegThumbnailsConvertorPP.FORMAT_RE) + validate_regex('recode video format', opts.recodevideo, FFmpegVideoConvertorPP.FORMAT_RE) + validate_regex('remux video format', opts.remuxvideo, FFmpegVideoRemuxerPP.FORMAT_RE) + if opts.audioquality: + opts.audioquality = opts.audioquality.strip('k').strip('K') + # int_or_none prevents inf, nan + validate_positive('audio quality', int_or_none(float_or_none(opts.audioquality), default=0)) + + # Retries + def 
parse_retries(name, value): + if value is None: + return None + elif value in ('inf', 'infinite'): + return float('inf') + try: + return int(value) + except (TypeError, ValueError): + validate(False, f'{name} retry count', value) + + opts.retries = parse_retries('download', opts.retries) + opts.fragment_retries = parse_retries('fragment', opts.fragment_retries) + opts.extractor_retries = parse_retries('extractor', opts.extractor_retries) + opts.file_access_retries = parse_retries('file access', opts.file_access_retries) + + # Retry sleep function + def parse_sleep_func(expr): + NUMBER_RE = r'\d+(?:\.\d+)?' + op, start, limit, step, *_ = tuple(re.fullmatch( + rf'(?:(linear|exp)=)?({NUMBER_RE})(?::({NUMBER_RE})?)?(?::({NUMBER_RE}))?', + expr.strip()).groups()) + (None, None) + + if op == 'exp': + return lambda n: min(float(start) * (float(step or 2) ** n), float(limit or 'inf')) + else: + default_step = start if op or limit else 0 + return lambda n: min(float(start) + float(step or default_step) * n, float(limit or 'inf')) + + for key, expr in opts.retry_sleep.items(): + if not expr: + del opts.retry_sleep[key] + continue + try: + opts.retry_sleep[key] = parse_sleep_func(expr) + except AttributeError: + raise ValueError(f'invalid {key} retry sleep expression {expr!r}') + + # Bytes + def validate_bytes(name, value): + if value is None: + return None + numeric_limit = parse_bytes(value) + validate(numeric_limit is not None, 'rate limit', value) + return numeric_limit + + opts.ratelimit = validate_bytes('rate limit', opts.ratelimit) + opts.throttledratelimit = validate_bytes('throttled rate limit', opts.throttledratelimit) + opts.min_filesize = validate_bytes('min filesize', opts.min_filesize) + opts.max_filesize = validate_bytes('max filesize', opts.max_filesize) + opts.buffersize = validate_bytes('buffer size', opts.buffersize) + opts.http_chunk_size = validate_bytes('http chunk size', opts.http_chunk_size) + + # Output templates def validate_outtmpl(tmpl, msg): err = YoutubeDL.validate_outtmpl(tmpl) if err: - parser.error('invalid %s %r: %s' % (msg, tmpl, error_to_compat_str(err))) + raise ValueError(f'invalid {msg} "{tmpl}": {err}') for k, tmpl in opts.outtmpl.items(): validate_outtmpl(tmpl, f'{k} output template') @@ -361,32 +307,74 @@ def _real_main(argv=None): validate_outtmpl(tmpl, f'{type_} print template') for type_, tmpl_list in opts.print_to_file.items(): for tmpl, file in tmpl_list: - validate_outtmpl(tmpl, f'{type_} print-to-file template') - validate_outtmpl(file, f'{type_} print-to-file filename') + validate_outtmpl(tmpl, f'{type_} print to file template') + validate_outtmpl(file, f'{type_} print to file filename') validate_outtmpl(opts.sponsorblock_chapter_title, 'SponsorBlock chapter title') for k, tmpl in opts.progress_template.items(): k = f'{k[:-6]} console title' if '-title' in k else f'{k} progress' validate_outtmpl(tmpl, f'{k} template') - if opts.extractaudio and not opts.keepvideo and opts.format is None: - opts.format = 'bestaudio/best' + outtmpl_default = opts.outtmpl.get('default') + if outtmpl_default == '': + opts.skip_download = None + del opts.outtmpl['default'] + if outtmpl_default and not os.path.splitext(outtmpl_default)[1] and opts.extractaudio: + raise ValueError( + 'Cannot download a video and extract audio into the same file! 
' + f'Use "{outtmpl_default}.%(ext)s" instead of "{outtmpl_default}" as the output template') - if outtmpl_default is not None and not os.path.splitext(outtmpl_default)[1] and opts.extractaudio: - parser.error('Cannot download a video and extract audio into the same' - ' file! Use "{0}.%(ext)s" instead of "{0}" as the output' - ' template'.format(outtmpl_default)) + def parse_chapters(name, value): + chapters, ranges = [], [] + parse_timestamp = lambda x: float('inf') if x in ('inf', 'infinite') else parse_duration(x) + for regex in value or []: + if regex.startswith('*'): + for range_ in map(str.strip, regex[1:].split(',')): + mobj = range_ != '-' and re.fullmatch(r'([^-]+)?\s*-\s*([^-]+)?', range_) + dur = mobj and (parse_timestamp(mobj.group(1) or '0'), parse_timestamp(mobj.group(2) or 'inf')) + if None in (dur or [None]): + raise ValueError(f'invalid {name} time range "{regex}". Must be of the form "*start-end"') + ranges.append(dur) + continue + try: + chapters.append(re.compile(regex)) + except re.error as err: + raise ValueError(f'invalid {name} regex "{regex}" - {err}') + return chapters, ranges - for f in opts.format_sort: - if re.match(InfoExtractor.FormatSort.regex, f) is None: - parser.error('invalid format sort string "%s" specified' % f) + opts.remove_chapters, opts.remove_ranges = parse_chapters('--remove-chapters', opts.remove_chapters) + opts.download_ranges = download_range_func(*parse_chapters('--download-sections', opts.download_ranges)) + # Cookies from browser + if opts.cookiesfrombrowser: + container = None + mobj = re.fullmatch(r'''(?x) + (?P<name>[^+:]+) + (?:\s*\+\s*(?P<keyring>[^:]+))? + (?:\s*:\s*(?!:)(?P<profile>.+?))? + (?:\s*::\s*(?P<container>.+))? + ''', opts.cookiesfrombrowser) + if mobj is None: + raise ValueError(f'invalid cookies from browser arguments: {opts.cookiesfrombrowser}') + browser_name, keyring, profile, container = mobj.group('name', 'keyring', 'profile', 'container') + browser_name = browser_name.lower() + if browser_name not in SUPPORTED_BROWSERS: + raise ValueError(f'unsupported browser specified for cookies: "{browser_name}". ' + f'Supported browsers are: {", ".join(sorted(SUPPORTED_BROWSERS))}') + if keyring is not None: + keyring = keyring.upper() + if keyring not in SUPPORTED_KEYRINGS: + raise ValueError(f'unsupported keyring specified for cookies: "{keyring}". 
' + f'Supported keyrings are: {", ".join(sorted(SUPPORTED_KEYRINGS))}') + opts.cookiesfrombrowser = (browser_name, profile, keyring, container) + + # MetadataParser def metadataparser_actions(f): if isinstance(f, str): cmd = '--parse-metadata %s' % compat_shlex_quote(f) try: actions = [MetadataFromFieldPP.to_action(f)] except Exception as err: - parser.error(f'{cmd} is invalid; {err}') + raise ValueError(f'{cmd} is invalid; {err}') else: cmd = '--replace-in-metadata %s' % ' '.join(map(compat_shlex_quote, f)) actions = ((MetadataParserPP.Actions.REPLACE, x, *f[1:]) for x in f[0].split(',')) @@ -395,237 +383,128 @@ def _real_main(argv=None): try: MetadataParserPP.validate_action(*action) except Exception as err: - parser.error(f'{cmd} is invalid; {err}') + raise ValueError(f'{cmd} is invalid; {err}') yield action - if opts.parse_metadata is None: - opts.parse_metadata = [] if opts.metafromtitle is not None: - opts.parse_metadata.append('title:%s' % opts.metafromtitle) - opts.parse_metadata = list(itertools.chain(*map(metadataparser_actions, opts.parse_metadata))) + opts.parse_metadata.setdefault('pre_process', []).append('title:%s' % opts.metafromtitle) + opts.parse_metadata = { + k: list(itertools.chain(*map(metadataparser_actions, v))) + for k, v in opts.parse_metadata.items() + } - any_getting = (any(opts.forceprint.values()) or opts.dumpjson or opts.dump_single_json - or opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail - or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration) + # Other options + if opts.playlist_items is not None: + try: + tuple(PlaylistEntries.parse_playlist_items(opts.playlist_items)) + except Exception as err: + raise ValueError(f'Invalid playlist-items {opts.playlist_items!r}: {err}') - any_printing = opts.print_json - download_archive_fn = expand_path(opts.download_archive) if opts.download_archive is not None else opts.download_archive + geo_bypass_code = opts.geo_bypass_ip_block or opts.geo_bypass_country + if geo_bypass_code is not None: + try: + GeoUtils.random_ipv4(geo_bypass_code) + except Exception: + raise ValueError('unsupported geo-bypass country or ip-block') - # If JSON is not printed anywhere, but comments are requested, save it to file - printing_json = opts.dumpjson or opts.print_json or opts.dump_single_json - if opts.getcomments and not printing_json: - opts.writeinfojson = True + opts.match_filter = match_filter_func(opts.match_filter) + + if opts.download_archive is not None: + opts.download_archive = expand_path(opts.download_archive) + + if opts.ffmpeg_location is not None: + opts.ffmpeg_location = expand_path(opts.ffmpeg_location) + + if opts.user_agent is not None: + opts.headers.setdefault('User-Agent', opts.user_agent) + if opts.referer is not None: + opts.headers.setdefault('Referer', opts.referer) if opts.no_sponsorblock: - opts.sponsorblock_mark = set() - opts.sponsorblock_remove = set() - sponsorblock_query = opts.sponsorblock_mark | opts.sponsorblock_remove + opts.sponsorblock_mark = opts.sponsorblock_remove = set() - opts.remove_chapters = opts.remove_chapters or [] + default_downloader = None + for proto, path in opts.external_downloader.items(): + if path == 'native': + continue + ed = get_external_downloader(path) + if ed is None: + raise ValueError( + f'No such {format_field(proto, None, "%s ", ignore="default")}external downloader "{path}"') + elif ed and proto == 'default': + default_downloader = ed.get_basename() - if (opts.remove_chapters or sponsorblock_query) and opts.sponskrub is not 
False: - if opts.sponskrub: - if opts.remove_chapters: - report_conflict('--remove-chapters', '--sponskrub') - if opts.sponsorblock_mark: - report_conflict('--sponsorblock-mark', '--sponskrub') - if opts.sponsorblock_remove: - report_conflict('--sponsorblock-remove', '--sponskrub') - opts.sponskrub = False - if opts.sponskrub_cut and opts.split_chapters and opts.sponskrub is not False: - report_conflict('--split-chapter', '--sponskrub-cut') - opts.sponskrub_cut = False + warnings, deprecation_warnings = [], [] - if opts.remuxvideo and opts.recodevideo: - report_conflict('--recode-video', '--remux-video') - opts.remuxvideo = False + # Common mistake: -f best + if opts.format == 'best': + warnings.append('.\n '.join(( + '"-f best" selects the best pre-merged format which is often not the best option', + 'To let yt-dlp download and merge the best available formats, simply do not pass any format selection', + 'If you know what you are doing and want only the best pre-merged format, use "-f b" instead to suppress this warning'))) - if opts.allow_unplayable_formats: - def report_unplayable_conflict(opt_name, arg, default=False, allowed=None): - val = getattr(opts, opt_name) - if (not allowed and val) or (allowed and not allowed(val)): - report_conflict('--allow-unplayable-formats', arg) - setattr(opts, opt_name, default) + # --(postprocessor/downloader)-args without name + def report_args_compat(name, value, key1, key2=None, where=None): + if key1 in value and key2 not in value: + warnings.append(f'{name.title()} arguments given without specifying name. ' + f'The arguments will be given to {where or f"all {name}s"}') + return True + return False - report_unplayable_conflict('extractaudio', '--extract-audio') - report_unplayable_conflict('remuxvideo', '--remux-video') - report_unplayable_conflict('recodevideo', '--recode-video') - report_unplayable_conflict('addmetadata', '--embed-metadata') - report_unplayable_conflict('addchapters', '--embed-chapters') - report_unplayable_conflict('embed_infojson', '--embed-info-json') - opts.embed_infojson = False - report_unplayable_conflict('embedsubtitles', '--embed-subs') - report_unplayable_conflict('embedthumbnail', '--embed-thumbnail') - report_unplayable_conflict('xattrs', '--xattrs') - report_unplayable_conflict('fixup', '--fixup', default='never', allowed=lambda x: x in (None, 'never', 'ignore')) - opts.fixup = 'never' - report_unplayable_conflict('remove_chapters', '--remove-chapters', default=[]) - report_unplayable_conflict('sponsorblock_remove', '--sponsorblock-remove', default=set()) - report_unplayable_conflict('sponskrub', '--sponskrub', default=set()) - opts.sponskrub = False + if report_args_compat('external downloader', opts.external_downloader_args, + 'default', where=default_downloader) and default_downloader: + # Compat with youtube-dl's behavior. 
See https://github.com/ytdl-org/youtube-dl/commit/49c5293014bc11ec8c009856cd63cffa6296c1e1 + opts.external_downloader_args.setdefault(default_downloader, opts.external_downloader_args.pop('default')) - if (opts.addmetadata or opts.sponsorblock_mark) and opts.addchapters is None: - opts.addchapters = True - - # PostProcessors - postprocessors = list(opts.add_postprocessors) - if sponsorblock_query: - postprocessors.append({ - 'key': 'SponsorBlock', - 'categories': sponsorblock_query, - 'api': opts.sponsorblock_api, - # Run this immediately after extraction is complete - 'when': 'pre_process' - }) - if opts.parse_metadata: - postprocessors.append({ - 'key': 'MetadataParser', - 'actions': opts.parse_metadata, - # Run this immediately after extraction is complete - 'when': 'pre_process' - }) - if opts.convertsubtitles: - postprocessors.append({ - 'key': 'FFmpegSubtitlesConvertor', - 'format': opts.convertsubtitles, - # Run this before the actual video download - 'when': 'before_dl' - }) - if opts.convertthumbnails: - postprocessors.append({ - 'key': 'FFmpegThumbnailsConvertor', - 'format': opts.convertthumbnails, - # Run this before the actual video download - 'when': 'before_dl' - }) - if opts.extractaudio: - postprocessors.append({ - 'key': 'FFmpegExtractAudio', - 'preferredcodec': opts.audioformat, - 'preferredquality': opts.audioquality, - 'nopostoverwrites': opts.nopostoverwrites, - }) - if opts.remuxvideo: - postprocessors.append({ - 'key': 'FFmpegVideoRemuxer', - 'preferedformat': opts.remuxvideo, - }) - if opts.recodevideo: - postprocessors.append({ - 'key': 'FFmpegVideoConvertor', - 'preferedformat': opts.recodevideo, - }) - # If ModifyChapters is going to remove chapters, subtitles must already be in the container. - if opts.embedsubtitles: - already_have_subtitle = opts.writesubtitles and 'no-keep-subs' not in compat_opts - postprocessors.append({ - 'key': 'FFmpegEmbedSubtitle', - # already_have_subtitle = True prevents the file from being deleted after embedding - 'already_have_subtitle': already_have_subtitle - }) - if not opts.writeautomaticsub and 'no-keep-subs' not in compat_opts: - opts.writesubtitles = True - # --all-sub automatically sets --write-sub if --write-auto-sub is not given - # this was the old behaviour if only --all-sub was given. - if opts.allsubtitles and not opts.writeautomaticsub: - opts.writesubtitles = True - # ModifyChapters must run before FFmpegMetadataPP - remove_chapters_patterns, remove_ranges = [], [] - for regex in opts.remove_chapters: - if regex.startswith('*'): - dur = list(map(parse_duration, regex[1:].split('-'))) - if len(dur) == 2 and all(t is not None for t in dur): - remove_ranges.append(tuple(dur)) - continue - parser.error(f'invalid --remove-chapters time range {regex!r}. Must be of the form *start-end') - try: - remove_chapters_patterns.append(re.compile(regex)) - except re.error as err: - parser.error(f'invalid --remove-chapters regex {regex!r} - {err}') - if opts.remove_chapters or sponsorblock_query: - postprocessors.append({ - 'key': 'ModifyChapters', - 'remove_chapters_patterns': remove_chapters_patterns, - 'remove_sponsor_segments': opts.sponsorblock_remove, - 'remove_ranges': remove_ranges, - 'sponsorblock_chapter_title': opts.sponsorblock_chapter_title, - 'force_keyframes': opts.force_keyframes_at_cuts - }) - # FFmpegMetadataPP should be run after FFmpegVideoConvertorPP and - # FFmpegExtractAudioPP as containers before conversion may not support - # metadata (3gp, webm, etc.) 
- # By default ffmpeg preserves metadata applicable for both - # source and target containers. From this point the container won't change, - # so metadata can be added here. - if opts.addmetadata or opts.addchapters or opts.embed_infojson: - if opts.embed_infojson is None: - opts.embed_infojson = 'if_exists' - postprocessors.append({ - 'key': 'FFmpegMetadata', - 'add_chapters': opts.addchapters, - 'add_metadata': opts.addmetadata, - 'add_infojson': opts.embed_infojson, - }) - # Deprecated - # This should be above EmbedThumbnail since sponskrub removes the thumbnail attachment - # but must be below EmbedSubtitle and FFmpegMetadata - # See https://github.com/yt-dlp/yt-dlp/issues/204 , https://github.com/faissaloo/SponSkrub/issues/29 - # If opts.sponskrub is None, sponskrub is used, but it silently fails if the executable can't be found - if opts.sponskrub is not False: - postprocessors.append({ - 'key': 'SponSkrub', - 'path': opts.sponskrub_path, - 'args': opts.sponskrub_args, - 'cut': opts.sponskrub_cut, - 'force': opts.sponskrub_force, - 'ignoreerror': opts.sponskrub is None, - '_from_cli': True, - }) - if opts.embedthumbnail: - postprocessors.append({ - 'key': 'EmbedThumbnail', - # already_have_thumbnail = True prevents the file from being deleted after embedding - 'already_have_thumbnail': opts.writethumbnail - }) - if not opts.writethumbnail: - opts.writethumbnail = True - opts.outtmpl['pl_thumbnail'] = '' - if opts.split_chapters: - postprocessors.append({ - 'key': 'FFmpegSplitChapters', - 'force_keyframes': opts.force_keyframes_at_cuts, - }) - # XAttrMetadataPP should be run after post-processors that may change file contents - if opts.xattrs: - postprocessors.append({'key': 'XAttrMetadata'}) - if opts.concat_playlist != 'never': - postprocessors.append({ - 'key': 'FFmpegConcat', - 'only_multi_video': opts.concat_playlist != 'always', - 'when': 'playlist', - }) - # Exec must be the last PP of each category - if opts.exec_before_dl_cmd: - opts.exec_cmd.setdefault('before_dl', opts.exec_before_dl_cmd) - for when, exec_cmd in opts.exec_cmd.items(): - postprocessors.append({ - 'key': 'Exec', - 'exec_cmd': exec_cmd, - # Run this only after the files have been moved to their final locations - 'when': when, - }) - - def report_args_compat(arg, name): - warnings.append('%s given without specifying name. 
The arguments will be given to all %s' % (arg, name)) - - if 'default' in opts.external_downloader_args: - report_args_compat('--downloader-args', 'external downloaders') - - if 'default-compat' in opts.postprocessor_args and 'default' not in opts.postprocessor_args: - report_args_compat('--post-processor-args', 'post-processors') + if report_args_compat('post-processor', opts.postprocessor_args, 'default-compat', 'default'): + opts.postprocessor_args['default'] = opts.postprocessor_args.pop('default-compat') opts.postprocessor_args.setdefault('sponskrub', []) - opts.postprocessor_args['default'] = opts.postprocessor_args['default-compat'] + def report_conflict(arg1, opt1, arg2='--allow-unplayable-formats', opt2='allow_unplayable_formats', + val1=NO_DEFAULT, val2=NO_DEFAULT, default=False): + if val2 is NO_DEFAULT: + val2 = getattr(opts, opt2) + if not val2: + return + + if val1 is NO_DEFAULT: + val1 = getattr(opts, opt1) + if val1: + warnings.append(f'{arg1} is ignored since {arg2} was given') + setattr(opts, opt1, default) + + # Conflicting options + report_conflict('--playlist-reverse', 'playlist_reverse', '--playlist-random', 'playlist_random') + report_conflict('--playlist-reverse', 'playlist_reverse', '--lazy-playlist', 'lazy_playlist') + report_conflict('--playlist-random', 'playlist_random', '--lazy-playlist', 'lazy_playlist') + report_conflict('--dateafter', 'dateafter', '--date', 'date', default=None) + report_conflict('--datebefore', 'datebefore', '--date', 'date', default=None) + report_conflict('--exec-before-download', 'exec_before_dl_cmd', + '"--exec before_dl:"', 'exec_cmd', val2=opts.exec_cmd.get('before_dl')) + report_conflict('--id', 'useid', '--output', 'outtmpl', val2=opts.outtmpl.get('default')) + report_conflict('--remux-video', 'remuxvideo', '--recode-video', 'recodevideo') + report_conflict('--sponskrub', 'sponskrub', '--remove-chapters', 'remove_chapters') + report_conflict('--sponskrub', 'sponskrub', '--sponsorblock-mark', 'sponsorblock_mark') + report_conflict('--sponskrub', 'sponskrub', '--sponsorblock-remove', 'sponsorblock_remove') + report_conflict('--sponskrub-cut', 'sponskrub_cut', '--split-chapter', 'split_chapters', + val1=opts.sponskrub and opts.sponskrub_cut) + + # Conflicts with --allow-unplayable-formats + report_conflict('--embed-metadata', 'addmetadata') + report_conflict('--embed-chapters', 'addchapters') + report_conflict('--embed-info-json', 'embed_infojson') + report_conflict('--embed-subs', 'embedsubtitles') + report_conflict('--embed-thumbnail', 'embedthumbnail') + report_conflict('--extract-audio', 'extractaudio') + report_conflict('--fixup', 'fixup', val1=opts.fixup not in (None, 'never', 'ignore'), default='never') + report_conflict('--recode-video', 'recodevideo') + report_conflict('--remove-chapters', 'remove_chapters', default=[]) + report_conflict('--remux-video', 'remuxvideo') + report_conflict('--sponskrub', 'sponskrub') + report_conflict('--sponsorblock-remove', 'sponsorblock_remove', default=set()) + report_conflict('--xattrs', 'xattrs') + + # Fully deprecated options def report_deprecation(val, old, new=None): if not val: return @@ -635,21 +514,224 @@ def _real_main(argv=None): report_deprecation(opts.sponskrub, '--sponskrub', '--sponsorblock-mark or --sponsorblock-remove') report_deprecation(not opts.prefer_ffmpeg, '--prefer-avconv', 'ffmpeg') - report_deprecation(opts.include_ads, '--include-ads') + # report_deprecation(opts.include_ads, '--include-ads') # We may re-implement this in future # report_deprecation(opts.call_home, 
'--call-home') # We may re-implement this in future # report_deprecation(opts.writeannotations, '--write-annotations') # It's just that no website has it + # Dependent options + opts.date = DateRange.day(opts.date) if opts.date else DateRange(opts.dateafter, opts.datebefore) + + if opts.exec_before_dl_cmd: + opts.exec_cmd['before_dl'] = opts.exec_before_dl_cmd + + if opts.useid: # --id is not deprecated in youtube-dl + opts.outtmpl['default'] = '%(id)s.%(ext)s' + + if opts.overwrites: # --force-overwrites implies --no-continue + opts.continue_dl = False + + if (opts.addmetadata or opts.sponsorblock_mark) and opts.addchapters is None: + # Add chapters when adding metadata or marking sponsors + opts.addchapters = True + + if opts.extractaudio and not opts.keepvideo and opts.format is None: + # Do not unnecessarily download audio + opts.format = 'bestaudio/best' + + if opts.getcomments and opts.writeinfojson is None and not opts.embed_infojson: + # If JSON is not printed anywhere, but comments are requested, save it to file + if not opts.dumpjson or opts.print_json or opts.dump_single_json: + opts.writeinfojson = True + + if opts.allsubtitles and not (opts.embedsubtitles or opts.writeautomaticsub): + # --all-sub automatically sets --write-sub if --write-auto-sub is not given + opts.writesubtitles = True + + if opts.addmetadata and opts.embed_infojson is None: + # If embedding metadata and infojson is present, embed it + opts.embed_infojson = 'if_exists' + + # Ask for passwords + if opts.username is not None and opts.password is None: + opts.password = getpass.getpass('Type account password and press [Return]: ') + if opts.ap_username is not None and opts.ap_password is None: + opts.ap_password = getpass.getpass('Type TV provider account password and press [Return]: ') + + return warnings, deprecation_warnings + + +def get_postprocessors(opts): + yield from opts.add_postprocessors + + for when, actions in opts.parse_metadata.items(): + yield { + 'key': 'MetadataParser', + 'actions': actions, + 'when': when + } + sponsorblock_query = opts.sponsorblock_mark | opts.sponsorblock_remove + if sponsorblock_query: + yield { + 'key': 'SponsorBlock', + 'categories': sponsorblock_query, + 'api': opts.sponsorblock_api, + 'when': 'after_filter' + } + if opts.convertsubtitles: + yield { + 'key': 'FFmpegSubtitlesConvertor', + 'format': opts.convertsubtitles, + 'when': 'before_dl' + } + if opts.convertthumbnails: + yield { + 'key': 'FFmpegThumbnailsConvertor', + 'format': opts.convertthumbnails, + 'when': 'before_dl' + } + if opts.extractaudio: + yield { + 'key': 'FFmpegExtractAudio', + 'preferredcodec': opts.audioformat, + 'preferredquality': opts.audioquality, + 'nopostoverwrites': opts.nopostoverwrites, + } + if opts.remuxvideo: + yield { + 'key': 'FFmpegVideoRemuxer', + 'preferedformat': opts.remuxvideo, + } + if opts.recodevideo: + yield { + 'key': 'FFmpegVideoConvertor', + 'preferedformat': opts.recodevideo, + } + # If ModifyChapters is going to remove chapters, subtitles must already be in the container. 
+ if opts.embedsubtitles: + keep_subs = 'no-keep-subs' not in opts.compat_opts + yield { + 'key': 'FFmpegEmbedSubtitle', + # already_have_subtitle = True prevents the file from being deleted after embedding + 'already_have_subtitle': opts.writesubtitles and keep_subs + } + if not opts.writeautomaticsub and keep_subs: + opts.writesubtitles = True + + # ModifyChapters must run before FFmpegMetadataPP + if opts.remove_chapters or sponsorblock_query: + yield { + 'key': 'ModifyChapters', + 'remove_chapters_patterns': opts.remove_chapters, + 'remove_sponsor_segments': opts.sponsorblock_remove, + 'remove_ranges': opts.remove_ranges, + 'sponsorblock_chapter_title': opts.sponsorblock_chapter_title, + 'force_keyframes': opts.force_keyframes_at_cuts + } + # FFmpegMetadataPP should be run after FFmpegVideoConvertorPP and + # FFmpegExtractAudioPP as containers before conversion may not support + # metadata (3gp, webm, etc.) + # By default ffmpeg preserves metadata applicable for both + # source and target containers. From this point the container won't change, + # so metadata can be added here. + if opts.addmetadata or opts.addchapters or opts.embed_infojson: + yield { + 'key': 'FFmpegMetadata', + 'add_chapters': opts.addchapters, + 'add_metadata': opts.addmetadata, + 'add_infojson': opts.embed_infojson, + } + # Deprecated + # This should be above EmbedThumbnail since sponskrub removes the thumbnail attachment + # but must be below EmbedSubtitle and FFmpegMetadata + # See https://github.com/yt-dlp/yt-dlp/issues/204 , https://github.com/faissaloo/SponSkrub/issues/29 + # If opts.sponskrub is None, sponskrub is used, but it silently fails if the executable can't be found + if opts.sponskrub is not False: + yield { + 'key': 'SponSkrub', + 'path': opts.sponskrub_path, + 'args': opts.sponskrub_args, + 'cut': opts.sponskrub_cut, + 'force': opts.sponskrub_force, + 'ignoreerror': opts.sponskrub is None, + '_from_cli': True, + } + if opts.embedthumbnail: + yield { + 'key': 'EmbedThumbnail', + # already_have_thumbnail = True prevents the file from being deleted after embedding + 'already_have_thumbnail': opts.writethumbnail + } + if not opts.writethumbnail: + opts.writethumbnail = True + opts.outtmpl['pl_thumbnail'] = '' + if opts.split_chapters: + yield { + 'key': 'FFmpegSplitChapters', + 'force_keyframes': opts.force_keyframes_at_cuts, + } + # XAttrMetadataPP should be run after post-processors that may change file contents + if opts.xattrs: + yield {'key': 'XAttrMetadata'} + if opts.concat_playlist != 'never': + yield { + 'key': 'FFmpegConcat', + 'only_multi_video': opts.concat_playlist != 'always', + 'when': 'playlist', + } + # Exec must be the last PP of each category + for when, exec_cmd in opts.exec_cmd.items(): + yield { + 'key': 'Exec', + 'exec_cmd': exec_cmd, + 'when': when, + } + + +ParsedOptions = collections.namedtuple('ParsedOptions', ('parser', 'options', 'urls', 'ydl_opts')) + + +def parse_options(argv=None): + """@returns ParsedOptions(parser, opts, urls, ydl_opts)""" + parser, opts, urls = parseOpts(argv) + urls = get_urls(urls, opts.batchfile, opts.verbose) + + set_compat_opts(opts) + try: + warnings, deprecation_warnings = validate_options(opts) + except ValueError as err: + parser.error(f'{err}\n') + + postprocessors = list(get_postprocessors(opts)) + + print_only = bool(opts.forceprint) and all(k not in opts.forceprint for k in POSTPROCESS_WHEN[3:]) + any_getting = any(getattr(opts, k) for k in ( + 'dumpjson', 'dump_single_json', 'getdescription', 'getduration', 'getfilename', + 'getformat', 
'getid', 'getthumbnail', 'gettitle', 'geturl' + )) + opts.quiet = opts.quiet or any_getting or opts.print_json or bool(opts.forceprint) + + playlist_pps = [pp for pp in postprocessors if pp.get('when') == 'playlist'] + write_playlist_infojson = (opts.writeinfojson and not opts.clean_infojson + and opts.allow_playlist_files and opts.outtmpl.get('pl_infojson') != '') + if not any(( + opts.extract_flat, + opts.dump_single_json, + opts.forceprint.get('playlist'), + opts.print_to_file.get('playlist'), + write_playlist_infojson, + )): + if not playlist_pps: + opts.extract_flat = 'discard' + elif playlist_pps == [{'key': 'FFmpegConcat', 'only_multi_video': True, 'when': 'playlist'}]: + opts.extract_flat = 'discard_in_playlist' + final_ext = ( opts.recodevideo if opts.recodevideo in FFmpegVideoConvertorPP.SUPPORTED_EXTS else opts.remuxvideo if opts.remuxvideo in FFmpegVideoRemuxerPP.SUPPORTED_EXTS - else opts.audioformat if (opts.extractaudio and opts.audioformat != 'best') + else opts.audioformat if (opts.extractaudio and opts.audioformat in FFmpegExtractAudioPP.SUPPORTED_EXTS) else None) - match_filter = ( - None if opts.match_filter is None - else match_filter_func(opts.match_filter)) - - ydl_opts = { + return ParsedOptions(parser, opts, urls, { 'usenetrc': opts.usenetrc, 'netrc_location': opts.netrc_location, 'username': opts.username, @@ -659,7 +741,10 @@ def _real_main(argv=None): 'ap_mso': opts.ap_mso, 'ap_username': opts.ap_username, 'ap_password': opts.ap_password, - 'quiet': (opts.quiet or any_getting or any_printing), + 'client_certificate': opts.client_certificate, + 'client_certificate_key': opts.client_certificate_key, + 'client_certificate_password': opts.client_certificate_password, + 'quiet': opts.quiet, 'no_warnings': opts.no_warnings, 'forceurl': opts.geturl, 'forcetitle': opts.gettitle, @@ -674,7 +759,7 @@ def _real_main(argv=None): 'forcejson': opts.dumpjson or opts.print_json, 'dump_single_json': opts.dump_single_json, 'force_write_download_archive': opts.force_write_download_archive, - 'simulate': (any_getting or None) if opts.simulate is None else opts.simulate, + 'simulate': (print_only or any_getting or None) if opts.simulate is None else opts.simulate, 'skip_download': opts.skip_download, 'format': opts.format, 'allow_unplayable_formats': opts.allow_unplayable_formats, @@ -695,6 +780,7 @@ def _real_main(argv=None): 'windowsfilenames': opts.windowsfilenames, 'ignoreerrors': opts.ignoreerrors, 'force_generic_extractor': opts.force_generic_extractor, + 'allowed_extractors': opts.allowed_extractors or ['default'], 'ratelimit': opts.ratelimit, 'throttledratelimit': opts.throttledratelimit, 'overwrites': opts.overwrites, @@ -702,6 +788,7 @@ def _real_main(argv=None): 'file_access_retries': opts.file_access_retries, 'fragment_retries': opts.fragment_retries, 'extractor_retries': opts.extractor_retries, + 'retry_sleep_functions': opts.retry_sleep, 'skip_unavailable_fragments': opts.skip_unavailable_fragments, 'keep_fragments': opts.keep_fragments, 'concurrent_fragment_downloads': opts.concurrent_fragment_downloads, @@ -716,8 +803,9 @@ def _real_main(argv=None): 'playlistend': opts.playlistend, 'playlistreverse': opts.playlist_reverse, 'playlistrandom': opts.playlist_random, + 'lazy_playlist': opts.lazy_playlist, 'noplaylist': opts.noplaylist, - 'logtostderr': outtmpl_default == '-', + 'logtostderr': opts.outtmpl.get('default') == '-', 'consoletitle': opts.consoletitle, 'nopart': opts.nopart, 'updatetime': opts.updatetime, @@ -747,17 +835,18 @@ def _real_main(argv=None): 
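# Sketch of calling the new parse_options() entry point directly; it is
# exported for library use (see __all__ below) and returns the same
# ParsedOptions namedtuple the CLI path consumes. The argument list is
# illustrative.
from yt_dlp import parse_options

parser, opts, urls, ydl_opts = parse_options(['-f', 'bestaudio', '--no-playlist'])
assert ydl_opts['format'] == 'bestaudio' and ydl_opts['noplaylist'] is True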
'verbose': opts.verbose, 'dump_intermediate_pages': opts.dump_intermediate_pages, 'write_pages': opts.write_pages, + 'load_pages': opts.load_pages, 'test': opts.test, 'keepvideo': opts.keepvideo, 'min_filesize': opts.min_filesize, 'max_filesize': opts.max_filesize, 'min_views': opts.min_views, 'max_views': opts.max_views, - 'daterange': date, + 'daterange': opts.date, 'cachedir': opts.cachedir, 'youtube_print_sig_code': opts.youtube_print_sig_code, 'age_limit': opts.age_limit, - 'download_archive': download_archive_fn, + 'download_archive': opts.download_archive, 'break_on_existing': opts.break_on_existing, 'break_on_reject': opts.break_on_reject, 'break_per_url': opts.break_per_url, @@ -767,6 +856,8 @@ def _real_main(argv=None): 'legacyserverconnect': opts.legacy_server_connect, 'nocheckcertificate': opts.no_check_certificate, 'prefer_insecure': opts.prefer_insecure, + 'enable_file_urls': opts.enable_file_urls, + 'http_headers': opts.headers, 'proxy': opts.proxy, 'socket_timeout': opts.socket_timeout, 'bidi_workaround': opts.bidi_workaround, @@ -794,10 +885,12 @@ def _real_main(argv=None): 'max_sleep_interval': opts.max_sleep_interval, 'sleep_interval_subtitles': opts.sleep_interval_subtitles, 'external_downloader': opts.external_downloader, + 'download_ranges': opts.download_ranges, + 'force_keyframes_at_cuts': opts.force_keyframes_at_cuts, 'list_thumbnails': opts.list_thumbnails, 'playlist_items': opts.playlist_items, 'xattr_set_filesize': opts.xattr_set_filesize, - 'match_filter': match_filter, + 'match_filter': opts.match_filter, 'no_color': opts.no_color, 'ffmpeg_location': opts.ffmpeg_location, 'hls_prefer_native': opts.hls_prefer_native, @@ -812,60 +905,91 @@ def _real_main(argv=None): 'geo_bypass_ip_block': opts.geo_bypass_ip_block, '_warnings': warnings, '_deprecation_warnings': deprecation_warnings, - 'compat_opts': compat_opts, - } + 'compat_opts': opts.compat_opts, + }) + + +def _real_main(argv=None): + setproctitle('yt-dlp') + + parser, opts, all_urls, ydl_opts = parse_options(argv) + + # Dump user agent + if opts.dump_user_agent: + ua = traverse_obj(opts.headers, 'User-Agent', casesense=False, default=std_headers['User-Agent']) + write_string(f'{ua}\n', out=sys.stdout) + return + + if print_extractor_information(opts, all_urls): + return + + # We may need ffmpeg_location without having access to the YoutubeDL instance + # See https://github.com/yt-dlp/yt-dlp/issues/2191 + if opts.ffmpeg_location: + FFmpegPostProcessor._ffmpeg_location.set(opts.ffmpeg_location) with YoutubeDL(ydl_opts) as ydl: + pre_process = opts.update_self or opts.rm_cachedir actual_use = all_urls or opts.load_info_filename - # Remove cache dir if opts.rm_cachedir: ydl.cache.remove() - # Update version - if opts.update_self: - # If updater returns True, exit. 
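# The 'match_filter' entry above now receives a ready-made callable rather
# than a raw string. A sketch of building one with the same helper the old
# code path used (the filter expression is illustrative):
from yt_dlp.utils import match_filter_func

filter_opts = {'match_filter': match_filter_func('duration < 300 & !is_live')}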
Required for windows - if run_update(ydl): - if actual_use: - sys.exit('ERROR: The program must exit for the update to complete') - sys.exit() + updater = Updater(ydl) + if opts.update_self and updater.update() and actual_use: + if updater.cmd: + return updater.restart() + # This code is reachable only for zip variant in py < 3.10 + # It makes sense to exit here, but the old behavior is to continue + ydl.report_warning('Restart yt-dlp to use the updated version') + # return 100, 'ERROR: The program must exit for the update to complete' - # Maybe do nothing if not actual_use: - if opts.update_self or opts.rm_cachedir: - sys.exit() + if pre_process: + return ydl._download_retcode ydl.warn_if_short_id(sys.argv[1:] if argv is None else argv) parser.error( 'You must provide at least one URL.\n' 'Type yt-dlp --help to see a list of all options.') + parser.destroy() try: if opts.load_info_filename is not None: - retcode = ydl.download_with_info_file(expand_path(opts.load_info_filename)) + return ydl.download_with_info_file(expand_path(opts.load_info_filename)) else: - retcode = ydl.download(all_urls) + return ydl.download(all_urls) except DownloadCancelled: ydl.to_screen('Aborting remaining downloads') - retcode = 101 - - sys.exit(retcode) + return 101 def main(argv=None): + global _IN_CLI + _IN_CLI = True try: - _real_main(argv) + _exit(*variadic(_real_main(argv))) except DownloadError: - sys.exit(1) + _exit(1) except SameFileError as e: - sys.exit(f'ERROR: {e}') + _exit(f'ERROR: {e}') except KeyboardInterrupt: - sys.exit('\nERROR: Interrupted by user') + _exit('\nERROR: Interrupted by user') except BrokenPipeError as e: # https://docs.python.org/3/library/signal.html#note-on-sigpipe devnull = os.open(os.devnull, os.O_WRONLY) os.dup2(devnull, sys.stdout.fileno()) - sys.exit(f'\nERROR: {e}') + _exit(f'\nERROR: {e}') + except optparse.OptParseError as e: + _exit(2, f'\n{e}') -__all__ = ['main', 'YoutubeDL', 'gen_extractors', 'list_extractors'] +from .extractor import gen_extractors, list_extractors + +__all__ = [ + 'main', + 'YoutubeDL', + 'parse_options', + 'gen_extractors', + 'list_extractors', +] diff --git a/plugins/youtube_download/yt_dlp/__main__.py b/plugins/youtube_download/yt_dlp/__main__.py index c9f4147..78701df 100644 --- a/plugins/youtube_download/yt_dlp/__main__.py +++ b/plugins/youtube_download/yt_dlp/__main__.py @@ -1,13 +1,11 @@ #!/usr/bin/env python3 -from __future__ import unicode_literals # Execute with -# $ python yt_dlp/__main__.py (2.6+) -# $ python -m yt_dlp (2.7+) +# $ python -m yt_dlp import sys -if __package__ is None and not hasattr(sys, 'frozen'): +if __package__ is None and not getattr(sys, 'frozen', False): # direct call of __main__.py import os.path path = os.path.realpath(os.path.abspath(__file__)) diff --git a/plugins/youtube_download/yt_dlp/__pyinstaller/__init__.py b/plugins/youtube_download/yt_dlp/__pyinstaller/__init__.py new file mode 100644 index 0000000..1c52aad --- /dev/null +++ b/plugins/youtube_download/yt_dlp/__pyinstaller/__init__.py @@ -0,0 +1,5 @@ +import os + + +def get_hook_dirs(): + return [os.path.dirname(__file__)] diff --git a/plugins/youtube_download/yt_dlp/__pyinstaller/hook-yt_dlp.py b/plugins/youtube_download/yt_dlp/__pyinstaller/hook-yt_dlp.py new file mode 100644 index 0000000..057cfef --- /dev/null +++ b/plugins/youtube_download/yt_dlp/__pyinstaller/hook-yt_dlp.py @@ -0,0 +1,57 @@ +import ast +import os +import sys +from pathlib import Path + +from PyInstaller.utils.hooks import collect_submodules + + +def find_attribute_accesses(node, 
name, path=()): + if isinstance(node, ast.Attribute): + path = [*path, node.attr] + if isinstance(node.value, ast.Name) and node.value.id == name: + yield path[::-1] + for child in ast.iter_child_nodes(node): + yield from find_attribute_accesses(child, name, path) + + +def collect_used_submodules(name, level): + for dirpath, _, filenames in os.walk(Path(__file__).parent.parent): + for filename in filenames: + if not filename.endswith('.py'): + continue + with open(Path(dirpath) / filename, encoding='utf8') as f: + for submodule in find_attribute_accesses(ast.parse(f.read()), name): + yield '.'.join(submodule[:level]) + + +def pycryptodome_module(): + try: + import Cryptodome # noqa: F401 + except ImportError: + try: + import Crypto # noqa: F401 + print('WARNING: Using Crypto since Cryptodome is not available. ' + 'Install with: pip install pycryptodomex', file=sys.stderr) + return 'Crypto' + except ImportError: + pass + return 'Cryptodome' + + +def get_hidden_imports(): + yield 'yt_dlp.compat._legacy' + yield from collect_submodules('websockets') + + crypto = pycryptodome_module() + for sm in set(collect_used_submodules('Cryptodome', 2)): + yield f'{crypto}.{sm}' + + # These are auto-detected, but explicitly add them just in case + yield from ('mutagen', 'brotli', 'certifi') + + +hiddenimports = list(get_hidden_imports()) +print(f'Adding imports: {hiddenimports}') + +excludedimports = ['youtube_dl', 'youtube_dlc', 'test', 'ytdlp_plugins', 'devscripts'] diff --git a/plugins/youtube_download/yt_dlp/aes.py b/plugins/youtube_download/yt_dlp/aes.py index b37f0dd..deff0a2 100644 --- a/plugins/youtube_download/yt_dlp/aes.py +++ b/plugins/youtube_download/yt_dlp/aes.py @@ -1,26 +1,18 @@ -from __future__ import unicode_literals - +import base64 from math import ceil -from .compat import ( - compat_b64decode, - compat_ord, - compat_pycrypto_AES, -) -from .utils import ( - bytes_to_intlist, - intlist_to_bytes, -) +from .compat import compat_ord +from .dependencies import Cryptodome +from .utils import bytes_to_intlist, intlist_to_bytes - -if compat_pycrypto_AES: +if Cryptodome: def aes_cbc_decrypt_bytes(data, key, iv): """ Decrypt bytes with AES-CBC using pycryptodome """ - return compat_pycrypto_AES.new(key, compat_pycrypto_AES.MODE_CBC, iv).decrypt(data) + return Cryptodome.Cipher.AES.new(key, Cryptodome.Cipher.AES.MODE_CBC, iv).decrypt(data) def aes_gcm_decrypt_and_verify_bytes(data, key, tag, nonce): """ Decrypt bytes with AES-GCM using pycryptodome """ - return compat_pycrypto_AES.new(key, compat_pycrypto_AES.MODE_GCM, nonce).decrypt_and_verify(data, tag) + return Cryptodome.Cipher.AES.new(key, Cryptodome.Cipher.AES.MODE_GCM, nonce).decrypt_and_verify(data, tag) else: def aes_cbc_decrypt_bytes(data, key, iv): @@ -32,16 +24,59 @@ else: return intlist_to_bytes(aes_gcm_decrypt_and_verify(*map(bytes_to_intlist, (data, key, tag, nonce)))) -def unpad_pkcs7(data): - return data[:-compat_ord(data[-1])] +def aes_cbc_encrypt_bytes(data, key, iv, **kwargs): + return intlist_to_bytes(aes_cbc_encrypt(*map(bytes_to_intlist, (data, key, iv)), **kwargs)) BLOCK_SIZE_BYTES = 16 +def unpad_pkcs7(data): + return data[:-compat_ord(data[-1])] + + +def pkcs7_padding(data): + """ + PKCS#7 padding + + @param {int[]} data cleartext + @returns {int[]} padding data + """ + + remaining_length = BLOCK_SIZE_BYTES - len(data) % BLOCK_SIZE_BYTES + return data + [remaining_length] * remaining_length + + +def pad_block(block, padding_mode): + """ + Pad a block with the given padding mode + @param {int[]} block block to pad + 
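# Quick illustration of the AST walk defined above, using only the stdlib;
# it yields each dotted attribute path hanging off the given root name:
import ast

_tree = ast.parse('Cryptodome.Cipher.AES.new(key, Cryptodome.Cipher.AES.MODE_CBC, iv)')
assert list(find_attribute_accesses(_tree, 'Cryptodome')) == [
    ['Cipher', 'AES', 'new'], ['Cipher', 'AES', 'MODE_CBC']]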
@param padding_mode padding mode + """ + padding_size = BLOCK_SIZE_BYTES - len(block) + + PADDING_BYTE = { + 'pkcs7': padding_size, + 'iso7816': 0x0, + 'whitespace': 0x20, + 'zero': 0x0, + } + + if padding_size < 0: + raise ValueError('Block size exceeded') + elif padding_mode not in PADDING_BYTE: + raise NotImplementedError(f'Padding mode {padding_mode} is not implemented') + + if padding_mode == 'iso7816' and padding_size: + block = block + [0x80] # NB: += mutates list + padding_size -= 1 + + return block + [PADDING_BYTE[padding_mode]] * padding_size + + def aes_ecb_encrypt(data, key, iv=None): """ - Encrypt with aes in ECB mode + Encrypt with aes in ECB mode. Using PKCS#7 padding @param {int[]} data cleartext @param {int[]} key 16/24/32-Byte cipher key @@ -54,8 +89,7 @@ def aes_ecb_encrypt(data, key, iv=None): encrypted_data = [] for i in range(block_count): block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES] - encrypted_data += aes_encrypt(block, expanded_key) - encrypted_data = encrypted_data[:len(data)] + encrypted_data += aes_encrypt(pkcs7_padding(block), expanded_key) return encrypted_data @@ -145,13 +179,14 @@ def aes_cbc_decrypt(data, key, iv): return decrypted_data -def aes_cbc_encrypt(data, key, iv): +def aes_cbc_encrypt(data, key, iv, *, padding_mode='pkcs7'): """ - Encrypt with aes in CBC mode. Using PKCS#7 padding + Encrypt with aes in CBC mode @param {int[]} data cleartext @param {int[]} key 16/24/32-Byte cipher key @param {int[]} iv 16-Byte IV + @param padding_mode Padding mode to use @returns {int[]} encrypted data """ expanded_key = key_expansion(key) @@ -161,8 +196,8 @@ def aes_cbc_encrypt(data, key, iv): previous_cipher_block = iv for i in range(block_count): block = data[i * BLOCK_SIZE_BYTES: (i + 1) * BLOCK_SIZE_BYTES] - remaining_length = BLOCK_SIZE_BYTES - len(block) - block += [remaining_length] * remaining_length + block = pad_block(block, padding_mode) + mixed_block = xor(block, previous_cipher_block) encrypted_block = aes_encrypt(mixed_block, expanded_key) @@ -273,8 +308,8 @@ def aes_decrypt_text(data, password, key_size_bytes): """ NONCE_LENGTH_BYTES = 8 - data = bytes_to_intlist(compat_b64decode(data)) - password = bytes_to_intlist(password.encode('utf-8')) + data = bytes_to_intlist(base64.b64decode(data)) + password = bytes_to_intlist(password.encode()) key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password)) key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES) @@ -503,20 +538,30 @@ def ghash(subkey, data): last_y = [0] * BLOCK_SIZE_BYTES for i in range(0, len(data), BLOCK_SIZE_BYTES): - block = data[i : i + BLOCK_SIZE_BYTES] # noqa: E203 + block = data[i: i + BLOCK_SIZE_BYTES] last_y = block_product(xor(last_y, block), subkey) return last_y __all__ = [ - 'aes_ctr_decrypt', 'aes_cbc_decrypt', 'aes_cbc_decrypt_bytes', + 'aes_ctr_decrypt', 'aes_decrypt_text', - 'aes_encrypt', + 'aes_decrypt', + 'aes_ecb_decrypt', 'aes_gcm_decrypt_and_verify', 'aes_gcm_decrypt_and_verify_bytes', + + 'aes_cbc_encrypt', + 'aes_cbc_encrypt_bytes', + 'aes_ctr_encrypt', + 'aes_ecb_encrypt', + 'aes_encrypt', + 'key_expansion', + 'pad_block', + 'pkcs7_padding', 'unpad_pkcs7', ] diff --git a/plugins/youtube_download/yt_dlp/cache.py b/plugins/youtube_download/yt_dlp/cache.py index e5cb193..7be91ea 100644 --- a/plugins/youtube_download/yt_dlp/cache.py +++ b/plugins/youtube_download/yt_dlp/cache.py @@ -1,37 +1,31 @@ -from __future__ import unicode_literals - +import contextlib import errno -import io import json 
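# Round-trip sketch for the padding-aware helpers above (the key and IV
# values are illustrative, not from the original code):
from yt_dlp.aes import aes_cbc_decrypt_bytes, aes_cbc_encrypt_bytes, unpad_pkcs7

_key, _iv = b'0123456789abcdef', b'\x00' * 16
_ct = aes_cbc_encrypt_bytes(b'secret message', _key, _iv)  # pkcs7 by default
assert unpad_pkcs7(aes_cbc_decrypt_bytes(_ct, _key, _iv)) == b'secret message'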
import os import re import shutil import traceback +import urllib.parse -from .compat import compat_getenv -from .utils import ( - expand_path, - write_json_file, -) +from .utils import expand_path, traverse_obj, version_tuple, write_json_file +from .version import __version__ -class Cache(object): +class Cache: def __init__(self, ydl): self._ydl = ydl def _get_root_dir(self): res = self._ydl.params.get('cachedir') if res is None: - cache_root = compat_getenv('XDG_CACHE_HOME', '~/.cache') + cache_root = os.getenv('XDG_CACHE_HOME', '~/.cache') res = os.path.join(cache_root, 'yt-dlp') return expand_path(res) def _get_cache_fn(self, section, key, dtype): - assert re.match(r'^[a-zA-Z0-9_.-]+$', section), \ - 'invalid section %r' % section - assert re.match(r'^[a-zA-Z0-9_.-]+$', key), 'invalid key %r' % key - return os.path.join( - self._get_root_dir(), section, '%s.%s' % (key, dtype)) + assert re.match(r'^[\w.-]+$', section), f'invalid section {section!r}' + key = urllib.parse.quote(key, safe='').replace('%', ',') # encode non-ascii characters + return os.path.join(self._get_root_dir(), section, f'{key}.{dtype}') @property def enabled(self): @@ -51,33 +45,37 @@ class Cache(object): if ose.errno != errno.EEXIST: raise self._ydl.write_debug(f'Saving {section}.{key} to cache') - write_json_file(data, fn) + write_json_file({'yt-dlp_version': __version__, 'data': data}, fn) except Exception: tb = traceback.format_exc() - self._ydl.report_warning( - 'Writing cache to %r failed: %s' % (fn, tb)) + self._ydl.report_warning(f'Writing cache to {fn!r} failed: {tb}') - def load(self, section, key, dtype='json', default=None): + def _validate(self, data, min_ver): + version = traverse_obj(data, 'yt-dlp_version') + if not version: # Backward compatibility + data, version = {'data': data}, '2022.08.19' + if not min_ver or version_tuple(version) >= version_tuple(min_ver): + return data['data'] + self._ydl.write_debug(f'Discarding old cache from version {version} (needs {min_ver})') + + def load(self, section, key, dtype='json', default=None, *, min_ver=None): assert dtype in ('json',) if not self.enabled: return default cache_fn = self._get_cache_fn(section, key, dtype) - try: + with contextlib.suppress(OSError): try: - with io.open(cache_fn, 'r', encoding='utf-8') as cachef: + with open(cache_fn, encoding='utf-8') as cachef: self._ydl.write_debug(f'Loading {section}.{key} from cache') - return json.load(cachef) - except ValueError: + return self._validate(json.load(cachef), min_ver) + except (ValueError, KeyError): try: file_size = os.path.getsize(cache_fn) - except (OSError, IOError) as oe: + except OSError as oe: file_size = str(oe) - self._ydl.report_warning( - 'Cache retrieval from %s failed (%s)' % (cache_fn, file_size)) - except IOError: - pass # No cache available + self._ydl.report_warning(f'Cache retrieval from {cache_fn} failed ({file_size})') return default diff --git a/plugins/youtube_download/yt_dlp/compat.py b/plugins/youtube_download/yt_dlp/compat.py deleted file mode 100644 index b97d451..0000000 --- a/plugins/youtube_download/yt_dlp/compat.py +++ /dev/null @@ -1,311 +0,0 @@ -# coding: utf-8 - -import asyncio -import base64 -import collections -import ctypes -import getpass -import html -import html.parser -import http -import http.client -import http.cookiejar -import http.cookies -import http.server -import itertools -import optparse -import os -import re -import shlex -import shutil -import socket -import struct -import subprocess -import sys -import tokenize -import urllib -import 
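# Sketch of the versioned cache behaviour introduced above: store() wraps the
# payload with the writing version, and load() can reject entries older than
# min_ver. Section and key names are illustrative; store()'s signature is
# assumed from the partial hunk shown here.
import yt_dlp

with yt_dlp.YoutubeDL({}) as ydl:
    ydl.cache.store('demo-section', 'demo-key', {'answer': 42})
    assert ydl.cache.load('demo-section', 'demo-key', min_ver='2022.08.19') == {'answer': 42}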
xml.etree.ElementTree as etree -from subprocess import DEVNULL - - -# HTMLParseError has been deprecated in Python 3.3 and removed in -# Python 3.5. Introducing dummy exception for Python >3.5 for compatible -# and uniform cross-version exception handling -class compat_HTMLParseError(Exception): - pass - - -# compat_ctypes_WINFUNCTYPE = ctypes.WINFUNCTYPE -# will not work since ctypes.WINFUNCTYPE does not exist in UNIX machines -def compat_ctypes_WINFUNCTYPE(*args, **kwargs): - return ctypes.WINFUNCTYPE(*args, **kwargs) - - -class _TreeBuilder(etree.TreeBuilder): - def doctype(self, name, pubid, system): - pass - - -def compat_etree_fromstring(text): - return etree.XML(text, parser=etree.XMLParser(target=_TreeBuilder())) - - -compat_os_name = os._name if os.name == 'java' else os.name - - -if compat_os_name == 'nt': - def compat_shlex_quote(s): - return s if re.match(r'^[-_\w./]+$', s) else '"%s"' % s.replace('"', '\\"') -else: - from shlex import quote as compat_shlex_quote - - -def compat_ord(c): - if type(c) is int: - return c - else: - return ord(c) - - -def compat_setenv(key, value, env=os.environ): - env[key] = value - - -if compat_os_name == 'nt' and sys.version_info < (3, 8): - # os.path.realpath on Windows does not follow symbolic links - # prior to Python 3.8 (see https://bugs.python.org/issue9949) - def compat_realpath(path): - while os.path.islink(path): - path = os.path.abspath(os.readlink(path)) - return path -else: - compat_realpath = os.path.realpath - - -def compat_print(s): - assert isinstance(s, compat_str) - print(s) - - -# Fix https://github.com/ytdl-org/youtube-dl/issues/4223 -# See http://bugs.python.org/issue9161 for what is broken -def workaround_optparse_bug9161(): - op = optparse.OptionParser() - og = optparse.OptionGroup(op, 'foo') - try: - og.add_option('-t') - except TypeError: - real_add_option = optparse.OptionGroup.add_option - - def _compat_add_option(self, *args, **kwargs): - enc = lambda v: ( - v.encode('ascii', 'replace') if isinstance(v, compat_str) - else v) - bargs = [enc(a) for a in args] - bkwargs = dict( - (k, enc(v)) for k, v in kwargs.items()) - return real_add_option(self, *bargs, **bkwargs) - optparse.OptionGroup.add_option = _compat_add_option - - -try: - compat_Pattern = re.Pattern -except AttributeError: - compat_Pattern = type(re.compile('')) - - -try: - compat_Match = re.Match -except AttributeError: - compat_Match = type(re.compile('').match('')) - - -try: - compat_asyncio_run = asyncio.run # >= 3.7 -except AttributeError: - def compat_asyncio_run(coro): - try: - loop = asyncio.get_event_loop() - except RuntimeError: - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - loop.run_until_complete(coro) - - asyncio.run = compat_asyncio_run - - -# Python 3.8+ does not honor %HOME% on windows, but this breaks compatibility with youtube-dl -# See https://github.com/yt-dlp/yt-dlp/issues/792 -# https://docs.python.org/3/library/os.path.html#os.path.expanduser -if compat_os_name in ('nt', 'ce') and 'HOME' in os.environ: - _userhome = os.environ['HOME'] - - def compat_expanduser(path): - if not path.startswith('~'): - return path - i = path.replace('\\', '/', 1).find('/') # ~user - if i < 0: - i = len(path) - userhome = os.path.join(os.path.dirname(_userhome), path[1:i]) if i > 1 else _userhome - return userhome + path[i:] -else: - compat_expanduser = os.path.expanduser - - -try: - from Cryptodome.Cipher import AES as compat_pycrypto_AES -except ImportError: - try: - from Crypto.Cipher import AES as compat_pycrypto_AES - except 
ImportError: - compat_pycrypto_AES = None - - -WINDOWS_VT_MODE = False if compat_os_name == 'nt' else None - - -def windows_enable_vt_mode(): # TODO: Do this the proper way https://bugs.python.org/issue30075 - if compat_os_name != 'nt': - return - global WINDOWS_VT_MODE - startupinfo = subprocess.STARTUPINFO() - startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW - try: - subprocess.Popen('', shell=True, startupinfo=startupinfo) - WINDOWS_VT_MODE = True - except Exception: - pass - - -# Deprecated - -compat_basestring = str -compat_chr = chr -compat_filter = filter -compat_input = input -compat_integer_types = (int, ) -compat_kwargs = lambda kwargs: kwargs -compat_map = map -compat_numeric_types = (int, float, complex) -compat_str = str -compat_xpath = lambda xpath: xpath -compat_zip = zip - -compat_collections_abc = collections.abc -compat_HTMLParser = html.parser.HTMLParser -compat_HTTPError = urllib.error.HTTPError -compat_Struct = struct.Struct -compat_b64decode = base64.b64decode -compat_cookiejar = http.cookiejar -compat_cookiejar_Cookie = compat_cookiejar.Cookie -compat_cookies = http.cookies -compat_cookies_SimpleCookie = compat_cookies.SimpleCookie -compat_etree_Element = etree.Element -compat_etree_register_namespace = etree.register_namespace -compat_get_terminal_size = shutil.get_terminal_size -compat_getenv = os.getenv -compat_getpass = getpass.getpass -compat_html_entities = html.entities -compat_html_entities_html5 = compat_html_entities.html5 -compat_http_client = http.client -compat_http_server = http.server -compat_itertools_count = itertools.count -compat_parse_qs = urllib.parse.parse_qs -compat_shlex_split = shlex.split -compat_socket_create_connection = socket.create_connection -compat_struct_pack = struct.pack -compat_struct_unpack = struct.unpack -compat_subprocess_get_DEVNULL = lambda: DEVNULL -compat_tokenize_tokenize = tokenize.tokenize -compat_urllib_error = urllib.error -compat_urllib_parse = urllib.parse -compat_urllib_parse_quote = urllib.parse.quote -compat_urllib_parse_quote_plus = urllib.parse.quote_plus -compat_urllib_parse_unquote = urllib.parse.unquote -compat_urllib_parse_unquote_plus = urllib.parse.unquote_plus -compat_urllib_parse_unquote_to_bytes = urllib.parse.unquote_to_bytes -compat_urllib_parse_urlencode = urllib.parse.urlencode -compat_urllib_parse_urlparse = urllib.parse.urlparse -compat_urllib_parse_urlunparse = urllib.parse.urlunparse -compat_urllib_request = urllib.request -compat_urllib_request_DataHandler = urllib.request.DataHandler -compat_urllib_response = urllib.response -compat_urlparse = urllib.parse -compat_urlretrieve = urllib.request.urlretrieve -compat_xml_parse_error = etree.ParseError - - -# Set public objects - -__all__ = [ - 'WINDOWS_VT_MODE', - 'compat_HTMLParseError', - 'compat_HTMLParser', - 'compat_HTTPError', - 'compat_Match', - 'compat_Pattern', - 'compat_Struct', - 'compat_asyncio_run', - 'compat_b64decode', - 'compat_basestring', - 'compat_chr', - 'compat_collections_abc', - 'compat_cookiejar', - 'compat_cookiejar_Cookie', - 'compat_cookies', - 'compat_cookies_SimpleCookie', - 'compat_ctypes_WINFUNCTYPE', - 'compat_etree_Element', - 'compat_etree_fromstring', - 'compat_etree_register_namespace', - 'compat_expanduser', - 'compat_filter', - 'compat_get_terminal_size', - 'compat_getenv', - 'compat_getpass', - 'compat_html_entities', - 'compat_html_entities_html5', - 'compat_http_client', - 'compat_http_server', - 'compat_input', - 'compat_integer_types', - 'compat_itertools_count', - 'compat_kwargs', - 'compat_map', - 
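# The %HOME%-aware expanduser removed with this file survives in the new
# compat package (see compat/__init__.py below). A minimal sketch; on POSIX
# it simply falls through to os.path.expanduser:
from yt_dlp.compat import compat_expanduser

print(compat_expanduser('~/downloads'))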
'compat_numeric_types', - 'compat_ord', - 'compat_os_name', - 'compat_parse_qs', - 'compat_print', - 'compat_pycrypto_AES', - 'compat_realpath', - 'compat_setenv', - 'compat_shlex_quote', - 'compat_shlex_split', - 'compat_socket_create_connection', - 'compat_str', - 'compat_struct_pack', - 'compat_struct_unpack', - 'compat_subprocess_get_DEVNULL', - 'compat_tokenize_tokenize', - 'compat_urllib_error', - 'compat_urllib_parse', - 'compat_urllib_parse_quote', - 'compat_urllib_parse_quote_plus', - 'compat_urllib_parse_unquote', - 'compat_urllib_parse_unquote_plus', - 'compat_urllib_parse_unquote_to_bytes', - 'compat_urllib_parse_urlencode', - 'compat_urllib_parse_urlparse', - 'compat_urllib_parse_urlunparse', - 'compat_urllib_request', - 'compat_urllib_request_DataHandler', - 'compat_urllib_response', - 'compat_urlparse', - 'compat_urlretrieve', - 'compat_xml_parse_error', - 'compat_xpath', - 'compat_zip', - 'windows_enable_vt_mode', - 'workaround_optparse_bug9161', -] diff --git a/plugins/youtube_download/yt_dlp/compat/__init__.py b/plugins/youtube_download/yt_dlp/compat/__init__.py new file mode 100644 index 0000000..c6c0254 --- /dev/null +++ b/plugins/youtube_download/yt_dlp/compat/__init__.py @@ -0,0 +1,72 @@ +import os +import sys +import warnings +import xml.etree.ElementTree as etree + +from ._deprecated import * # noqa: F401, F403 +from .compat_utils import passthrough_module + +# XXX: Implement this the same way as other DeprecationWarnings without circular import +passthrough_module(__name__, '._legacy', callback=lambda attr: warnings.warn( + DeprecationWarning(f'{__name__}.{attr} is deprecated'), stacklevel=5)) + + +# HTMLParseError has been deprecated in Python 3.3 and removed in +# Python 3.5. Introducing dummy exception for Python >3.5 for compatible +# and uniform cross-version exception handling +class compat_HTMLParseError(ValueError): + pass + + +class _TreeBuilder(etree.TreeBuilder): + def doctype(self, name, pubid, system): + pass + + +def compat_etree_fromstring(text): + return etree.XML(text, parser=etree.XMLParser(target=_TreeBuilder())) + + +compat_os_name = os._name if os.name == 'java' else os.name + + +if compat_os_name == 'nt': + def compat_shlex_quote(s): + import re + return s if re.match(r'^[-_\w./]+$', s) else '"%s"' % s.replace('"', '\\"') +else: + from shlex import quote as compat_shlex_quote # noqa: F401 + + +def compat_ord(c): + return c if isinstance(c, int) else ord(c) + + +if compat_os_name == 'nt' and sys.version_info < (3, 8): + # os.path.realpath on Windows does not follow symbolic links + # prior to Python 3.8 (see https://bugs.python.org/issue9949) + def compat_realpath(path): + while os.path.islink(path): + path = os.path.abspath(os.readlink(path)) + return os.path.realpath(path) +else: + compat_realpath = os.path.realpath + + +# Python 3.8+ does not honor %HOME% on windows, but this breaks compatibility with youtube-dl +# See https://github.com/yt-dlp/yt-dlp/issues/792 +# https://docs.python.org/3/library/os.path.html#os.path.expanduser +if compat_os_name in ('nt', 'ce'): + def compat_expanduser(path): + HOME = os.environ.get('HOME') + if not HOME: + return os.path.expanduser(path) + elif not path.startswith('~'): + return path + i = path.replace('\\', '/', 1).find('/') # ~user + if i < 0: + i = len(path) + userhome = os.path.join(os.path.dirname(HOME), path[1:i]) if i > 1 else HOME + return userhome + path[i:] +else: + compat_expanduser = os.path.expanduser diff --git a/plugins/youtube_download/yt_dlp/compat/_deprecated.py 
b/plugins/youtube_download/yt_dlp/compat/_deprecated.py new file mode 100644 index 0000000..342f1f8 --- /dev/null +++ b/plugins/youtube_download/yt_dlp/compat/_deprecated.py @@ -0,0 +1,16 @@ +"""Deprecated - New code should avoid these""" + +import base64 +import urllib.error +import urllib.parse + +compat_str = str + +compat_b64decode = base64.b64decode + +compat_HTTPError = urllib.error.HTTPError +compat_urlparse = urllib.parse +compat_parse_qs = urllib.parse.parse_qs +compat_urllib_parse_unquote = urllib.parse.unquote +compat_urllib_parse_urlencode = urllib.parse.urlencode +compat_urllib_parse_urlparse = urllib.parse.urlparse diff --git a/plugins/youtube_download/yt_dlp/compat/_legacy.py b/plugins/youtube_download/yt_dlp/compat/_legacy.py new file mode 100644 index 0000000..d19333d --- /dev/null +++ b/plugins/youtube_download/yt_dlp/compat/_legacy.py @@ -0,0 +1,97 @@ +""" Do not use! """ + +import collections +import ctypes +import getpass +import html.entities +import html.parser +import http.client +import http.cookiejar +import http.cookies +import http.server +import itertools +import os +import shlex +import shutil +import socket +import struct +import tokenize +import urllib.error +import urllib.parse +import urllib.request +import xml.etree.ElementTree as etree +from subprocess import DEVNULL + +# isort: split +import asyncio # noqa: F401 +import re # noqa: F401 +from asyncio import run as compat_asyncio_run # noqa: F401 +from re import Pattern as compat_Pattern # noqa: F401 +from re import match as compat_Match # noqa: F401 + +from .compat_utils import passthrough_module +from ..dependencies import Cryptodome_AES as compat_pycrypto_AES # noqa: F401 +from ..dependencies import brotli as compat_brotli # noqa: F401 +from ..dependencies import websockets as compat_websockets # noqa: F401 + +passthrough_module(__name__, '...utils', ('WINDOWS_VT_MODE', 'windows_enable_vt_mode')) + + +# compat_ctypes_WINFUNCTYPE = ctypes.WINFUNCTYPE +# will not work since ctypes.WINFUNCTYPE does not exist in UNIX machines +def compat_ctypes_WINFUNCTYPE(*args, **kwargs): + return ctypes.WINFUNCTYPE(*args, **kwargs) + + +def compat_setenv(key, value, env=os.environ): + env[key] = value + + +compat_basestring = str +compat_casefold = str.casefold +compat_chr = chr +compat_collections_abc = collections.abc +compat_cookiejar = http.cookiejar +compat_cookiejar_Cookie = http.cookiejar.Cookie +compat_cookies = http.cookies +compat_cookies_SimpleCookie = http.cookies.SimpleCookie +compat_etree_Element = etree.Element +compat_etree_register_namespace = etree.register_namespace +compat_filter = filter +compat_get_terminal_size = shutil.get_terminal_size +compat_getenv = os.getenv +compat_getpass = getpass.getpass +compat_html_entities = html.entities +compat_html_entities_html5 = html.entities.html5 +compat_HTMLParser = html.parser.HTMLParser +compat_http_client = http.client +compat_http_server = http.server +compat_input = input +compat_integer_types = (int, ) +compat_itertools_count = itertools.count +compat_kwargs = lambda kwargs: kwargs +compat_map = map +compat_numeric_types = (int, float, complex) +compat_print = print +compat_shlex_split = shlex.split +compat_socket_create_connection = socket.create_connection +compat_Struct = struct.Struct +compat_struct_pack = struct.pack +compat_struct_unpack = struct.unpack +compat_subprocess_get_DEVNULL = lambda: DEVNULL +compat_tokenize_tokenize = tokenize.tokenize +compat_urllib_error = urllib.error +compat_urllib_parse = urllib.parse +compat_urllib_parse_quote = 
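# Sketch of the passthrough mechanism wired up in compat/__init__.py above:
# names that moved into _legacy still resolve, but emit a DeprecationWarning
# via the passthrough_module() callback.
import warnings

import yt_dlp.compat as compat

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    _ = compat.compat_chr  # served from compat._legacy
assert any(issubclass(w.category, DeprecationWarning) for w in caught)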
urllib.parse.quote +compat_urllib_parse_quote_plus = urllib.parse.quote_plus +compat_urllib_parse_unquote_plus = urllib.parse.unquote_plus +compat_urllib_parse_unquote_to_bytes = urllib.parse.unquote_to_bytes +compat_urllib_parse_urlunparse = urllib.parse.urlunparse +compat_urllib_request = urllib.request +compat_urllib_request_DataHandler = urllib.request.DataHandler +compat_urllib_response = urllib.response +compat_urlretrieve = urllib.request.urlretrieve +compat_xml_parse_error = etree.ParseError +compat_xpath = lambda xpath: xpath +compat_zip = zip +workaround_optparse_bug9161 = lambda: None diff --git a/plugins/youtube_download/yt_dlp/compat/compat_utils.py b/plugins/youtube_download/yt_dlp/compat/compat_utils.py new file mode 100644 index 0000000..8956b3b --- /dev/null +++ b/plugins/youtube_download/yt_dlp/compat/compat_utils.py @@ -0,0 +1,83 @@ +import collections +import contextlib +import functools +import importlib +import sys +import types + +_NO_ATTRIBUTE = object() + +_Package = collections.namedtuple('Package', ('name', 'version')) + + +def get_package_info(module): + return _Package( + name=getattr(module, '_yt_dlp__identifier', module.__name__), + version=str(next(filter(None, ( + getattr(module, attr, None) + for attr in ('__version__', 'version_string', 'version') + )), None))) + + +def _is_package(module): + return '__path__' in vars(module) + + +def _is_dunder(name): + return name.startswith('__') and name.endswith('__') + + +class EnhancedModule(types.ModuleType): + def __bool__(self): + return vars(self).get('__bool__', lambda: True)() + + def __getattribute__(self, attr): + try: + ret = super().__getattribute__(attr) + except AttributeError: + if _is_dunder(attr): + raise + getter = getattr(self, '__getattr__', None) + if not getter: + raise + ret = getter(attr) + return ret.fget() if isinstance(ret, property) else ret + + +def passthrough_module(parent, child, allowed_attributes=(..., ), *, callback=lambda _: None): + """Passthrough parent module into a child module, creating the parent if necessary""" + def __getattr__(attr): + if _is_package(parent): + with contextlib.suppress(ImportError): + return importlib.import_module(f'.{attr}', parent.__name__) + + ret = from_child(attr) + if ret is _NO_ATTRIBUTE: + raise AttributeError(f'module {parent.__name__} has no attribute {attr}') + callback(attr) + return ret + + @functools.lru_cache(maxsize=None) + def from_child(attr): + nonlocal child + if attr not in allowed_attributes: + if ... 
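# Quick illustration of get_package_info() defined above; the reported
# version string depends on the interpreter, so it is only printed here:
import json as _json

from yt_dlp.compat.compat_utils import get_package_info

print(get_package_info(_json))  # e.g. Package(name='json', version='2.0.9')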
not in allowed_attributes or _is_dunder(attr): + return _NO_ATTRIBUTE + + if isinstance(child, str): + child = importlib.import_module(child, parent.__name__) + + if _is_package(child): + with contextlib.suppress(ImportError): + return passthrough_module(f'{parent.__name__}.{attr}', + importlib.import_module(f'.{attr}', child.__name__)) + + with contextlib.suppress(AttributeError): + return getattr(child, attr) + + return _NO_ATTRIBUTE + + parent = sys.modules.get(parent, types.ModuleType(parent)) + parent.__class__ = EnhancedModule + parent.__getattr__ = __getattr__ + return parent diff --git a/plugins/youtube_download/yt_dlp/compat/functools.py b/plugins/youtube_download/yt_dlp/compat/functools.py new file mode 100644 index 0000000..ec003ea --- /dev/null +++ b/plugins/youtube_download/yt_dlp/compat/functools.py @@ -0,0 +1,26 @@ +# flake8: noqa: F405 +from functools import * # noqa: F403 + +from .compat_utils import passthrough_module + +passthrough_module(__name__, 'functools') +del passthrough_module + +try: + cache # >= 3.9 +except NameError: + cache = lru_cache(maxsize=None) + +try: + cached_property # >= 3.8 +except NameError: + class cached_property: + def __init__(self, func): + update_wrapper(self, func) + self.func = func + + def __get__(self, instance, _): + if instance is None: + return self + setattr(instance, self.func.__name__, self.func(instance)) + return getattr(instance, self.func.__name__) diff --git a/plugins/youtube_download/yt_dlp/compat/imghdr.py b/plugins/youtube_download/yt_dlp/compat/imghdr.py new file mode 100644 index 0000000..5d64ab0 --- /dev/null +++ b/plugins/youtube_download/yt_dlp/compat/imghdr.py @@ -0,0 +1,16 @@ +tests = { + 'webp': lambda h: h[0:4] == b'RIFF' and h[8:] == b'WEBP', + 'png': lambda h: h[:8] == b'\211PNG\r\n\032\n', + 'jpeg': lambda h: h[6:10] in (b'JFIF', b'Exif'), + 'gif': lambda h: h[:6] in (b'GIF87a', b'GIF89a'), +} + + +def what(file=None, h=None): + """Detect format of image (Currently supports jpeg, png, webp, gif only) + Ref: https://github.com/python/cpython/blob/3.10/Lib/imghdr.py + """ + if h is None: + with open(file, 'rb') as f: + h = f.read(12) + return next((type_ for type_, test in tests.items() if test(h)), None) diff --git a/plugins/youtube_download/yt_dlp/compat/shutil.py b/plugins/youtube_download/yt_dlp/compat/shutil.py new file mode 100644 index 0000000..23239d5 --- /dev/null +++ b/plugins/youtube_download/yt_dlp/compat/shutil.py @@ -0,0 +1,30 @@ +# flake8: noqa: F405 +from shutil import * # noqa: F403 + +from .compat_utils import passthrough_module + +passthrough_module(__name__, 'shutil') +del passthrough_module + + +import sys + +if sys.platform.startswith('freebsd'): + import errno + import os + import shutil + + # Workaround for PermissionError when using restricted ACL mode on FreeBSD + def copy2(src, dst, *args, **kwargs): + if os.path.isdir(dst): + dst = os.path.join(dst, os.path.basename(src)) + shutil.copyfile(src, dst, *args, **kwargs) + try: + shutil.copystat(src, dst, *args, **kwargs) + except PermissionError as e: + if e.errno != getattr(errno, 'EPERM', None): + raise + return dst + + def move(*args, copy_function=copy2, **kwargs): + return shutil.move(*args, copy_function=copy_function, **kwargs) diff --git a/plugins/youtube_download/yt_dlp/cookies.py b/plugins/youtube_download/yt_dlp/cookies.py index fc033a8..8ca7cea 100644 --- a/plugins/youtube_download/yt_dlp/cookies.py +++ b/plugins/youtube_download/yt_dlp/cookies.py @@ -1,12 +1,16 @@ +import base64 import contextlib -import ctypes +import 
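# Spot-check of the minimal imghdr replacement above, using the magic bytes
# it recognises (12-byte headers, zero-padded where needed):
from yt_dlp.compat.imghdr import what

assert what(h=b'\x89PNG\r\n\x1a\n' + b'\x00' * 4) == 'png'
assert what(h=b'RIFF\x00\x00\x00\x00WEBP') == 'webp'
assert what(h=b'\x00' * 12) is None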
http.cookiejar +import http.cookies import json import os +import re import shutil import struct import subprocess import sys import tempfile +import time from datetime import datetime, timedelta, timezone from enum import Enum, auto from hashlib import pbkdf2_hmac @@ -16,38 +20,21 @@ from .aes import ( aes_gcm_decrypt_and_verify_bytes, unpad_pkcs7, ) -from .compat import ( - compat_b64decode, - compat_cookiejar_Cookie, +from .dependencies import ( + _SECRETSTORAGE_UNAVAILABLE_REASON, + secretstorage, + sqlite3, ) +from .minicurses import MultilinePrinter, QuietMultilinePrinter from .utils import ( - expand_path, Popen, YoutubeDLCookieJar, + error_to_str, + expand_path, + is_path_like, + try_call, ) -try: - import sqlite3 - SQLITE_AVAILABLE = True -except ImportError: - # although sqlite3 is part of the standard library, it is possible to compile python without - # sqlite support. See: https://github.com/yt-dlp/yt-dlp/issues/544 - SQLITE_AVAILABLE = False - - -try: - import secretstorage - SECRETSTORAGE_AVAILABLE = True -except ImportError: - SECRETSTORAGE_AVAILABLE = False - SECRETSTORAGE_UNAVAILABLE_REASON = ( - 'as the `secretstorage` module is not installed. ' - 'Please install by running `python3 -m pip install secretstorage`.') -except Exception as _err: - SECRETSTORAGE_AVAILABLE = False - SECRETSTORAGE_UNAVAILABLE_REASON = f'as the `secretstorage` module could not be initialized. {_err}' - - CHROMIUM_BASED_BROWSERS = {'brave', 'chrome', 'chromium', 'edge', 'opera', 'vivaldi'} SUPPORTED_BROWSERS = CHROMIUM_BASED_BROWSERS | {'firefox', 'safari'} @@ -72,37 +59,72 @@ class YDLLogger: if self._ydl: self._ydl.report_error(message) + class ProgressBar(MultilinePrinter): + _DELAY, _timer = 0.1, 0 + + def print(self, message): + if time.time() - self._timer > self._DELAY: + self.print_at_line(f'[Cookies] {message}', 0) + self._timer = time.time() + + def progress_bar(self): + """Return a context manager with a print method. 
(Optional)""" + # Do not print to files/pipes, loggers, or when --no-progress is used + if not self._ydl or self._ydl.params.get('noprogress') or self._ydl.params.get('logger'): + return + file = self._ydl._out_files.error + try: + if not file.isatty(): + return + except BaseException: + return + return self.ProgressBar(file, preserve_output=False) + + +def _create_progress_bar(logger): + if hasattr(logger, 'progress_bar'): + printer = logger.progress_bar() + if printer: + return printer + printer = QuietMultilinePrinter() + printer.print = lambda _: None + return printer + def load_cookies(cookie_file, browser_specification, ydl): cookie_jars = [] if browser_specification is not None: - browser_name, profile, keyring = _parse_browser_specification(*browser_specification) - cookie_jars.append(extract_cookies_from_browser(browser_name, profile, YDLLogger(ydl), keyring=keyring)) + browser_name, profile, keyring, container = _parse_browser_specification(*browser_specification) + cookie_jars.append( + extract_cookies_from_browser(browser_name, profile, YDLLogger(ydl), keyring=keyring, container=container)) if cookie_file is not None: - cookie_file = expand_path(cookie_file) + is_filename = is_path_like(cookie_file) + if is_filename: + cookie_file = expand_path(cookie_file) + jar = YoutubeDLCookieJar(cookie_file) - if os.access(cookie_file, os.R_OK): + if not is_filename or os.access(cookie_file, os.R_OK): jar.load(ignore_discard=True, ignore_expires=True) cookie_jars.append(jar) return _merge_cookie_jars(cookie_jars) -def extract_cookies_from_browser(browser_name, profile=None, logger=YDLLogger(), *, keyring=None): +def extract_cookies_from_browser(browser_name, profile=None, logger=YDLLogger(), *, keyring=None, container=None): if browser_name == 'firefox': - return _extract_firefox_cookies(profile, logger) + return _extract_firefox_cookies(profile, container, logger) elif browser_name == 'safari': return _extract_safari_cookies(profile, logger) elif browser_name in CHROMIUM_BASED_BROWSERS: return _extract_chrome_cookies(browser_name, profile, keyring, logger) else: - raise ValueError('unknown browser: {}'.format(browser_name)) + raise ValueError(f'unknown browser: {browser_name}') -def _extract_firefox_cookies(profile, logger): +def _extract_firefox_cookies(profile, container, logger): logger.info('Extracting cookies from firefox') - if not SQLITE_AVAILABLE: + if not sqlite3: logger.warning('Cannot extract cookies from firefox without sqlite3 support. 
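# Sketch of the extended browser specification parsed above; it is now a
# 4-tuple of (browser, profile, keyring, container), with None for defaults.
# The container name 'Personal' is illustrative and must exist in firefox;
# running this requires a local firefox profile.
import yt_dlp

cookie_opts = {'cookiesfrombrowser': ('firefox', None, None, 'Personal')}
with yt_dlp.YoutubeDL(cookie_opts) as ydl:
    pass  # cookies from the matching firefox container end up in ydl's jar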
' 'Please use a python interpreter compiled with sqlite3 support') return YoutubeDLCookieJar() @@ -114,25 +136,54 @@ def _extract_firefox_cookies(profile, logger): else: search_root = os.path.join(_firefox_browser_dir(), profile) - cookie_database_path = _find_most_recently_used_file(search_root, 'cookies.sqlite') + cookie_database_path = _find_most_recently_used_file(search_root, 'cookies.sqlite', logger) if cookie_database_path is None: - raise FileNotFoundError('could not find firefox cookies database in {}'.format(search_root)) - logger.debug('Extracting cookies from: "{}"'.format(cookie_database_path)) + raise FileNotFoundError(f'could not find firefox cookies database in {search_root}') + logger.debug(f'Extracting cookies from: "{cookie_database_path}"') + + container_id = None + if container not in (None, 'none'): + containers_path = os.path.join(os.path.dirname(cookie_database_path), 'containers.json') + if not os.path.isfile(containers_path) or not os.access(containers_path, os.R_OK): + raise FileNotFoundError(f'could not read containers.json in {search_root}') + with open(containers_path) as containers: + identities = json.load(containers).get('identities', []) + container_id = next((context.get('userContextId') for context in identities if container in ( + context.get('name'), + try_call(lambda: re.fullmatch(r'userContext([^\.]+)\.label', context['l10nID']).group()) + )), None) + if not isinstance(container_id, int): + raise ValueError(f'could not find firefox container "{container}" in containers.json') with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir: cursor = None try: cursor = _open_database_copy(cookie_database_path, tmpdir) - cursor.execute('SELECT host, name, value, path, expiry, isSecure FROM moz_cookies') + if isinstance(container_id, int): + logger.debug( + f'Only loading cookies from firefox container "{container}", ID {container_id}') + cursor.execute( + 'SELECT host, name, value, path, expiry, isSecure FROM moz_cookies WHERE originAttributes LIKE ? 
OR originAttributes LIKE ?', + (f'%userContextId={container_id}', f'%userContextId={container_id}&%')) + elif container == 'none': + logger.debug('Only loading cookies not belonging to any container') + cursor.execute( + 'SELECT host, name, value, path, expiry, isSecure FROM moz_cookies WHERE NOT INSTR(originAttributes,"userContextId=")') + else: + cursor.execute('SELECT host, name, value, path, expiry, isSecure FROM moz_cookies') jar = YoutubeDLCookieJar() - for host, name, value, path, expiry, is_secure in cursor.fetchall(): - cookie = compat_cookiejar_Cookie( - version=0, name=name, value=value, port=None, port_specified=False, - domain=host, domain_specified=bool(host), domain_initial_dot=host.startswith('.'), - path=path, path_specified=bool(path), secure=is_secure, expires=expiry, discard=False, - comment=None, comment_url=None, rest={}) - jar.set_cookie(cookie) - logger.info('Extracted {} cookies from firefox'.format(len(jar))) + with _create_progress_bar(logger) as progress_bar: + table = cursor.fetchall() + total_cookie_count = len(table) + for i, (host, name, value, path, expiry, is_secure) in enumerate(table): + progress_bar.print(f'Loading cookie {i: 6d}/{total_cookie_count: 6d}') + cookie = http.cookiejar.Cookie( + version=0, name=name, value=value, port=None, port_specified=False, + domain=host, domain_specified=bool(host), domain_initial_dot=host.startswith('.'), + path=path, path_specified=bool(path), secure=is_secure, expires=expiry, discard=False, + comment=None, comment_url=None, rest={}) + jar.set_cookie(cookie) + logger.info(f'Extracted {len(jar)} cookies from firefox') return jar finally: if cursor is not None: @@ -140,39 +191,25 @@ def _extract_firefox_cookies(profile, logger): def _firefox_browser_dir(): - if sys.platform in ('linux', 'linux2'): - return os.path.expanduser('~/.mozilla/firefox') - elif sys.platform == 'win32': - return os.path.expandvars(r'%APPDATA%\Mozilla\Firefox\Profiles') + if sys.platform in ('cygwin', 'win32'): + return os.path.expandvars(R'%APPDATA%\Mozilla\Firefox\Profiles') elif sys.platform == 'darwin': return os.path.expanduser('~/Library/Application Support/Firefox') - else: - raise ValueError('unsupported platform: {}'.format(sys.platform)) + return os.path.expanduser('~/.mozilla/firefox') def _get_chromium_based_browser_settings(browser_name): # https://chromium.googlesource.com/chromium/src/+/HEAD/docs/user_data_dir.md - if sys.platform in ('linux', 'linux2'): - config = _config_home() - browser_dir = { - 'brave': os.path.join(config, 'BraveSoftware/Brave-Browser'), - 'chrome': os.path.join(config, 'google-chrome'), - 'chromium': os.path.join(config, 'chromium'), - 'edge': os.path.join(config, 'microsoft-edge'), - 'opera': os.path.join(config, 'opera'), - 'vivaldi': os.path.join(config, 'vivaldi'), - }[browser_name] - - elif sys.platform == 'win32': + if sys.platform in ('cygwin', 'win32'): appdata_local = os.path.expandvars('%LOCALAPPDATA%') appdata_roaming = os.path.expandvars('%APPDATA%') browser_dir = { - 'brave': os.path.join(appdata_local, r'BraveSoftware\Brave-Browser\User Data'), - 'chrome': os.path.join(appdata_local, r'Google\Chrome\User Data'), - 'chromium': os.path.join(appdata_local, r'Chromium\User Data'), - 'edge': os.path.join(appdata_local, r'Microsoft\Edge\User Data'), - 'opera': os.path.join(appdata_roaming, r'Opera Software\Opera Stable'), - 'vivaldi': os.path.join(appdata_local, r'Vivaldi\User Data'), + 'brave': os.path.join(appdata_local, R'BraveSoftware\Brave-Browser\User Data'), + 'chrome': 
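# Simplified sketch of the containers.json lookup above, with sample data
# standing in for a real firefox profile:
import json

_identities = json.loads('{"identities": [{"userContextId": 2, "name": "Work"}]}')['identities']
_container_id = next((c['userContextId'] for c in _identities if c.get('name') == 'Work'), None)
assert _container_id == 2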
os.path.join(appdata_local, R'Google\Chrome\User Data'), + 'chromium': os.path.join(appdata_local, R'Chromium\User Data'), + 'edge': os.path.join(appdata_local, R'Microsoft\Edge\User Data'), + 'opera': os.path.join(appdata_roaming, R'Opera Software\Opera Stable'), + 'vivaldi': os.path.join(appdata_local, R'Vivaldi\User Data'), }[browser_name] elif sys.platform == 'darwin': @@ -187,7 +224,15 @@ def _get_chromium_based_browser_settings(browser_name): }[browser_name] else: - raise ValueError('unsupported platform: {}'.format(sys.platform)) + config = _config_home() + browser_dir = { + 'brave': os.path.join(config, 'BraveSoftware/Brave-Browser'), + 'chrome': os.path.join(config, 'google-chrome'), + 'chromium': os.path.join(config, 'chromium'), + 'edge': os.path.join(config, 'microsoft-edge'), + 'opera': os.path.join(config, 'opera'), + 'vivaldi': os.path.join(config, 'vivaldi'), + }[browser_name] # Linux keyring names can be determined by snooping on dbus while opening the browser in KDE: # dbus-monitor "interface='org.kde.KWallet'" "type=method_return" @@ -210,11 +255,11 @@ def _get_chromium_based_browser_settings(browser_name): def _extract_chrome_cookies(browser_name, profile, keyring, logger): - logger.info('Extracting cookies from {}'.format(browser_name)) + logger.info(f'Extracting cookies from {browser_name}') - if not SQLITE_AVAILABLE: - logger.warning(('Cannot extract cookies from {} without sqlite3 support. ' - 'Please use a python interpreter compiled with sqlite3 support').format(browser_name)) + if not sqlite3: + logger.warning(f'Cannot extract cookies from {browser_name} without sqlite3 support. ' + 'Please use a python interpreter compiled with sqlite3 support') return YoutubeDLCookieJar() config = _get_chromium_based_browser_settings(browser_name) @@ -228,13 +273,13 @@ def _extract_chrome_cookies(browser_name, profile, keyring, logger): if config['supports_profiles']: search_root = os.path.join(config['browser_dir'], profile) else: - logger.error('{} does not support profiles'.format(browser_name)) + logger.error(f'{browser_name} does not support profiles') search_root = config['browser_dir'] - cookie_database_path = _find_most_recently_used_file(search_root, 'Cookies') + cookie_database_path = _find_most_recently_used_file(search_root, 'Cookies', logger) if cookie_database_path is None: - raise FileNotFoundError('could not find {} cookies database in "{}"'.format(browser_name, search_root)) - logger.debug('Extracting cookies from: "{}"'.format(cookie_database_path)) + raise FileNotFoundError(f'could not find {browser_name} cookies database in "{search_root}"') + logger.debug(f'Extracting cookies from: "{cookie_database_path}"') decryptor = get_cookie_decryptor(config['browser_dir'], config['keyring_name'], logger, keyring=keyring) @@ -245,45 +290,55 @@ def _extract_chrome_cookies(browser_name, profile, keyring, logger): cursor.connection.text_factory = bytes column_names = _get_column_names(cursor, 'cookies') secure_column = 'is_secure' if 'is_secure' in column_names else 'secure' - cursor.execute('SELECT host_key, name, value, encrypted_value, path, ' - 'expires_utc, {} FROM cookies'.format(secure_column)) + cursor.execute(f'SELECT host_key, name, value, encrypted_value, path, expires_utc, {secure_column} FROM cookies') jar = YoutubeDLCookieJar() failed_cookies = 0 unencrypted_cookies = 0 - for host_key, name, value, encrypted_value, path, expires_utc, is_secure in cursor.fetchall(): - host_key = host_key.decode('utf-8') - name = name.decode('utf-8') - value = 
value.decode('utf-8') - path = path.decode('utf-8') - - if not value and encrypted_value: - value = decryptor.decrypt(encrypted_value) - if value is None: + with _create_progress_bar(logger) as progress_bar: + table = cursor.fetchall() + total_cookie_count = len(table) + for i, line in enumerate(table): + progress_bar.print(f'Loading cookie {i: 6d}/{total_cookie_count: 6d}') + is_encrypted, cookie = _process_chrome_cookie(decryptor, *line) + if not cookie: failed_cookies += 1 continue - else: - unencrypted_cookies += 1 - - cookie = compat_cookiejar_Cookie( - version=0, name=name, value=value, port=None, port_specified=False, - domain=host_key, domain_specified=bool(host_key), domain_initial_dot=host_key.startswith('.'), - path=path, path_specified=bool(path), secure=is_secure, expires=expires_utc, discard=False, - comment=None, comment_url=None, rest={}) - jar.set_cookie(cookie) + elif not is_encrypted: + unencrypted_cookies += 1 + jar.set_cookie(cookie) if failed_cookies > 0: - failed_message = ' ({} could not be decrypted)'.format(failed_cookies) + failed_message = f' ({failed_cookies} could not be decrypted)' else: failed_message = '' - logger.info('Extracted {} cookies from {}{}'.format(len(jar), browser_name, failed_message)) - counts = decryptor.cookie_counts.copy() + logger.info(f'Extracted {len(jar)} cookies from {browser_name}{failed_message}') + counts = decryptor._cookie_counts.copy() counts['unencrypted'] = unencrypted_cookies - logger.debug('cookie version breakdown: {}'.format(counts)) + logger.debug(f'cookie version breakdown: {counts}') return jar finally: if cursor is not None: cursor.connection.close() +def _process_chrome_cookie(decryptor, host_key, name, value, encrypted_value, path, expires_utc, is_secure): + host_key = host_key.decode() + name = name.decode() + value = value.decode() + path = path.decode() + is_encrypted = not value and encrypted_value + + if is_encrypted: + value = decryptor.decrypt(encrypted_value) + if value is None: + return is_encrypted, None + + return is_encrypted, http.cookiejar.Cookie( + version=0, name=name, value=value, port=None, port_specified=False, + domain=host_key, domain_specified=bool(host_key), domain_initial_dot=host_key.startswith('.'), + path=path, path_specified=bool(path), secure=is_secure, expires=expires_utc, discard=False, + comment=None, comment_url=None, rest={}) + + class ChromeCookieDecryptor: """ Overview: @@ -310,24 +365,18 @@ class ChromeCookieDecryptor: - KeyStorageLinux::CreateService """ - def decrypt(self, encrypted_value): - raise NotImplementedError + _cookie_counts = {} - @property - def cookie_counts(self): - raise NotImplementedError + def decrypt(self, encrypted_value): + raise NotImplementedError('Must be implemented by sub classes') def get_cookie_decryptor(browser_root, browser_keyring_name, logger, *, keyring=None): - if sys.platform in ('linux', 'linux2'): - return LinuxChromeCookieDecryptor(browser_keyring_name, logger, keyring=keyring) - elif sys.platform == 'darwin': + if sys.platform == 'darwin': return MacChromeCookieDecryptor(browser_keyring_name, logger) - elif sys.platform == 'win32': + elif sys.platform in ('win32', 'cygwin'): return WindowsChromeCookieDecryptor(browser_root, logger) - else: - raise NotImplementedError('Chrome cookie decryption is not supported ' - 'on this platform: {}'.format(sys.platform)) + return LinuxChromeCookieDecryptor(browser_keyring_name, logger, keyring=keyring) class LinuxChromeCookieDecryptor(ChromeCookieDecryptor): @@ -344,10 +393,6 @@ class 
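# Sketch of the Linux v10 key derivation referenced above: PBKDF2-SHA1 with
# a fixed salt and a single iteration. Using b'peanuts' as the fallback
# password is an assumption based on Chromium's os_crypt_linux defaults.
from hashlib import pbkdf2_hmac

_v10_key = pbkdf2_hmac('sha1', b'peanuts', b'saltysalt', 1, 16)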
LinuxChromeCookieDecryptor(ChromeCookieDecryptor): # https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_linux.cc return pbkdf2_sha1(password, salt=b'saltysalt', iterations=1, key_length=16) - @property - def cookie_counts(self): - return self._cookie_counts - def decrypt(self, encrypted_value): version = encrypted_value[:3] ciphertext = encrypted_value[3:]
@@ -381,10 +426,6 @@ class MacChromeCookieDecryptor(ChromeCookieDecryptor): # https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_mac.mm return pbkdf2_sha1(password, salt=b'saltysalt', iterations=1003, key_length=16) - @property - def cookie_counts(self): - return self._cookie_counts - def decrypt(self, encrypted_value): version = encrypted_value[:3] ciphertext = encrypted_value[3:]
@@ -410,10 +451,6 @@ class WindowsChromeCookieDecryptor(ChromeCookieDecryptor): self._v10_key = _get_windows_v10_key(browser_root, logger) self._cookie_counts = {'v10': 0, 'other': 0} - @property - def cookie_counts(self): - return self._cookie_counts - def decrypt(self, encrypted_value): version = encrypted_value[:3] ciphertext = encrypted_value[3:]
@@ -442,25 +479,28 @@ class WindowsChromeCookieDecryptor(ChromeCookieDecryptor): self._cookie_counts['other'] += 1 # any other prefix means the data is DPAPI encrypted # https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_win.cc - return _decrypt_windows_dpapi(encrypted_value, self._logger).decode('utf-8') + return _decrypt_windows_dpapi(encrypted_value, self._logger).decode()
def _extract_safari_cookies(profile, logger): if profile is not None: logger.error('safari does not support profiles') if sys.platform != 'darwin': - raise ValueError('unsupported platform: {}'.format(sys.platform)) + raise ValueError(f'unsupported platform: {sys.platform}') cookies_path = os.path.expanduser('~/Library/Cookies/Cookies.binarycookies') if not os.path.isfile(cookies_path): - raise FileNotFoundError('could not find safari cookies database') + logger.debug('Trying secondary cookie location') + cookies_path = os.path.expanduser('~/Library/Containers/com.apple.Safari/Data/Library/Cookies/Cookies.binarycookies') + if not os.path.isfile(cookies_path): + raise FileNotFoundError('could not find safari cookies database') with open(cookies_path, 'rb') as f: cookies_data = f.read() jar = parse_safari_cookies(cookies_data, logger=logger) - logger.info('Extracted {} cookies from safari'.format(len(jar))) + logger.info(f'Extracted {len(jar)} cookies from safari') return jar
@@ -476,7 +516,7 @@ class DataParser: def read_bytes(self, num_bytes): if num_bytes < 0: - raise ParserError('invalid read of {} bytes'.format(num_bytes)) + raise ParserError(f'invalid read of {num_bytes} bytes') end = self.cursor + num_bytes if end > len(self._data): raise ParserError('reached end of input')
@@ -487,7 +527,7 @@ class DataParser: def expect_bytes(self, expected_value, message): value = self.read_bytes(len(expected_value)) if value != expected_value: - raise ParserError('unexpected value: {} != {} ({})'.format(value, expected_value, message)) + raise ParserError(f'unexpected value: {value} != {expected_value} ({message})') def read_uint(self, big_endian=False): data_format = '>I' if big_endian else '<I' def skip(self, num_bytes, description='unknown'): if num_bytes > 0: - self._logger.debug('skipping {} bytes ({}): {}'.format( - num_bytes, description, self.read_bytes(num_bytes))) + self._logger.debug(f'skipping {num_bytes} bytes ({description}): {self.read_bytes(num_bytes)!r}') elif num_bytes <
0: - raise ParserError('invalid skip of {} bytes'.format(num_bytes)) + raise ParserError(f'invalid skip of {num_bytes} bytes') def skip_to(self, offset, description='unknown'): self.skip(offset - self.cursor, description) @@ -538,15 +577,17 @@ def _parse_safari_cookies_page(data, jar, logger): number_of_cookies = p.read_uint() record_offsets = [p.read_uint() for _ in range(number_of_cookies)] if number_of_cookies == 0: - logger.debug('a cookies page of size {} has no cookies'.format(len(data))) + logger.debug(f'a cookies page of size {len(data)} has no cookies') return p.skip_to(record_offsets[0], 'unknown page header field') - for record_offset in record_offsets: - p.skip_to(record_offset, 'space between records') - record_length = _parse_safari_cookies_record(data[record_offset:], jar, logger) - p.read_bytes(record_length) + with _create_progress_bar(logger) as progress_bar: + for i, record_offset in enumerate(record_offsets): + progress_bar.print(f'Loading cookie {i: 6d}/{number_of_cookies: 6d}') + p.skip_to(record_offset, 'space between records') + record_length = _parse_safari_cookies_record(data[record_offset:], jar, logger) + p.read_bytes(record_length) p.skip_to_end('space in between pages') @@ -583,7 +624,7 @@ def _parse_safari_cookies_record(data, jar, logger): p.skip_to(record_size, 'space at the end of the record') - cookie = compat_cookiejar_Cookie( + cookie = http.cookiejar.Cookie( version=0, name=name, value=value, port=None, port_specified=False, domain=domain, domain_specified=bool(domain), domain_initial_dot=domain.startswith('.'), path=path, path_specified=bool(path), secure=is_secure, expires=expiration_date, discard=False, @@ -682,7 +723,7 @@ def _choose_linux_keyring(logger): SelectBackend """ desktop_environment = _get_linux_desktop_environment(os.environ) - logger.debug('detected desktop environment: {}'.format(desktop_environment.name)) + logger.debug(f'detected desktop environment: {desktop_environment.name}') if desktop_environment == _LinuxDesktopEnvironment.KDE: linux_keyring = _LinuxKeyring.KWALLET elif desktop_environment == _LinuxDesktopEnvironment.OTHER: @@ -703,23 +744,21 @@ def _get_kwallet_network_wallet(logger): """ default_wallet = 'kdewallet' try: - proc = Popen([ + stdout, _, returncode = Popen.run([ 'dbus-send', '--session', '--print-reply=literal', '--dest=org.kde.kwalletd5', '/modules/kwalletd5', 'org.kde.KWallet.networkWallet' - ], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) + ], text=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) - stdout, stderr = proc.communicate_or_kill() - if proc.returncode != 0: + if returncode: logger.warning('failed to read NetworkWallet') return default_wallet else: - network_wallet = stdout.decode('utf-8').strip() - logger.debug('NetworkWallet = "{}"'.format(network_wallet)) - return network_wallet - except BaseException as e: - logger.warning('exception while obtaining NetworkWallet: {}'.format(e)) + logger.debug(f'NetworkWallet = "{stdout.strip()}"') + return stdout.strip() + except Exception as e: + logger.warning(f'exception while obtaining NetworkWallet: {e}') return default_wallet @@ -735,17 +774,16 @@ def _get_kwallet_password(browser_keyring_name, logger): network_wallet = _get_kwallet_network_wallet(logger) try: - proc = Popen([ + stdout, _, returncode = Popen.run([ 'kwallet-query', - '--read-password', '{} Safe Storage'.format(browser_keyring_name), - '--folder', '{} Keys'.format(browser_keyring_name), + '--read-password', f'{browser_keyring_name} Safe Storage', + '--folder', 
f'{browser_keyring_name} Keys', network_wallet ], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) - stdout, stderr = proc.communicate_or_kill() - if proc.returncode != 0: - logger.error('kwallet-query failed with return code {}. Please consult ' - 'the kwallet-query man page for details'.format(proc.returncode)) + if returncode: + logger.error(f'kwallet-query failed with return code {returncode}. ' + 'Please consult the kwallet-query man page for details') return b'' else: if stdout.lower().startswith(b'failed to read'): @@ -760,17 +798,15 @@ def _get_kwallet_password(browser_keyring_name, logger): return b'' else: logger.debug('password found') - if stdout[-1:] == b'\n': - stdout = stdout[:-1] - return stdout - except BaseException as e: - logger.warning(f'exception running kwallet-query: {type(e).__name__}({e})') + return stdout.rstrip(b'\n') + except Exception as e: + logger.warning(f'exception running kwallet-query: {error_to_str(e)}') return b'' def _get_gnome_keyring_password(browser_keyring_name, logger): - if not SECRETSTORAGE_AVAILABLE: - logger.error('secretstorage not available {}'.format(SECRETSTORAGE_UNAVAILABLE_REASON)) + if not secretstorage: + logger.error(f'secretstorage not available {_SECRETSTORAGE_UNAVAILABLE_REASON}') return b'' # the Gnome keyring does not seem to organise keys in the same way as KWallet, # using `dbus-monitor` during startup, it can be observed that chromium lists all keys @@ -779,7 +815,7 @@ def _get_gnome_keyring_password(browser_keyring_name, logger): with contextlib.closing(secretstorage.dbus_init()) as con: col = secretstorage.get_default_collection(con) for item in col.get_all_items(): - if item.get_label() == '{} Safe Storage'.format(browser_keyring_name): + if item.get_label() == f'{browser_keyring_name} Safe Storage': return item.get_secret() else: logger.error('failed to read from keyring') @@ -809,35 +845,35 @@ def _get_linux_keyring_password(browser_keyring_name, keyring, logger): def _get_mac_keyring_password(browser_keyring_name, logger): logger.debug('using find-generic-password to obtain password from OSX keychain') try: - proc = Popen( + stdout, _, returncode = Popen.run( ['security', 'find-generic-password', '-w', # write password to stdout '-a', browser_keyring_name, # match 'account' - '-s', '{} Safe Storage'.format(browser_keyring_name)], # match 'service' + '-s', f'{browser_keyring_name} Safe Storage'], # match 'service' stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) - - stdout, stderr = proc.communicate_or_kill() - if stdout[-1:] == b'\n': - stdout = stdout[:-1] - return stdout - except BaseException as e: - logger.warning(f'exception running find-generic-password: {type(e).__name__}({e})') + if returncode: + logger.warning('find-generic-password failed') + return None + return stdout.rstrip(b'\n') + except Exception as e: + logger.warning(f'exception running find-generic-password: {error_to_str(e)}') return None def _get_windows_v10_key(browser_root, logger): - path = _find_most_recently_used_file(browser_root, 'Local State') + path = _find_most_recently_used_file(browser_root, 'Local State', logger) if path is None: logger.error('could not find local state file') return None - with open(path, 'r', encoding='utf8') as f: + logger.debug(f'Found local state file at "{path}"') + with open(path, encoding='utf8') as f: data = json.load(f) try: base64_key = data['os_crypt']['encrypted_key'] except KeyError: logger.error('no encrypted key in Local State') return None - encrypted_key = compat_b64decode(base64_key) + 
encrypted_key = base64.b64decode(base64_key) prefix = b'DPAPI' if not encrypted_key.startswith(prefix): logger.error('invalid key') @@ -852,7 +888,7 @@ def pbkdf2_sha1(password, salt, iterations, key_length): def _decrypt_aes_cbc(ciphertext, key, logger, initialization_vector=b' ' * 16): plaintext = unpad_pkcs7(aes_cbc_decrypt_bytes(ciphertext, key, initialization_vector)) try: - return plaintext.decode('utf-8') + return plaintext.decode() except UnicodeDecodeError: logger.warning('failed to decrypt cookie (AES-CBC) because UTF-8 decoding failed. Possibly the key is wrong?', only_once=True) return None @@ -866,7 +902,7 @@ def _decrypt_aes_gcm(ciphertext, key, nonce, authentication_tag, logger): return None try: - return plaintext.decode('utf-8') + return plaintext.decode() except UnicodeDecodeError: logger.warning('failed to decrypt cookie (AES-GCM) because UTF-8 decoding failed. Possibly the key is wrong?', only_once=True) return None @@ -877,10 +913,12 @@ def _decrypt_windows_dpapi(ciphertext, logger): References: - https://docs.microsoft.com/en-us/windows/win32/api/dpapi/nf-dpapi-cryptunprotectdata """ - from ctypes.wintypes import DWORD + + import ctypes + import ctypes.wintypes class DATA_BLOB(ctypes.Structure): - _fields_ = [('cbData', DWORD), + _fields_ = [('cbData', ctypes.wintypes.DWORD), ('pbData', ctypes.POINTER(ctypes.c_char))] buffer = ctypes.create_string_buffer(ciphertext) @@ -917,17 +955,20 @@ def _open_database_copy(database_path, tmpdir): def _get_column_names(cursor, table_name): - table_info = cursor.execute('PRAGMA table_info({})'.format(table_name)).fetchall() - return [row[1].decode('utf-8') for row in table_info] + table_info = cursor.execute(f'PRAGMA table_info({table_name})').fetchall() + return [row[1].decode() for row in table_info] -def _find_most_recently_used_file(root, filename): +def _find_most_recently_used_file(root, filename, logger): # if there are multiple browser profiles, take the most recently used one - paths = [] - for root, dirs, files in os.walk(root): - for file in files: - if file == filename: - paths.append(os.path.join(root, file)) + i, paths = 0, [] + with _create_progress_bar(logger) as progress_bar: + for curr_root, dirs, files in os.walk(root): + for file in files: + i += 1 + progress_bar.print(f'Searching for "{filename}": {i: 6d} files searched') + if file == filename: + paths.append(os.path.join(curr_root, file)) return None if not paths else max(paths, key=lambda path: os.lstat(path).st_mtime) @@ -945,11 +986,102 @@ def _is_path(value): return os.path.sep in value -def _parse_browser_specification(browser_name, profile=None, keyring=None): +def _parse_browser_specification(browser_name, profile=None, keyring=None, container=None): if browser_name not in SUPPORTED_BROWSERS: raise ValueError(f'unsupported browser: "{browser_name}"') if keyring not in (None, *SUPPORTED_KEYRINGS): raise ValueError(f'unsupported keyring: "{keyring}"') - if profile is not None and _is_path(profile): - profile = os.path.expanduser(profile) - return browser_name, profile, keyring + if profile is not None and _is_path(expand_path(profile)): + profile = expand_path(profile) + return browser_name, profile, keyring, container + + +class LenientSimpleCookie(http.cookies.SimpleCookie): + """More lenient version of http.cookies.SimpleCookie""" + # From https://github.com/python/cpython/blob/v3.10.7/Lib/http/cookies.py + # We use Morsel's legal key chars to avoid errors on setting values + _LEGAL_KEY_CHARS = r'\w\d' + re.escape('!#$%&\'*+-.:^_`|~') + 
_LEGAL_VALUE_CHARS = _LEGAL_KEY_CHARS + re.escape('(),/<=>?@[]{}') + + _RESERVED = { + "expires", + "path", + "comment", + "domain", + "max-age", + "secure", + "httponly", + "version", + "samesite", + } + + _FLAGS = {"secure", "httponly"} + + # Added 'bad' group to catch the remaining value + _COOKIE_PATTERN = re.compile(r""" + \s* # Optional whitespace at start of cookie + (?P<key> # Start of group 'key' + [""" + _LEGAL_KEY_CHARS + r"""]+? # Any word of at least one letter + ) # End of group 'key' + ( # Optional group: there may not be a value. + \s*=\s* # Equal Sign + ( # Start of potential value + (?P<val> # Start of group 'val' + "(?:[^\\"]|\\.)*" # Any doublequoted string + | # or + \w{3},\s[\w\d\s-]{9,11}\s[\d:]{8}\sGMT # Special case for "expires" attr + | # or + [""" + _LEGAL_VALUE_CHARS + r"""]* # Any word or empty string + ) # End of group 'val' + | # or + (?P<bad>(?:\\;|[^;])*?) # 'bad' group fallback for invalid values + ) # End of potential value + )? # End of optional value group + \s* # Any number of spaces. + (\s+|;|$) # Ending either at space, semicolon, or EOS. + """, re.ASCII | re.VERBOSE) + + def load(self, data): + # Workaround for https://github.com/yt-dlp/yt-dlp/issues/4776 + if not isinstance(data, str): + return super().load(data) + + morsel = None + for match in self._COOKIE_PATTERN.finditer(data): + if match.group('bad'): + morsel = None + continue + + key, value = match.group('key', 'val') + + is_attribute = False + if key.startswith('$'): + key = key[1:] + is_attribute = True + + lower_key = key.lower() + if lower_key in self._RESERVED: + if morsel is None: + continue + + if value is None: + if lower_key not in self._FLAGS: + morsel = None + continue + value = True + else: + value, _ = self.value_decode(value) + + morsel[key] = value + + elif is_attribute: + morsel = None + + elif value is not None: + morsel = self.get(key, http.cookies.Morsel()) + real_value, coded_value = self.value_decode(value) + morsel.set(key, real_value, coded_value) + self[key] = morsel + + else: + morsel = None
diff --git a/plugins/youtube_download/yt_dlp/dependencies/Cryptodome.py b/plugins/youtube_download/yt_dlp/dependencies/Cryptodome.py new file mode 100644 index 0000000..2adc513 --- /dev/null +++ b/plugins/youtube_download/yt_dlp/dependencies/Cryptodome.py @@ -0,0 +1,30 @@ +import types + +from ..compat import functools +from ..compat.compat_utils import passthrough_module + +try: + import Cryptodome as _parent +except ImportError: + try: + import Crypto as _parent + except (ImportError, SyntaxError): # Old Crypto gives SyntaxError in newer Python + _parent = types.ModuleType('no_Cryptodome') + __bool__ = lambda: False + +passthrough_module(__name__, _parent, (..., '__version__')) +del passthrough_module + + +@property +@functools.cache +def _yt_dlp__identifier(): + if _parent.__name__ == 'Crypto': + from Crypto.Cipher import AES + try: + # In pycrypto, mode defaults to ECB. See: + # https://www.pycryptodome.org/en/latest/src/vs_pycrypto.html#:~:text=not%20have%20ECB%20as%20default%20mode + AES.new(b'abcdefghijklmnop') + except TypeError: + return 'pycrypto' + return _parent.__name__
diff --git a/plugins/youtube_download/yt_dlp/dependencies/__init__.py b/plugins/youtube_download/yt_dlp/dependencies/__init__.py new file mode 100644 index 0000000..c2214e6 --- /dev/null +++ b/plugins/youtube_download/yt_dlp/dependencies/__init__.py @@ -0,0 +1,83 @@ +# flake8: noqa: F401 +"""Imports all optional dependencies for the project. 
+An attribute "_yt_dlp__identifier" may be inserted into the module if it uses an ambiguous namespace""" + +try: + import brotlicffi as brotli +except ImportError: + try: + import brotli + except ImportError: + brotli = None + + +try: + import certifi +except ImportError: + certifi = None +else: + from os.path import exists as _path_exists + + # The certificate may not be bundled in executable + if not _path_exists(certifi.where()): + certifi = None + + +try: + import mutagen +except ImportError: + mutagen = None + + +secretstorage = None +try: + import secretstorage + _SECRETSTORAGE_UNAVAILABLE_REASON = None +except ImportError: + _SECRETSTORAGE_UNAVAILABLE_REASON = ( + 'as the `secretstorage` module is not installed. ' + 'Please install by running `python3 -m pip install secretstorage`') +except Exception as _err: + _SECRETSTORAGE_UNAVAILABLE_REASON = f'as the `secretstorage` module could not be initialized. {_err}' + + +try: + import sqlite3 +except ImportError: + # although sqlite3 is part of the standard library, it is possible to compile python without + # sqlite support. See: https://github.com/yt-dlp/yt-dlp/issues/544 + sqlite3 = None + + +try: + import websockets +except (ImportError, SyntaxError): + # websockets 3.10 on python 3.6 causes SyntaxError + # See https://github.com/yt-dlp/yt-dlp/issues/2633 + websockets = None + + +try: + import xattr # xattr or pyxattr +except ImportError: + xattr = None +else: + if hasattr(xattr, 'set'): # pyxattr + xattr._yt_dlp__identifier = 'pyxattr' + + +from . import Cryptodome + +all_dependencies = {k: v for k, v in globals().items() if not k.startswith('_')} +available_dependencies = {k: v for k, v in all_dependencies.items() if v} + + +# Deprecated +Cryptodome_AES = Cryptodome.Cipher.AES if Cryptodome else None + + +__all__ = [ + 'all_dependencies', + 'available_dependencies', + *all_dependencies.keys(), +] diff --git a/plugins/youtube_download/yt_dlp/downloader/__init__.py b/plugins/youtube_download/yt_dlp/downloader/__init__.py index acc19f4..c34dbce 100644 --- a/plugins/youtube_download/yt_dlp/downloader/__init__.py +++ b/plugins/youtube_download/yt_dlp/downloader/__init__.py @@ -1,10 +1,4 @@ -from __future__ import unicode_literals - -from ..compat import compat_str -from ..utils import ( - determine_protocol, - NO_DEFAULT -) +from ..utils import NO_DEFAULT, determine_protocol def get_suitable_downloader(info_dict, params={}, default=NO_DEFAULT, protocol=None, to_stdout=False): @@ -29,20 +23,18 @@ def get_suitable_downloader(info_dict, params={}, default=NO_DEFAULT, protocol=N # Some of these require get_suitable_downloader from .common import FileDownloader from .dash import DashSegmentsFD +from .external import FFmpegFD, get_external_downloader from .f4m import F4mFD +from .fc2 import FC2LiveFD from .hls import HlsFD from .http import HttpFD -from .rtmp import RtmpFD -from .rtsp import RtspFD from .ism import IsmFD from .mhtml import MhtmlFD from .niconico import NiconicoDmcFD +from .rtmp import RtmpFD +from .rtsp import RtspFD from .websocket import WebSocketFragmentFD from .youtube_live_chat import YoutubeLiveChatFD -from .external import ( - get_external_downloader, - FFmpegFD, -) PROTOCOL_MAP = { 'rtmp': RtmpFD, @@ -58,6 +50,7 @@ PROTOCOL_MAP = { 'ism': IsmFD, 'mhtml': MhtmlFD, 'niconico_dmc': NiconicoDmcFD, + 'fc2_live': FC2LiveFD, 'websocket_frag': WebSocketFragmentFD, 'youtube_live_chat': YoutubeLiveChatFD, 'youtube_live_chat_replay': YoutubeLiveChatFD, @@ -66,10 +59,11 @@ PROTOCOL_MAP = { def shorten_protocol_name(proto, 
simplify=False): short_protocol_names = { - 'm3u8_native': 'm3u8_n', - 'rtmp_ffmpeg': 'rtmp_f', + 'm3u8_native': 'm3u8', + 'm3u8': 'm3u8F', + 'rtmp_ffmpeg': 'rtmpF', 'http_dash_segments': 'dash', - 'http_dash_segments_generator': 'dash_g', + 'http_dash_segments_generator': 'dashG', 'niconico_dmc': 'dmc', 'websocket_frag': 'WSfrag', } @@ -77,6 +71,7 @@ def shorten_protocol_name(proto, simplify=False): short_protocol_names.update({ 'https': 'http', 'ftps': 'ftp', + 'm3u8': 'm3u8', # Reverse above m3u8 mapping 'm3u8_native': 'm3u8', 'http_dash_segments_generator': 'dash', 'rtmp_ffmpeg': 'rtmp', @@ -91,13 +86,13 @@ def _get_suitable_downloader(info_dict, protocol, params, default): if default is NO_DEFAULT: default = HttpFD - # if (info_dict.get('start_time') or info_dict.get('end_time')) and not info_dict.get('requested_formats') and FFmpegFD.can_download(info_dict): - # return FFmpegFD + if (info_dict.get('section_start') or info_dict.get('section_end')) and FFmpegFD.can_download(info_dict): + return FFmpegFD info_dict['protocol'] = protocol downloaders = params.get('external_downloader') external_downloader = ( - downloaders if isinstance(downloaders, compat_str) or downloaders is None + downloaders if isinstance(downloaders, str) or downloaders is None else downloaders.get(shorten_protocol_name(protocol, True), downloaders.get('default'))) if external_downloader is None: @@ -117,7 +112,7 @@ def _get_suitable_downloader(info_dict, protocol, params, default): return FFmpegFD elif (external_downloader or '').lower() == 'native': return HlsFD - elif get_suitable_downloader( + elif protocol == 'm3u8_native' and get_suitable_downloader( info_dict, params, None, protocol='m3u8_frag_urls', to_stdout=info_dict['to_stdout']): return HlsFD elif params.get('hls_prefer_native') is True: diff --git a/plugins/youtube_download/yt_dlp/downloader/common.py b/plugins/youtube_download/yt_dlp/downloader/common.py index 37321e3..077b29b 100644 --- a/plugins/youtube_download/yt_dlp/downloader/common.py +++ b/plugins/youtube_download/yt_dlp/downloader/common.py @@ -1,30 +1,40 @@ -from __future__ import division, unicode_literals - +import contextlib +import errno +import functools import os +import random import re import time -import random -import errno +from ..minicurses import ( + BreaklineStatusPrinter, + MultilineLogger, + MultilinePrinter, + QuietMultilinePrinter, +) from ..utils import ( + IDENTITY, + NO_DEFAULT, + LockingUnsupportedError, + Namespace, + RetryManager, + classproperty, decodeArgument, + deprecation_warning, encodeFilename, - error_to_compat_str, format_bytes, + join_nonempty, + parse_bytes, + remove_start, sanitize_open, shell_quote, timeconvert, timetuple_from_msec, -) -from ..minicurses import ( - MultilineLogger, - MultilinePrinter, - QuietMultilinePrinter, - BreaklineStatusPrinter + try_call, ) -class FileDownloader(object): +class FileDownloader: """File Downloader class. File downloader objects are the ones responsible of downloading the @@ -39,6 +49,7 @@ class FileDownloader(object): verbose: Print additional info to stdout. quiet: Do not print messages to stdout. ratelimit: Download speed limit, in bytes/sec. 
+ continuedl: Attempt to continue downloads if possible throttledratelimit: Assume the download is being throttled below this speed (bytes/sec) retries: Number of times to retry for HTTP error 5xx file_access_retries: Number of times to retry on file access error @@ -62,6 +73,7 @@ class FileDownloader(object): useful for bypassing bandwidth throttling imposed by a webserver (experimental) progress_template: See YoutubeDL.py + retry_sleep_functions: See YoutubeDL.py Subclasses of this one must re-define the real_download method. """ @@ -71,21 +83,51 @@ class FileDownloader(object): def __init__(self, ydl, params): """Create a FileDownloader object with the given options.""" - self.ydl = ydl + self._set_ydl(ydl) self._progress_hooks = [] self.params = params self._prepare_multiline_status() self.add_progress_hook(self.report_progress) + def _set_ydl(self, ydl): + self.ydl = ydl + + for func in ( + 'deprecation_warning', + 'deprecated_feature', + 'report_error', + 'report_file_already_downloaded', + 'report_warning', + 'to_console_title', + 'to_stderr', + 'trouble', + 'write_debug', + ): + if not hasattr(self, func): + setattr(self, func, getattr(ydl, func)) + + def to_screen(self, *args, **kargs): + self.ydl.to_screen(*args, quiet=self.params.get('quiet'), **kargs) + + __to_screen = to_screen + + @classproperty + def FD_NAME(cls): + return re.sub(r'(?<=[a-z])(?=[A-Z])', '_', cls.__name__[:-2]).lower() + @staticmethod def format_seconds(seconds): + if seconds is None: + return ' Unknown' time = timetuple_from_msec(seconds * 1000) if time.hours > 99: return '--:--:--' - if not time.hours: - return '%02d:%02d' % time[1:-1] return '%02d:%02d:%02d' % time[:-1] + @classmethod + def format_eta(cls, seconds): + return f'{remove_start(cls.format_seconds(seconds), "00:"):>8s}' + @staticmethod def calc_percent(byte_counter, data_len): if data_len is None: @@ -94,11 +136,7 @@ class FileDownloader(object): @staticmethod def format_percent(percent): - if percent is None: - return '---.-%' - elif percent == 100: - return '100%' - return '%6s' % ('%3.1f%%' % percent) + return ' N/A%' if percent is None else f'{percent:>5.1f}%' @staticmethod def calc_eta(start, now, total, current): @@ -112,12 +150,6 @@ class FileDownloader(object): rate = float(current) / dif return int((float(total) - float(current)) / rate) - @staticmethod - def format_eta(eta): - if eta is None: - return '--:--' - return FileDownloader.format_seconds(eta) - @staticmethod def calc_speed(start, now, bytes): dif = now - start @@ -127,13 +159,11 @@ class FileDownloader(object): @staticmethod def format_speed(speed): - if speed is None: - return '%10s' % '---b/s' - return '%10s' % ('%s/s' % format_bytes(speed)) + return ' Unknown B/s' if speed is None else f'{format_bytes(speed):>10s}/s' @staticmethod def format_retries(retries): - return 'inf' if retries == float('inf') else '%.0f' % retries + return 'inf' if retries == float('inf') else int(retries) @staticmethod def best_block_size(elapsed_time, bytes): @@ -151,33 +181,9 @@ class FileDownloader(object): @staticmethod def parse_bytes(bytestr): """Parse a string indicating a byte quantity into an integer.""" - matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr) - if matchobj is None: - return None - number = float(matchobj.group(1)) - multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower()) - return int(round(number * multiplier)) - - def to_screen(self, *args, **kargs): - self.ydl.to_stdout(*args, quiet=self.params.get('quiet'), **kargs) - - def to_stderr(self, 
message): - self.ydl.to_stderr(message) - - def to_console_title(self, message): - self.ydl.to_console_title(message) - - def trouble(self, *args, **kargs): - self.ydl.trouble(*args, **kargs) - - def report_warning(self, *args, **kargs): - self.ydl.report_warning(*args, **kargs) - - def report_error(self, *args, **kargs): - self.ydl.report_error(*args, **kargs) - - def write_debug(self, *args, **kargs): - self.ydl.write_debug(*args, **kargs) + deprecation_warning('yt_dlp.FileDownloader.parse_bytes is deprecated and ' + 'may be removed in the future. Use yt_dlp.utils.parse_bytes instead') + return parse_bytes(bytestr) def slow_down(self, start_time, now, byte_counter): """Sleep if the download speed is over the rate limit.""" @@ -210,28 +216,42 @@ class FileDownloader(object): def ytdl_filename(self, filename): return filename + '.ytdl' - def sanitize_open(self, filename, open_mode): - file_access_retries = self.params.get('file_access_retries', 10) - retry = 0 - while True: - try: - return sanitize_open(filename, open_mode) - except (IOError, OSError) as err: - retry = retry + 1 - if retry > file_access_retries or err.errno not in (errno.EACCES,): - raise - self.to_screen( - '[download] Got file access error. Retrying (attempt %d of %s) ...' - % (retry, self.format_retries(file_access_retries))) - time.sleep(0.01) + def wrap_file_access(action, *, fatal=False): + def error_callback(err, count, retries, *, fd): + return RetryManager.report_retry( + err, count, retries, info=fd.__to_screen, + warn=lambda e: (time.sleep(0.01), fd.to_screen(f'[download] Unable to {action} file: {e}')), + error=None if fatal else lambda e: fd.report_error(f'Unable to {action} file: {e}'), + sleep_func=fd.params.get('retry_sleep_functions', {}).get('file_access')) + def wrapper(self, func, *args, **kwargs): + for retry in RetryManager(self.params.get('file_access_retries'), error_callback, fd=self): + try: + return func(self, *args, **kwargs) + except OSError as err: + if err.errno in (errno.EACCES, errno.EINVAL): + retry.error = err + continue + retry.error_callback(err, 1, 0) + + return functools.partial(functools.partialmethod, wrapper) + + @wrap_file_access('open', fatal=True) + def sanitize_open(self, filename, open_mode): + f, filename = sanitize_open(filename, open_mode) + if not getattr(f, 'locked', None): + self.write_debug(f'{LockingUnsupportedError.msg}. 
Proceeding without locking', only_once=True) + return f, filename + + @wrap_file_access('remove') + def try_remove(self, filename): + os.remove(filename) + + @wrap_file_access('rename') def try_rename(self, old_filename, new_filename): if old_filename == new_filename: return - try: - os.replace(old_filename, new_filename) - except (IOError, OSError) as err: - self.report_error(f'unable to rename file: {err}') + os.replace(old_filename, new_filename) def try_utime(self, filename, last_modified_hdr): """Try to set the last-modified time of the given file.""" @@ -248,10 +268,8 @@ class FileDownloader(object): # Ignore obviously invalid dates if filetime == 0: return - try: + with contextlib.suppress(Exception): os.utime(filename, (time.time(), filetime)) - except Exception: - pass return filetime def report_destination(self, filename): @@ -264,26 +282,26 @@ class FileDownloader(object): elif self.ydl.params.get('logger'): self._multiline = MultilineLogger(self.ydl.params['logger'], lines) elif self.params.get('progress_with_newline'): - self._multiline = BreaklineStatusPrinter(self.ydl._screen_file, lines) + self._multiline = BreaklineStatusPrinter(self.ydl._out_files.out, lines) else: - self._multiline = MultilinePrinter(self.ydl._screen_file, lines, not self.params.get('quiet')) + self._multiline = MultilinePrinter(self.ydl._out_files.out, lines, not self.params.get('quiet')) self._multiline.allow_colors = self._multiline._HAVE_FULLCAP and not self.params.get('no_color') def _finish_multiline_status(self): self._multiline.end() - _progress_styles = { - 'downloaded_bytes': 'light blue', - 'percent': 'light blue', - 'eta': 'yellow', - 'speed': 'green', - 'elapsed': 'bold white', - 'total_bytes': '', - 'total_bytes_estimate': '', - } + ProgressStyles = Namespace( + downloaded_bytes='light blue', + percent='light blue', + eta='yellow', + speed='green', + elapsed='bold white', + total_bytes='', + total_bytes_estimate='', + ) def _report_progress_status(self, s, default_template): - for name, style in self._progress_styles.items(): + for name, style in self.ProgressStyles.items_: name = f'_{name}_str' if name not in s: continue @@ -307,78 +325,73 @@ class FileDownloader(object): self._multiline.stream, self._multiline.allow_colors, *args, **kwargs) def report_progress(self, s): + def with_fields(*tups, default=''): + for *fields, tmpl in tups: + if all(s.get(f) is not None for f in fields): + return tmpl + return default + + _format_bytes = lambda k: f'{format_bytes(s.get(k)):>10s}' + if s['status'] == 'finished': if self.params.get('noprogress'): self.to_screen('[download] Download completed') - msg_template = '100%%' - if s.get('total_bytes') is not None: - s['_total_bytes_str'] = format_bytes(s['total_bytes']) - msg_template += ' of %(_total_bytes_str)s' - if s.get('elapsed') is not None: - s['_elapsed_str'] = self.format_seconds(s['elapsed']) - msg_template += ' in %(_elapsed_str)s' - s['_percent_str'] = self.format_percent(100) - self._report_progress_status(s, msg_template) - return + speed = try_call(lambda: s['total_bytes'] / s['elapsed']) + s.update({ + 'speed': speed, + '_speed_str': self.format_speed(speed).strip(), + '_total_bytes_str': _format_bytes('total_bytes'), + '_elapsed_str': self.format_seconds(s.get('elapsed')), + '_percent_str': self.format_percent(100), + }) + self._report_progress_status(s, join_nonempty( + '100%%', + with_fields(('total_bytes', 'of %(_total_bytes_str)s')), + with_fields(('elapsed', 'in %(_elapsed_str)s')), + with_fields(('speed', 'at %(_speed_str)s')), + 
delim=' ')) if s['status'] != 'downloading': return - if s.get('eta') is not None: - s['_eta_str'] = self.format_eta(s['eta']) - else: - s['_eta_str'] = 'Unknown' + s.update({ + '_eta_str': self.format_eta(s.get('eta')).strip(), + '_speed_str': self.format_speed(s.get('speed')), + '_percent_str': self.format_percent(try_call( + lambda: 100 * s['downloaded_bytes'] / s['total_bytes'], + lambda: 100 * s['downloaded_bytes'] / s['total_bytes_estimate'], + lambda: s['downloaded_bytes'] == 0 and 0)), + '_total_bytes_str': _format_bytes('total_bytes'), + '_total_bytes_estimate_str': _format_bytes('total_bytes_estimate'), + '_downloaded_bytes_str': _format_bytes('downloaded_bytes'), + '_elapsed_str': self.format_seconds(s.get('elapsed')), + }) - if s.get('total_bytes') and s.get('downloaded_bytes') is not None: - s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes']) - elif s.get('total_bytes_estimate') and s.get('downloaded_bytes') is not None: - s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes_estimate']) - else: - if s.get('downloaded_bytes') == 0: - s['_percent_str'] = self.format_percent(0) - else: - s['_percent_str'] = 'Unknown %' + msg_template = with_fields( + ('total_bytes', '%(_percent_str)s of %(_total_bytes_str)s at %(_speed_str)s ETA %(_eta_str)s'), + ('total_bytes_estimate', '%(_percent_str)s of ~%(_total_bytes_estimate_str)s at %(_speed_str)s ETA %(_eta_str)s'), + ('downloaded_bytes', 'elapsed', '%(_downloaded_bytes_str)s at %(_speed_str)s (%(_elapsed_str)s)'), + ('downloaded_bytes', '%(_downloaded_bytes_str)s at %(_speed_str)s'), + default='%(_percent_str)s at %(_speed_str)s ETA %(_eta_str)s') - if s.get('speed') is not None: - s['_speed_str'] = self.format_speed(s['speed']) - else: - s['_speed_str'] = 'Unknown speed' - - if s.get('total_bytes') is not None: - s['_total_bytes_str'] = format_bytes(s['total_bytes']) - msg_template = '%(_percent_str)s of %(_total_bytes_str)s at %(_speed_str)s ETA %(_eta_str)s' - elif s.get('total_bytes_estimate') is not None: - s['_total_bytes_estimate_str'] = format_bytes(s['total_bytes_estimate']) - msg_template = '%(_percent_str)s of ~%(_total_bytes_estimate_str)s at %(_speed_str)s ETA %(_eta_str)s' - else: - if s.get('downloaded_bytes') is not None: - s['_downloaded_bytes_str'] = format_bytes(s['downloaded_bytes']) - if s.get('elapsed'): - s['_elapsed_str'] = self.format_seconds(s['elapsed']) - msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s (%(_elapsed_str)s)' - else: - msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s' - else: - msg_template = '%(_percent_str)s at %(_speed_str)s ETA %(_eta_str)s' - if s.get('fragment_index') and s.get('fragment_count'): - msg_template += ' (frag %(fragment_index)s/%(fragment_count)s)' - elif s.get('fragment_index'): - msg_template += ' (frag %(fragment_index)s)' + msg_template += with_fields( + ('fragment_index', 'fragment_count', ' (frag %(fragment_index)s/%(fragment_count)s)'), + ('fragment_index', ' (frag %(fragment_index)s)')) self._report_progress_status(s, msg_template) def report_resuming_byte(self, resume_len): """Report attempt to resume at given byte.""" self.to_screen('[download] Resuming download at byte %s' % resume_len) - def report_retry(self, err, count, retries): - """Report retry in case of HTTP error 5xx""" - self.to_screen( - '[download] Got server HTTP error: %s. Retrying (attempt %d of %s) ...' 
- % (error_to_compat_str(err), count, self.format_retries(retries))) - - def report_file_already_downloaded(self, *args, **kwargs): - """Report file has already been fully downloaded.""" - return self.ydl.report_file_already_downloaded(*args, **kwargs) + def report_retry(self, err, count, retries, frag_index=NO_DEFAULT, fatal=True): + """Report retry""" + is_frag = False if frag_index is NO_DEFAULT else 'fragment' + RetryManager.report_retry( + err, count, retries, info=self.__to_screen, + warn=lambda msg: self.__to_screen(f'[download] Got error: {msg}'), + error=IDENTITY if not fatal else lambda e: self.report_error(f'\r[download] Got error: {e}'), + sleep_func=self.params.get('retry_sleep_functions', {}).get(is_frag or 'http'), + suffix=f'fragment{"s" if frag_index is None else f" {frag_index}"}' if is_frag else None) def report_unable_to_resume(self): """Report it was impossible to resume download.""" @@ -418,25 +431,16 @@ class FileDownloader(object): self._finish_multiline_status() return True, False - if subtitle is False: - min_sleep_interval = self.params.get('sleep_interval') - if min_sleep_interval: - max_sleep_interval = self.params.get('max_sleep_interval', min_sleep_interval) - sleep_interval = random.uniform(min_sleep_interval, max_sleep_interval) - self.to_screen( - '[download] Sleeping %s seconds ...' % ( - int(sleep_interval) if sleep_interval.is_integer() - else '%.2f' % sleep_interval)) - time.sleep(sleep_interval) + if subtitle: + sleep_interval = self.params.get('sleep_interval_subtitles') or 0 else: - sleep_interval_sub = 0 - if type(self.params.get('sleep_interval_subtitles')) is int: - sleep_interval_sub = self.params.get('sleep_interval_subtitles') - if sleep_interval_sub > 0: - self.to_screen( - '[download] Sleeping %s seconds ...' % ( - sleep_interval_sub)) - time.sleep(sleep_interval_sub) + min_sleep_interval = self.params.get('sleep_interval') or 0 + sleep_interval = random.uniform( + min_sleep_interval, self.params.get('max_sleep_interval') or min_sleep_interval) + if sleep_interval > 0: + self.to_screen(f'[download] Sleeping {sleep_interval:.2f} seconds ...') + time.sleep(sleep_interval) + ret = self.real_download(filename, info_dict) self._finish_multiline_status() return ret, True @@ -446,8 +450,7 @@ class FileDownloader(object): raise NotImplementedError('This method must be implemented by subclasses') def _hook_progress(self, status, info_dict): - if not self._progress_hooks: - return + # Ideally we want to make a copy of the dict, but that is too slow status['info_dict'] = info_dict # youtube-dl passes the same status object to all the hooks. # Some third party scripts seems to be relying on this. @@ -469,4 +472,4 @@ class FileDownloader(object): if exe is None: exe = os.path.basename(str_args[0]) - self.write_debug('%s command line: %s' % (exe, shell_quote(str_args))) + self.write_debug(f'{exe} command line: {shell_quote(str_args)}') diff --git a/plugins/youtube_download/yt_dlp/downloader/dash.py b/plugins/youtube_download/yt_dlp/downloader/dash.py index a845ee7..4328d73 100644 --- a/plugins/youtube_download/yt_dlp/downloader/dash.py +++ b/plugins/youtube_download/yt_dlp/downloader/dash.py @@ -1,10 +1,9 @@ -from __future__ import unicode_literals import time +import urllib.parse -from ..downloader import get_suitable_downloader +from . 
import get_suitable_downloader from .fragment import FragmentFD - -from ..utils import urljoin +from ..utils import update_url_query, urljoin class DashSegmentsFD(FragmentFD): @@ -42,24 +41,29 @@ class DashSegmentsFD(FragmentFD): self._prepare_and_start_frag_download(ctx, fmt) ctx['start'] = real_start - fragments_to_download = self._get_fragments(fmt, ctx) + extra_query = None + extra_param_to_segment_url = info_dict.get('extra_param_to_segment_url') + if extra_param_to_segment_url: + extra_query = urllib.parse.parse_qs(extra_param_to_segment_url) + + fragments_to_download = self._get_fragments(fmt, ctx, extra_query) if real_downloader: self.to_screen( - '[%s] Fragment downloads will be delegated to %s' % (self.FD_NAME, real_downloader.get_basename())) + f'[{self.FD_NAME}] Fragment downloads will be delegated to {real_downloader.get_basename()}') info_dict['fragments'] = list(fragments_to_download) fd = real_downloader(self.ydl, self.params) return fd.real_download(filename, info_dict) args.append([ctx, fragments_to_download, fmt]) - return self.download_and_append_fragments_multiple(*args) + return self.download_and_append_fragments_multiple(*args, is_fatal=lambda idx: idx == 0) def _resolve_fragments(self, fragments, ctx): fragments = fragments(ctx) if callable(fragments) else fragments return [next(iter(fragments))] if self.params.get('test') else fragments - def _get_fragments(self, fmt, ctx): + def _get_fragments(self, fmt, ctx, extra_query): fragment_base_url = fmt.get('fragment_base_url') fragments = self._resolve_fragments(fmt['fragments'], ctx) @@ -72,9 +76,12 @@ class DashSegmentsFD(FragmentFD): if not fragment_url: assert fragment_base_url fragment_url = urljoin(fragment_base_url, fragment['path']) + if extra_query: + fragment_url = update_url_query(fragment_url, extra_query) yield { 'frag_index': frag_index, + 'fragment_count': fragment.get('fragment_count'), 'index': i, 'url': fragment_url, } diff --git a/plugins/youtube_download/yt_dlp/downloader/external.py b/plugins/youtube_download/yt_dlp/downloader/external.py index f4fdcf1..5f54017 100644 --- a/plugins/youtube_download/yt_dlp/downloader/external.py +++ b/plugins/youtube_download/yt_dlp/downloader/external.py @@ -1,35 +1,44 @@ -from __future__ import unicode_literals - +import enum +import json import os.path import re import subprocess import sys import time +import uuid from .fragment import FragmentFD -from ..compat import ( - compat_setenv, - compat_str, -) -from ..postprocessor.ffmpeg import FFmpegPostProcessor, EXT_TO_OUT_FORMATS +from ..compat import functools +from ..postprocessor.ffmpeg import EXT_TO_OUT_FORMATS, FFmpegPostProcessor from ..utils import ( + Popen, + RetryManager, + _configuration_args, + check_executable, + classproperty, + cli_bool_option, cli_option, cli_valueless_option, - cli_bool_option, - _configuration_args, determine_ext, - encodeFilename, encodeArgument, + encodeFilename, + find_available_port, handle_youtubedl_headers, - check_executable, - Popen, remove_end, + sanitized_Request, + traverse_obj, ) +class Features(enum.Enum): + TO_STDOUT = enum.auto() + MULTIPLE_FORMATS = enum.auto() + + class ExternalFD(FragmentFD): SUPPORTED_PROTOCOLS = ('http', 'https', 'ftp', 'ftps') - can_download_to_stdout = False + SUPPORTED_FEATURES = () + _CAPTURE_STDERR = True def real_download(self, filename, info_dict): self.report_destination(filename) @@ -55,7 +64,6 @@ class ExternalFD(FragmentFD): } if filename != '-': fsize = os.path.getsize(encodeFilename(tmpfilename)) - self.to_screen('\r[%s] Downloaded 
%s bytes' % (self.get_basename(), fsize)) self.try_rename(tmpfilename, filename) status.update({ 'downloaded_bytes': fsize, @@ -73,23 +81,32 @@ class ExternalFD(FragmentFD): def get_basename(cls): return cls.__name__[:-2].lower() - @property + @classproperty + def EXE_NAME(cls): + return cls.get_basename() + + @functools.cached_property def exe(self): - return self.get_basename() + return self.EXE_NAME @classmethod def available(cls, path=None): - path = check_executable(path or cls.get_basename(), [cls.AVAILABLE_OPT]) - if path: - cls.exe = path - return path - return False + path = check_executable( + cls.EXE_NAME if path in (None, cls.get_basename()) else path, + [cls.AVAILABLE_OPT]) + if not path: + return False + cls.exe = path + return path @classmethod def supports(cls, info_dict): - return ( - (cls.can_download_to_stdout or not info_dict.get('to_stdout')) - and info_dict['protocol'] in cls.SUPPORTED_PROTOCOLS) + return all(( + not info_dict.get('to_stdout') or Features.TO_STDOUT in cls.SUPPORTED_FEATURES, + '+' not in info_dict['protocol'] or Features.MULTIPLE_FORMATS in cls.SUPPORTED_FEATURES, + not traverse_obj(info_dict, ('hls_aes', ...), 'extra_param_to_segment_url'), + all(proto in cls.SUPPORTED_PROTOCOLS for proto in info_dict['protocol'].split('+')), + )) @classmethod def can_download(cls, info_dict, path=None): @@ -106,7 +123,7 @@ class ExternalFD(FragmentFD): def _configuration_args(self, keys=None, *args, **kwargs): return _configuration_args( - self.get_basename(), self.params.get('external_downloader_args'), self.get_basename(), + self.get_basename(), self.params.get('external_downloader_args'), self.EXE_NAME, keys, *args, **kwargs) def _call_downloader(self, tmpfilename, info_dict): @@ -116,33 +133,27 @@ class ExternalFD(FragmentFD): self._debug_cmd(cmd) if 'fragments' not in info_dict: - p = Popen(cmd, stderr=subprocess.PIPE) - _, stderr = p.communicate_or_kill() - if p.returncode != 0: - self.to_stderr(stderr.decode('utf-8', 'replace')) - return p.returncode + _, stderr, returncode = self._call_process(cmd, info_dict) + if returncode and stderr: + self.to_stderr(stderr) + return returncode - fragment_retries = self.params.get('fragment_retries', 0) skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True) - count = 0 - while count <= fragment_retries: - p = Popen(cmd, stderr=subprocess.PIPE) - _, stderr = p.communicate_or_kill() - if p.returncode == 0: + retry_manager = RetryManager(self.params.get('fragment_retries'), self.report_retry, + frag_index=None, fatal=not skip_unavailable_fragments) + for retry in retry_manager: + _, stderr, returncode = self._call_process(cmd, info_dict) + if not returncode: break # TODO: Decide whether to retry based on error code # https://aria2.github.io/manual/en/html/aria2c.html#exit-status - self.to_stderr(stderr.decode('utf-8', 'replace')) - count += 1 - if count <= fragment_retries: - self.to_screen( - '[%s] Got error. Retrying fragments (attempt %d of %s)...' 
- % (self.get_basename(), count, self.format_retries(fragment_retries))) - if count > fragment_retries: - if not skip_unavailable_fragments: - self.report_error('Giving up after %s fragment retries' % fragment_retries) - return -1 + if stderr: + self.to_stderr(stderr) + retry.error = Exception() + continue + if not skip_unavailable_fragments and retry_manager.error: + return -1 decrypt_fragment = self.decrypter(info_dict) dest, _ = self.sanitize_open(tmpfilename, 'wb') @@ -150,7 +161,7 @@ class ExternalFD(FragmentFD): fragment_filename = '%s-Frag%d' % (tmpfilename, frag_index) try: src, _ = self.sanitize_open(fragment_filename, 'rb') - except IOError as err: + except OSError as err: if skip_unavailable_fragments and frag_index > 1: self.report_skip_fragment(frag_index, err) continue @@ -159,20 +170,24 @@ class ExternalFD(FragmentFD): dest.write(decrypt_fragment(fragment, src.read())) src.close() if not self.params.get('keep_fragments', False): - os.remove(encodeFilename(fragment_filename)) + self.try_remove(encodeFilename(fragment_filename)) dest.close() - os.remove(encodeFilename('%s.frag.urls' % tmpfilename)) + self.try_remove(encodeFilename('%s.frag.urls' % tmpfilename)) return 0 + def _call_process(self, cmd, info_dict): + return Popen.run(cmd, text=True, stderr=subprocess.PIPE) + class CurlFD(ExternalFD): AVAILABLE_OPT = '-V' + _CAPTURE_STDERR = False # curl writes the progress to stderr def _make_cmd(self, tmpfilename, info_dict): - cmd = [self.exe, '--location', '-o', tmpfilename] + cmd = [self.exe, '--location', '-o', tmpfilename, '--compressed'] if info_dict.get('http_headers') is not None: for key, val in info_dict['http_headers'].items(): - cmd += ['--header', '%s: %s' % (key, val)] + cmd += ['--header', f'{key}: {val}'] cmd += self._bool_option('--continue-at', 'continuedl', '-', '0') cmd += self._valueless_option('--silent', 'noprogress') @@ -191,16 +206,6 @@ class CurlFD(ExternalFD): cmd += ['--', info_dict['url']] return cmd - def _call_downloader(self, tmpfilename, info_dict): - cmd = [encodeArgument(a) for a in self._make_cmd(tmpfilename, info_dict)] - - self._debug_cmd(cmd) - - # curl writes the progress to stderr so don't capture it. 
- p = Popen(cmd) - p.communicate_or_kill() - return p.returncode - class AxelFD(ExternalFD): AVAILABLE_OPT = '-V' @@ -209,7 +214,7 @@ class AxelFD(ExternalFD): cmd = [self.exe, '-o', tmpfilename] if info_dict.get('http_headers') is not None: for key, val in info_dict['http_headers'].items(): - cmd += ['-H', '%s: %s' % (key, val)] + cmd += ['-H', f'{key}: {val}'] cmd += self._configuration_args() cmd += ['--', info_dict['url']] return cmd @@ -219,10 +224,10 @@ class WgetFD(ExternalFD): AVAILABLE_OPT = '--version' def _make_cmd(self, tmpfilename, info_dict): - cmd = [self.exe, '-O', tmpfilename, '-nv', '--no-cookies'] + cmd = [self.exe, '-O', tmpfilename, '-nv', '--no-cookies', '--compression=auto'] if info_dict.get('http_headers') is not None: for key, val in info_dict['http_headers'].items(): - cmd += ['--header', '%s: %s' % (key, val)] + cmd += ['--header', f'{key}: {val}'] cmd += self._option('--limit-rate', 'ratelimit') retry = self._option('--tries', 'retries') if len(retry) == 2: @@ -230,7 +235,10 @@ class WgetFD(ExternalFD): retry[1] = '0' cmd += retry cmd += self._option('--bind-address', 'source_address') - cmd += self._option('--proxy', 'proxy') + proxy = self.params.get('proxy') + if proxy: + for var in ('http_proxy', 'https_proxy'): + cmd += ['--execute', f'{var}={proxy}'] cmd += self._valueless_option('--no-check-certificate', 'nocheckcertificate') cmd += self._configuration_args() cmd += ['--', info_dict['url']] @@ -250,10 +258,23 @@ class Aria2cFD(ExternalFD): check_results = (not re.search(feature, manifest) for feature in UNSUPPORTED_FEATURES) return all(check_results) + @staticmethod + def _aria2c_filename(fn): + return fn if os.path.isabs(fn) else f'.{os.path.sep}{fn}' + + def _call_downloader(self, tmpfilename, info_dict): + # FIXME: Disabled due to https://github.com/yt-dlp/yt-dlp/issues/5931 + if False and 'no-external-downloader-progress' not in self.params.get('compat_opts', []): + info_dict['__rpc'] = { + 'port': find_available_port() or 19190, + 'secret': str(uuid.uuid4()), + } + return super()._call_downloader(tmpfilename, info_dict) + def _make_cmd(self, tmpfilename, info_dict): cmd = [self.exe, '-c', '--console-log-level=warn', '--summary-interval=0', '--download-result=hide', - '--file-allocation=none', '-x16', '-j16', '-s16'] + '--http-accept-gzip=true', '--file-allocation=none', '-x16', '-j16', '-s16'] if 'fragments' in info_dict: cmd += ['--allow-overwrite=true', '--allow-piece-length-change=true'] else: @@ -261,7 +282,7 @@ class Aria2cFD(ExternalFD): if info_dict.get('http_headers') is not None: for key, val in info_dict['http_headers'].items(): - cmd += ['--header', '%s: %s' % (key, val)] + cmd += ['--header', f'{key}: {val}'] cmd += self._option('--max-overall-download-limit', 'ratelimit') cmd += self._option('--interface', 'source_address') cmd += self._option('--all-proxy', 'proxy') @@ -270,6 +291,12 @@ class Aria2cFD(ExternalFD): cmd += self._bool_option('--show-console-readout', 'noprogress', 'false', 'true', '=') cmd += self._configuration_args() + if '__rpc' in info_dict: + cmd += [ + '--enable-rpc', + f'--rpc-listen-port={info_dict["__rpc"]["port"]}', + f'--rpc-secret={info_dict["__rpc"]["secret"]}'] + # aria2c strips out spaces from the beginning/end of filenames and paths. # We work around this issue by adding a "./" to the beginning of the # filename and relative path, and adding a "/" at the end of the path. 
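The workaround described in the comment above only applies to relative paths: aria2c strips significant leading/trailing spaces, and anchoring the path with './' preserves them. A minimal standalone sketch of the same idea, mirroring the `_aria2c_filename` helper added earlier in this file's diff (the sample filenames below are illustrative only):

import os.path

def aria2c_filename(fn):
    # aria2c strips leading/trailing spaces from the paths it is given;
    # anchoring a relative path with './' keeps such spaces intact, while
    # absolute paths can safely pass through unchanged.
    return fn if os.path.isabs(fn) else f'.{os.path.sep}{fn}'

print(aria2c_filename(' spaced name.mp4'))  # './ spaced name.mp4' on POSIX
print(aria2c_filename('/tmp/out.mp4'))      # unchanged: already absolute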
@@ -278,11 +305,9 @@ class Aria2cFD(ExternalFD): # https://github.com/aria2/aria2/issues/1373 dn = os.path.dirname(tmpfilename) if dn: - if not os.path.isabs(dn): - dn = '.%s%s' % (os.path.sep, dn) - cmd += ['--dir', dn + os.path.sep] + cmd += ['--dir', self._aria2c_filename(dn) + os.path.sep] if 'fragments' not in info_dict: - cmd += ['--out', '.%s%s' % (os.path.sep, os.path.basename(tmpfilename))] + cmd += ['--out', self._aria2c_filename(os.path.basename(tmpfilename))] cmd += ['--auto-file-renaming=false'] if 'fragments' in info_dict: @@ -291,35 +316,114 @@ class Aria2cFD(ExternalFD): url_list = [] for frag_index, fragment in enumerate(info_dict['fragments']): fragment_filename = '%s-Frag%d' % (os.path.basename(tmpfilename), frag_index) - url_list.append('%s\n\tout=%s' % (fragment['url'], fragment_filename)) + url_list.append('%s\n\tout=%s' % (fragment['url'], self._aria2c_filename(fragment_filename))) stream, _ = self.sanitize_open(url_list_file, 'wb') - stream.write('\n'.join(url_list).encode('utf-8')) + stream.write('\n'.join(url_list).encode()) stream.close() - cmd += ['-i', url_list_file] + cmd += ['-i', self._aria2c_filename(url_list_file)] else: cmd += ['--', info_dict['url']] return cmd + def aria2c_rpc(self, rpc_port, rpc_secret, method, params=()): + # Does not actually need to be UUID, just unique + sanitycheck = str(uuid.uuid4()) + d = json.dumps({ + 'jsonrpc': '2.0', + 'id': sanitycheck, + 'method': method, + 'params': [f'token:{rpc_secret}', *params], + }).encode('utf-8') + request = sanitized_Request( + f'http://localhost:{rpc_port}/jsonrpc', + data=d, headers={ + 'Content-Type': 'application/json', + 'Content-Length': f'{len(d)}', + 'Ytdl-request-proxy': '__noproxy__', + }) + with self.ydl.urlopen(request) as r: + resp = json.load(r) + assert resp.get('id') == sanitycheck, 'Something went wrong with RPC server' + return resp['result'] + + def _call_process(self, cmd, info_dict): + if '__rpc' not in info_dict: + return super()._call_process(cmd, info_dict) + + send_rpc = functools.partial(self.aria2c_rpc, info_dict['__rpc']['port'], info_dict['__rpc']['secret']) + started = time.time() + + fragmented = 'fragments' in info_dict + frag_count = len(info_dict['fragments']) if fragmented else 1 + status = { + 'filename': info_dict.get('_filename'), + 'status': 'downloading', + 'elapsed': 0, + 'downloaded_bytes': 0, + 'fragment_count': frag_count if fragmented else None, + 'fragment_index': 0 if fragmented else None, + } + self._hook_progress(status, info_dict) + + def get_stat(key, *obj, average=False): + val = tuple(filter(None, map(float, traverse_obj(obj, (..., ..., key))))) or [0] + return sum(val) / (len(val) if average else 1) + + with Popen(cmd, text=True, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE) as p: + # Add a small sleep so that RPC client can receive response, + # or the connection stalls infinitely + time.sleep(0.2) + retval = p.poll() + while retval is None: + # We don't use tellStatus as we won't know the GID without reading stdout + # Ref: https://aria2.github.io/manual/en/html/aria2c.html#aria2.tellActive + active = send_rpc('aria2.tellActive') + completed = send_rpc('aria2.tellStopped', [0, frag_count]) + + downloaded = get_stat('totalLength', completed) + get_stat('completedLength', active) + speed = get_stat('downloadSpeed', active) + total = frag_count * get_stat('totalLength', active, completed, average=True) + if total < downloaded: + total = None + + status.update({ + 'downloaded_bytes': int(downloaded), + 'speed': speed, + 'total_bytes': None 
if fragmented else total, + 'total_bytes_estimate': total, + 'eta': (total - downloaded) / (speed or 1), + 'fragment_index': min(frag_count, len(completed) + 1) if fragmented else None, + 'elapsed': time.time() - started + }) + self._hook_progress(status, info_dict) + + if not active and len(completed) >= frag_count: + send_rpc('aria2.shutdown') + retval = p.wait() + break + + time.sleep(0.1) + retval = p.poll() + + return '', p.stderr.read(), retval + class HttpieFD(ExternalFD): AVAILABLE_OPT = '--version' - - @classmethod - def available(cls, path=None): - return super().available(path or 'http') + EXE_NAME = 'http' def _make_cmd(self, tmpfilename, info_dict): cmd = ['http', '--download', '--output', tmpfilename, info_dict['url']] if info_dict.get('http_headers') is not None: for key, val in info_dict['http_headers'].items(): - cmd += ['%s:%s' % (key, val)] + cmd += [f'{key}:{val}'] return cmd class FFmpegFD(ExternalFD): SUPPORTED_PROTOCOLS = ('http', 'https', 'ftp', 'ftps', 'm3u8', 'm3u8_native', 'rtsp', 'rtmp', 'rtmp_ffmpeg', 'mms', 'http_dash_segments') - can_download_to_stdout = True + SUPPORTED_FEATURES = (Features.TO_STDOUT, Features.MULTIPLE_FORMATS) @classmethod def available(cls, path=None): @@ -327,10 +431,6 @@ class FFmpegFD(ExternalFD): # Fixme: This may be wrong when --ffmpeg-location is used return FFmpegPostProcessor().available - @classmethod - def supports(cls, info_dict): - return all(proto in cls.SUPPORTED_PROTOCOLS for proto in info_dict['protocol'].split('+')) - def on_process_started(self, proc, stdin): """ Override this in subclasses """ pass @@ -345,7 +445,6 @@ class FFmpegFD(ExternalFD): and cls.can_download(info_dict)) def _call_downloader(self, tmpfilename, info_dict): - urls = [f['url'] for f in info_dict.get('requested_formats', [])] or [info_dict['url']] ffpp = FFmpegPostProcessor(downloader=self) if not ffpp.available: self.report_error('m3u8 download detected but ffmpeg could not be found. Please install') @@ -361,9 +460,11 @@ class FFmpegFD(ExternalFD): if not self.params.get('verbose'): args += ['-hide_banner'] - args += info_dict.get('_ffmpeg_args', []) + args += traverse_obj(info_dict, ('downloader_options', 'ffmpeg_args'), default=[]) - # This option exists only for compatibility. Extractors should use `_ffmpeg_args` instead + # These exists only for compatibility. Extractors should use + # info_dict['downloader_options']['ffmpeg_args'] instead + args += info_dict.get('_ffmpeg_args') or [] seekable = info_dict.get('_seekable') if seekable is not None: # setting -seekable prevents ffmpeg from guessing if the server @@ -373,21 +474,6 @@ class FFmpegFD(ExternalFD): # http://trac.ffmpeg.org/ticket/6125#comment:10 args += ['-seekable', '1' if seekable else '0'] - # start_time = info_dict.get('start_time') or 0 - # if start_time: - # args += ['-ss', compat_str(start_time)] - # end_time = info_dict.get('end_time') - # if end_time: - # args += ['-t', compat_str(end_time - start_time)] - - if info_dict.get('http_headers') is not None and re.match(r'^https?://', urls[0]): - # Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv: - # [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header. 
- headers = handle_youtubedl_headers(info_dict['http_headers']) - args += [ - '-headers', - ''.join('%s: %s\r\n' % (key, val) for key, val in headers.items())] - env = None proxy = self.params.get('proxy') if proxy: @@ -404,8 +490,8 @@ class FFmpegFD(ExternalFD): # We could switch to the following code if we are able to detect version properly # args += ['-http_proxy', proxy] env = os.environ.copy() - compat_setenv('HTTP_PROXY', proxy, env=env) - compat_setenv('http_proxy', proxy, env=env) + env['HTTP_PROXY'] = proxy + env['http_proxy'] = proxy protocol = info_dict.get('protocol') @@ -435,20 +521,36 @@ class FFmpegFD(ExternalFD): if isinstance(conn, list): for entry in conn: args += ['-rtmp_conn', entry] - elif isinstance(conn, compat_str): + elif isinstance(conn, str): args += ['-rtmp_conn', conn] - for i, url in enumerate(urls): - args += self._configuration_args((f'_i{i + 1}', '_i')) + ['-i', url] + start_time, end_time = info_dict.get('section_start') or 0, info_dict.get('section_end') + + selected_formats = info_dict.get('requested_formats') or [info_dict] + for i, fmt in enumerate(selected_formats): + if fmt.get('http_headers') and re.match(r'^https?://', fmt['url']): + headers_dict = handle_youtubedl_headers(fmt['http_headers']) + # Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv: + # [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header. + args.extend(['-headers', ''.join(f'{key}: {val}\r\n' for key, val in headers_dict.items())]) + + if start_time: + args += ['-ss', str(start_time)] + if end_time: + args += ['-t', str(end_time - start_time)] + + args += self._configuration_args((f'_i{i + 1}', '_i')) + ['-i', fmt['url']] + + if not (start_time or end_time) or not self.params.get('force_keyframes_at_cuts'): + args += ['-c', 'copy'] - args += ['-c', 'copy'] if info_dict.get('requested_formats') or protocol == 'http_dash_segments': - for (i, fmt) in enumerate(info_dict.get('requested_formats') or [info_dict]): + for i, fmt in enumerate(selected_formats): stream_number = fmt.get('manifest_stream_number', 0) args.extend(['-map', f'{i}:{stream_number}']) if self.params.get('test', False): - args += ['-fs', compat_str(self._TEST_FILE_SIZE)] + args += ['-fs', str(self._TEST_FILE_SIZE)] ext = info_dict['ext'] if protocol in ('m3u8', 'm3u8_native'): @@ -483,35 +585,35 @@ class FFmpegFD(ExternalFD): args.append(encodeFilename(ffpp._ffmpeg_filename_argument(tmpfilename), True)) self._debug_cmd(args) - proc = Popen(args, stdin=subprocess.PIPE, env=env) - if url in ('-', 'pipe:'): - self.on_process_started(proc, proc.stdin) - try: - retval = proc.wait() - except BaseException as e: - # subprocces.run would send the SIGKILL signal to ffmpeg and the - # mp4 file couldn't be played, but if we ask ffmpeg to quit it - # produces a file that is playable (this is mostly useful for live - # streams). Note that Windows is not affected and produces playable - # files (see https://github.com/ytdl-org/youtube-dl/issues/8300). 
- if isinstance(e, KeyboardInterrupt) and sys.platform != 'win32' and url not in ('-', 'pipe:'): - proc.communicate_or_kill(b'q') - else: - proc.kill() - proc.wait() - raise - return retval + piped = any(fmt['url'] in ('-', 'pipe:') for fmt in selected_formats) + with Popen(args, stdin=subprocess.PIPE, env=env) as proc: + if piped: + self.on_process_started(proc, proc.stdin) + try: + retval = proc.wait() + except BaseException as e: + # subprocces.run would send the SIGKILL signal to ffmpeg and the + # mp4 file couldn't be played, but if we ask ffmpeg to quit it + # produces a file that is playable (this is mostly useful for live + # streams). Note that Windows is not affected and produces playable + # files (see https://github.com/ytdl-org/youtube-dl/issues/8300). + if isinstance(e, KeyboardInterrupt) and sys.platform != 'win32' and not piped: + proc.communicate_or_kill(b'q') + else: + proc.kill(timeout=None) + raise + return retval class AVconvFD(FFmpegFD): pass -_BY_NAME = dict( - (klass.get_basename(), klass) +_BY_NAME = { + klass.get_basename(): klass for name, klass in globals().items() if name.endswith('FD') and name not in ('ExternalFD', 'FragmentFD') -) +} def list_external_downloaders(): @@ -519,8 +621,8 @@ def list_external_downloaders(): def get_external_downloader(external_downloader): - """ Given the name of the executable, see whether we support the given - downloader . """ - # Drop .exe extension on Windows + """ Given the name of the executable, see whether we support the given downloader """ bn = os.path.splitext(os.path.basename(external_downloader))[0] - return _BY_NAME.get(bn) + return _BY_NAME.get(bn) or next(( + klass for klass in _BY_NAME.values() if klass.EXE_NAME in bn + ), None) diff --git a/plugins/youtube_download/yt_dlp/downloader/f4m.py b/plugins/youtube_download/yt_dlp/downloader/f4m.py index 0008b7c..306f921 100644 --- a/plugins/youtube_download/yt_dlp/downloader/f4m.py +++ b/plugins/youtube_download/yt_dlp/downloader/f4m.py @@ -1,23 +1,14 @@ -from __future__ import division, unicode_literals - +import base64 import io import itertools +import struct import time +import urllib.error +import urllib.parse from .fragment import FragmentFD -from ..compat import ( - compat_b64decode, - compat_etree_fromstring, - compat_urlparse, - compat_urllib_error, - compat_urllib_parse_urlparse, - compat_struct_pack, - compat_struct_unpack, -) -from ..utils import ( - fix_xml_ampersands, - xpath_text, -) +from ..compat import compat_etree_fromstring +from ..utils import fix_xml_ampersands, xpath_text class DataTruncatedError(Exception): @@ -40,13 +31,13 @@ class FlvReader(io.BytesIO): # Utility functions for reading numbers and strings def read_unsigned_long_long(self): - return compat_struct_unpack('!Q', self.read_bytes(8))[0] + return struct.unpack('!Q', self.read_bytes(8))[0] def read_unsigned_int(self): - return compat_struct_unpack('!I', self.read_bytes(4))[0] + return struct.unpack('!I', self.read_bytes(4))[0] def read_unsigned_char(self): - return compat_struct_unpack('!B', self.read_bytes(1))[0] + return struct.unpack('!B', self.read_bytes(1))[0] def read_string(self): res = b'' @@ -193,7 +184,7 @@ def build_fragments_list(boot_info): first_frag_number = fragment_run_entry_table[0]['first'] fragments_counter = itertools.count(first_frag_number) for segment, fragments_count in segment_run_table['segment_run']: - # In some live HDS streams (for example Rai), `fragments_count` is + # In some live HDS streams (e.g. 
Rai), `fragments_count` is # abnormal and causing out-of-memory errors. It's OK to change the # number of fragments for live streams as they are updated periodically if fragments_count == 4294967295 and boot_info['live']: @@ -208,11 +199,11 @@ def build_fragments_list(boot_info): def write_unsigned_int(stream, val): - stream.write(compat_struct_pack('!I', val)) + stream.write(struct.pack('!I', val)) def write_unsigned_int_24(stream, val): - stream.write(compat_struct_pack('!I', val)[1:]) + stream.write(struct.pack('!I', val)[1:]) def write_flv_header(stream): @@ -261,8 +252,6 @@ class F4mFD(FragmentFD): A downloader for f4m manifests or AdobeHDS. """ - FD_NAME = 'f4m' - def _get_unencrypted_media(self, doc): media = doc.findall(_add_ns('media')) if not media: @@ -308,12 +297,12 @@ class F4mFD(FragmentFD): # 1. http://live-1-1.rutube.ru/stream/1024/HDS/SD/C2NKsS85HQNckgn5HdEmOQ/1454167650/S-s604419906/move/four/dirs/upper/1024-576p.f4m bootstrap_url = node.get('url') if bootstrap_url: - bootstrap_url = compat_urlparse.urljoin( + bootstrap_url = urllib.parse.urljoin( base_url, bootstrap_url) boot_info = self._get_bootstrap_from_url(bootstrap_url) else: bootstrap_url = None - bootstrap = compat_b64decode(node.text) + bootstrap = base64.b64decode(node.text) boot_info = read_bootstrap_info(bootstrap) return boot_info, bootstrap_url @@ -343,14 +332,14 @@ class F4mFD(FragmentFD): # Prefer baseURL for relative URLs as per 11.2 of F4M 3.0 spec. man_base_url = get_base_url(doc) or man_url - base_url = compat_urlparse.urljoin(man_base_url, media.attrib['url']) + base_url = urllib.parse.urljoin(man_base_url, media.attrib['url']) bootstrap_node = doc.find(_add_ns('bootstrapInfo')) boot_info, bootstrap_url = self._parse_bootstrap_node( bootstrap_node, man_base_url) live = boot_info['live'] metadata_node = media.find(_add_ns('metadata')) if metadata_node is not None: - metadata = compat_b64decode(metadata_node.text) + metadata = base64.b64decode(metadata_node.text) else: metadata = None @@ -378,7 +367,7 @@ class F4mFD(FragmentFD): if not live: write_metadata_tag(dest_stream, metadata) - base_url_parsed = compat_urllib_parse_urlparse(base_url) + base_url_parsed = urllib.parse.urlparse(base_url) self._start_frag_download(ctx, info_dict) @@ -398,9 +387,10 @@ class F4mFD(FragmentFD): query.append(info_dict['extra_param_to_segment_url']) url_parsed = base_url_parsed._replace(path=base_url_parsed.path + name, query='&'.join(query)) try: - success, down_data = self._download_fragment(ctx, url_parsed.geturl(), info_dict) + success = self._download_fragment(ctx, url_parsed.geturl(), info_dict) if not success: return False + down_data = self._read_fragment(ctx) reader = FlvReader(down_data) while True: try: @@ -417,7 +407,7 @@ class F4mFD(FragmentFD): if box_type == b'mdat': self._append_fragment(ctx, box_data) break - except (compat_urllib_error.HTTPError, ) as err: + except urllib.error.HTTPError as err: if live and (err.code == 404 or err.code == 410): # We didn't keep up with the live window. Continue # with the next available fragment. 
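For context on the f4m changes above: after dropping the compat shims, the FlvReader helpers are plain stdlib struct.unpack calls in network (big-endian) byte order. A minimal self-contained sketch of the same read pattern (the helper name and sample bytes below are illustrative, not part of the patch):

    import io
    import struct

    def read_unsigned_int(stream):
        # '!I' = big-endian 4-byte unsigned int, as in FlvReader.read_unsigned_int
        data = stream.read(4)
        if len(data) < 4:
            # FlvReader raises DataTruncatedError in this case
            raise EOFError('Unexpected end of stream')
        return struct.unpack('!I', data)[0]

    buf = io.BytesIO(struct.pack('!I', 0xDEADBEEF))
    assert read_unsigned_int(buf) == 0xDEADBEEF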
@@ -434,6 +424,4 @@ class F4mFD(FragmentFD):
                     msg = 'Missed %d fragments' % (fragments_list[0][1] - (frag_i + 1))
                     self.report_warning(msg)

-        self._finish_frag_download(ctx, info_dict)
-
-        return True
+        return self._finish_frag_download(ctx, info_dict)
diff --git a/plugins/youtube_download/yt_dlp/downloader/fc2.py b/plugins/youtube_download/yt_dlp/downloader/fc2.py
new file mode 100644
index 0000000..f9763de
--- /dev/null
+++ b/plugins/youtube_download/yt_dlp/downloader/fc2.py
@@ -0,0 +1,46 @@
+import threading
+
+from .common import FileDownloader
+from .external import FFmpegFD
+
+
+class FC2LiveFD(FileDownloader):
+    """
+    Downloads FC2 live without being stopped.
+    Note, this is not a part of public API, and will be removed without notice.
+    DO NOT USE
+    """
+
+    def real_download(self, filename, info_dict):
+        ws = info_dict['ws']
+
+        heartbeat_lock = threading.Lock()
+        heartbeat_state = [None, 1]
+
+        def heartbeat():
+            if heartbeat_state[1] < 0:
+                return
+
+            try:
+                heartbeat_state[1] += 1
+                ws.send('{"name":"heartbeat","arguments":{},"id":%d}' % heartbeat_state[1])
+            except Exception:
+                self.to_screen('[fc2:live] Heartbeat failed')
+
+            with heartbeat_lock:
+                heartbeat_state[0] = threading.Timer(30, heartbeat)
+                heartbeat_state[0]._daemonic = True
+                heartbeat_state[0].start()
+
+        heartbeat()
+
+        new_info_dict = info_dict.copy()
+        new_info_dict.update({
+            'ws': None,
+            'protocol': 'live_ffmpeg',
+        })
+        try:
+            return FFmpegFD(self.ydl, self.params or {}).download(filename, new_info_dict)
+        finally:
+            # stop heartbeating
+            heartbeat_state[1] = -1
diff --git a/plugins/youtube_download/yt_dlp/downloader/fragment.py b/plugins/youtube_download/yt_dlp/downloader/fragment.py
index 19c0990..039cb14 100644
--- a/plugins/youtube_download/yt_dlp/downloader/fragment.py
+++ b/plugins/youtube_download/yt_dlp/downloader/fragment.py
@@ -1,30 +1,23 @@
-from __future__ import division, unicode_literals
-
+import concurrent.futures
+import contextlib
 import http.client
 import json
 import math
 import os
+import struct
 import time
-
-try:
-    import concurrent.futures
-    can_threaded_download = True
-except ImportError:
-    can_threaded_download = False
+import urllib.error

 from .common import FileDownloader
 from .http import HttpFD
 from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
-from ..compat import (
-    compat_os_name,
-    compat_urllib_error,
-    compat_struct_pack,
-)
+from ..compat import compat_os_name
 from ..utils import (
     DownloadError,
-    error_to_compat_str,
+    RetryManager,
     encodeFilename,
     sanitized_Request,
+    traverse_obj,
 )
@@ -32,9 +25,7 @@ class HttpQuietDownloader(HttpFD):
     def to_screen(self, *args, **kargs):
         pass

-    def report_retry(self, err, count, retries):
-        super().to_screen(
-            f'[download] Got server HTTP error: {err}. Retrying (attempt {count} of {self.format_retries(retries)}) ...')
+    to_console_title = to_screen


 class FragmentFD(FileDownloader):
@@ -74,9 +65,9 @@ class FragmentFD(FileDownloader):
     """

     def report_retry_fragment(self, err, frag_index, count, retries):
-        self.to_screen(
-            '\r[download] Got server HTTP error: %s. Retrying fragment %d (attempt %d of %s) ...'
-            % (error_to_compat_str(err), frag_index, count, self.format_retries(retries)))
+        self.deprecation_warning('yt_dlp.downloader.FragmentFD.report_retry_fragment is deprecated. 
' + 'Use yt_dlp.downloader.FileDownloader.report_retry instead') + return self.report_retry(err, count, retries, frag_index) def report_skip_fragment(self, frag_index, err=None): err = f' {err};' if err else '' @@ -130,16 +121,23 @@ class FragmentFD(FileDownloader): 'request_data': request_data, 'ctx_id': ctx.get('ctx_id'), } - success = ctx['dl'].download(fragment_filename, fragment_info_dict) + success, _ = ctx['dl'].download(fragment_filename, fragment_info_dict) if not success: - return False, None + return False if fragment_info_dict.get('filetime'): ctx['fragment_filetime'] = fragment_info_dict.get('filetime') ctx['fragment_filename_sanitized'] = fragment_filename - return True, self._read_fragment(ctx) + return True def _read_fragment(self, ctx): - down, frag_sanitized = self.sanitize_open(ctx['fragment_filename_sanitized'], 'rb') + if not ctx.get('fragment_filename_sanitized'): + return None + try: + down, frag_sanitized = self.sanitize_open(ctx['fragment_filename_sanitized'], 'rb') + except FileNotFoundError: + if ctx.get('live'): + return None + raise ctx['fragment_filename_sanitized'] = frag_sanitized frag_content = down.read() down.close() @@ -153,7 +151,7 @@ class FragmentFD(FileDownloader): if self.__do_ytdl_file(ctx): self._write_ytdl_file(ctx) if not self.params.get('keep_fragments', False): - os.remove(encodeFilename(ctx['fragment_filename_sanitized'])) + self.try_remove(encodeFilename(ctx['fragment_filename_sanitized'])) del ctx['fragment_filename_sanitized'] def _prepare_frag_download(self, ctx): @@ -166,21 +164,13 @@ class FragmentFD(FileDownloader): total_frags_str += ' (not including %d ad)' % ad_frags else: total_frags_str = 'unknown (live)' - self.to_screen( - '[%s] Total fragments: %s' % (self.FD_NAME, total_frags_str)) + self.to_screen(f'[{self.FD_NAME}] Total fragments: {total_frags_str}') self.report_destination(ctx['filename']) - dl = HttpQuietDownloader( - self.ydl, - { - 'continuedl': True, - 'quiet': self.params.get('quiet'), - 'noprogress': True, - 'ratelimit': self.params.get('ratelimit'), - 'retries': self.params.get('retries', 0), - 'nopart': self.params.get('nopart', False), - 'test': self.params.get('test', False), - } - ) + dl = HttpQuietDownloader(self.ydl, { + **self.params, + 'noprogress': True, + 'test': False, + }) tmpfilename = self.temp_name(ctx['filename']) open_mode = 'wb' resume_len = 0 @@ -253,6 +243,9 @@ class FragmentFD(FileDownloader): if s['status'] not in ('downloading', 'finished'): return + if not total_frags and ctx.get('fragment_count'): + state['fragment_count'] = ctx['fragment_count'] + if ctx_id is not None and s.get('ctx_id') != ctx_id: return @@ -299,21 +292,26 @@ class FragmentFD(FileDownloader): if self.__do_ytdl_file(ctx): ytdl_filename = encodeFilename(self.ytdl_filename(ctx['filename'])) if os.path.isfile(ytdl_filename): - os.remove(ytdl_filename) + self.try_remove(ytdl_filename) elapsed = time.time() - ctx['started'] - if ctx['tmpfilename'] == '-': - downloaded_bytes = ctx['complete_frags_downloaded_bytes'] + to_file = ctx['tmpfilename'] != '-' + if to_file: + downloaded_bytes = os.path.getsize(encodeFilename(ctx['tmpfilename'])) else: + downloaded_bytes = ctx['complete_frags_downloaded_bytes'] + + if not downloaded_bytes: + if to_file: + self.try_remove(ctx['tmpfilename']) + self.report_error('The downloaded file is empty') + return False + elif to_file: self.try_rename(ctx['tmpfilename'], ctx['filename']) - if self.params.get('updatetime', True): - filetime = ctx.get('fragment_filetime') - if filetime: - try: - 
os.utime(ctx['filename'], (time.time(), filetime)) - except Exception: - pass - downloaded_bytes = os.path.getsize(encodeFilename(ctx['filename'])) + filetime = ctx.get('fragment_filetime') + if self.params.get('updatetime', True) and filetime: + with contextlib.suppress(Exception): + os.utime(ctx['filename'], (time.time(), filetime)) self._hook_progress({ 'downloaded_bytes': downloaded_bytes, @@ -325,6 +323,7 @@ class FragmentFD(FileDownloader): 'max_progress': ctx.get('max_progress'), 'progress_idx': ctx.get('progress_idx'), }, info_dict) + return True def _prepare_external_frag_download(self, ctx): if 'live' not in ctx: @@ -336,8 +335,7 @@ class FragmentFD(FileDownloader): total_frags_str += ' (not including %d ad)' % ad_frags else: total_frags_str = 'unknown (live)' - self.to_screen( - '[%s] Total fragments: %s' % (self.FD_NAME, total_frags_str)) + self.to_screen(f'[{self.FD_NAME}] Total fragments: {total_frags_str}') tmpfilename = self.temp_name(ctx['filename']) @@ -356,11 +354,14 @@ class FragmentFD(FileDownloader): return _key_cache[url] def decrypt_fragment(fragment, frag_content): + if frag_content is None: + return decrypt_info = fragment.get('decrypt_info') if not decrypt_info or decrypt_info['METHOD'] != 'AES-128': return frag_content - iv = decrypt_info.get('IV') or compat_struct_pack('>8xq', fragment['media_sequence']) - decrypt_info['KEY'] = decrypt_info.get('KEY') or _get_key(info_dict.get('_decryption_key_url') or decrypt_info['URI']) + iv = decrypt_info.get('IV') or struct.pack('>8xq', fragment['media_sequence']) + decrypt_info['KEY'] = (decrypt_info.get('KEY') + or _get_key(traverse_obj(info_dict, ('hls_aes', 'uri')) or decrypt_info['URI'])) # Don't decrypt the content in tests since the data is explicitly truncated and it's not to a valid block # size (see https://github.com/ytdl-org/youtube-dl/pull/27660). Tests only care that the correct data downloaded, # not what it decrypts to. @@ -370,7 +371,7 @@ class FragmentFD(FileDownloader): return decrypt_fragment - def download_and_append_fragments_multiple(self, *args, pack_func=None, finish_func=None): + def download_and_append_fragments_multiple(self, *args, **kwargs): ''' @params (ctx1, fragments1, info_dict1), (ctx2, fragments2, info_dict2), ... all args must be either tuple or list @@ -378,63 +379,76 @@ class FragmentFD(FileDownloader): interrupt_trigger = [True] max_progress = len(args) if max_progress == 1: - return self.download_and_append_fragments(*args[0], pack_func=pack_func, finish_func=finish_func) + return self.download_and_append_fragments(*args[0], **kwargs) max_workers = self.params.get('concurrent_fragment_downloads', 1) if max_progress > 1: self._prepare_multiline_status(max_progress) + is_live = any(traverse_obj(args, (..., 2, 'is_live'))) def thread_func(idx, ctx, fragments, info_dict, tpe): ctx['max_progress'] = max_progress ctx['progress_idx'] = idx return self.download_and_append_fragments( - ctx, fragments, info_dict, pack_func=pack_func, finish_func=finish_func, - tpe=tpe, interrupt_trigger=interrupt_trigger) + ctx, fragments, info_dict, **kwargs, tpe=tpe, interrupt_trigger=interrupt_trigger) class FTPE(concurrent.futures.ThreadPoolExecutor): # has to stop this or it's going to wait on the worker thread itself def __exit__(self, exc_type, exc_val, exc_tb): pass - spins = [] if compat_os_name == 'nt': - self.report_warning('Ctrl+C does not work on Windows when used with parallel threads. 
' - 'This is a known issue and patches are welcome') + def future_result(future): + while True: + try: + return future.result(0.1) + except KeyboardInterrupt: + raise + except concurrent.futures.TimeoutError: + continue + else: + def future_result(future): + return future.result() + + def interrupt_trigger_iter(fg): + for f in fg: + if not interrupt_trigger[0]: + break + yield f + + spins = [] for idx, (ctx, fragments, info_dict) in enumerate(args): tpe = FTPE(math.ceil(max_workers / max_progress)) - job = tpe.submit(thread_func, idx, ctx, fragments, info_dict, tpe) + job = tpe.submit(thread_func, idx, ctx, interrupt_trigger_iter(fragments), info_dict, tpe) spins.append((tpe, job)) result = True for tpe, job in spins: try: - result = result and job.result() + result = result and future_result(job) except KeyboardInterrupt: interrupt_trigger[0] = False finally: tpe.shutdown(wait=True) - if not interrupt_trigger[0]: + if not interrupt_trigger[0] and not is_live: raise KeyboardInterrupt() + # we expect the user wants to stop and DO WANT the preceding postprocessors to run; + # so returning a intermediate result here instead of KeyboardInterrupt on live return result def download_and_append_fragments( - self, ctx, fragments, info_dict, *, pack_func=None, finish_func=None, - tpe=None, interrupt_trigger=None): - if not interrupt_trigger: - interrupt_trigger = (True, ) + self, ctx, fragments, info_dict, *, is_fatal=(lambda idx: False), + pack_func=(lambda content, idx: content), finish_func=None, + tpe=None, interrupt_trigger=(True, )): - fragment_retries = self.params.get('fragment_retries', 0) - is_fatal = ( - ((lambda _: False) if info_dict.get('is_live') else (lambda idx: idx == 0)) - if self.params.get('skip_unavailable_fragments', True) else (lambda _: True)) - - if not pack_func: - pack_func = lambda frag_content, _: frag_content + if not self.params.get('skip_unavailable_fragments', True): + is_fatal = lambda _: True def download_fragment(fragment, ctx): + if not interrupt_trigger[0]: + return + frag_index = ctx['fragment_index'] = fragment['frag_index'] ctx['last_error'] = None - if not interrupt_trigger[0]: - return False, frag_index headers = info_dict.get('http_headers', {}).copy() byte_range = fragment.get('byte_range') if byte_range: @@ -442,82 +456,78 @@ class FragmentFD(FileDownloader): # Never skip the first fragment fatal = is_fatal(fragment.get('index') or (frag_index - 1)) - count, frag_content = 0, None - while count <= fragment_retries: - try: - success, frag_content = self._download_fragment(ctx, fragment['url'], info_dict, headers) - if not success: - return False, frag_index - break - except (compat_urllib_error.HTTPError, http.client.IncompleteRead) as err: - # Unavailable (possibly temporary) fragments may be served. - # First we try to retry then either skip or abort. - # See https://github.com/ytdl-org/youtube-dl/issues/10165, - # https://github.com/ytdl-org/youtube-dl/issues/10448). 
- count += 1 - ctx['last_error'] = err - if count <= fragment_retries: - self.report_retry_fragment(err, frag_index, count, fragment_retries) - except DownloadError: - # Don't retry fragment if error occurred during HTTP downloading - # itself since it has own retry settings - if not fatal: - break - raise - if count > fragment_retries: - if not fatal: - return False, frag_index - ctx['dest_stream'].close() - self.report_error('Giving up after %s fragment retries' % fragment_retries) - return False, frag_index - return frag_content, frag_index + def error_callback(err, count, retries): + if fatal and count > retries: + ctx['dest_stream'].close() + self.report_retry(err, count, retries, frag_index, fatal) + ctx['last_error'] = err + + for retry in RetryManager(self.params.get('fragment_retries'), error_callback): + try: + ctx['fragment_count'] = fragment.get('fragment_count') + if not self._download_fragment(ctx, fragment['url'], info_dict, headers): + return + except (urllib.error.HTTPError, http.client.IncompleteRead) as err: + retry.error = err + continue + except DownloadError: # has own retry settings + if fatal: + raise def append_fragment(frag_content, frag_index, ctx): - if not frag_content: - if not is_fatal(frag_index - 1): - self.report_skip_fragment(frag_index, 'fragment not found') - return True - else: - ctx['dest_stream'].close() - self.report_error( - 'fragment %s not found, unable to continue' % frag_index) - return False - self._append_fragment(ctx, pack_func(frag_content, frag_index)) + if frag_content: + self._append_fragment(ctx, pack_func(frag_content, frag_index)) + elif not is_fatal(frag_index - 1): + self.report_skip_fragment(frag_index, 'fragment not found') + else: + ctx['dest_stream'].close() + self.report_error(f'fragment {frag_index} not found, unable to continue') + return False return True decrypt_fragment = self.decrypter(info_dict) max_workers = math.ceil( self.params.get('concurrent_fragment_downloads', 1) / ctx.get('max_progress', 1)) - if can_threaded_download and max_workers > 1: - + if max_workers > 1: def _download_fragment(fragment): ctx_copy = ctx.copy() - frag_content, frag_index = download_fragment(fragment, ctx_copy) - return fragment, frag_content, frag_index, ctx_copy.get('fragment_filename_sanitized') + download_fragment(fragment, ctx_copy) + return fragment, fragment['frag_index'], ctx_copy.get('fragment_filename_sanitized') self.report_warning('The download speed shown is only of one thread. This is a known issue and patches are welcome') with tpe or concurrent.futures.ThreadPoolExecutor(max_workers) as pool: - for fragment, frag_content, frag_index, frag_filename in pool.map(_download_fragment, fragments): - if not interrupt_trigger[0]: - break - ctx['fragment_filename_sanitized'] = frag_filename - ctx['fragment_index'] = frag_index - result = append_fragment(decrypt_fragment(fragment, frag_content), frag_index, ctx) - if not result: - return False + try: + for fragment, frag_index, frag_filename in pool.map(_download_fragment, fragments): + ctx.update({ + 'fragment_filename_sanitized': frag_filename, + 'fragment_index': frag_index, + }) + if not append_fragment(decrypt_fragment(fragment, self._read_fragment(ctx)), frag_index, ctx): + return False + except KeyboardInterrupt: + self._finish_multiline_status() + self.report_error( + 'Interrupted by user. 
Waiting for all threads to shutdown...', is_error=False, tb=False) + pool.shutdown(wait=False) + raise else: for fragment in fragments: if not interrupt_trigger[0]: break - frag_content, frag_index = download_fragment(fragment, ctx) - result = append_fragment(decrypt_fragment(fragment, frag_content), frag_index, ctx) + try: + download_fragment(fragment, ctx) + result = append_fragment( + decrypt_fragment(fragment, self._read_fragment(ctx)), fragment['frag_index'], ctx) + except KeyboardInterrupt: + if info_dict.get('is_live'): + break + raise if not result: return False if finish_func is not None: ctx['dest_stream'].write(finish_func()) ctx['dest_stream'].flush() - self._finish_frag_download(ctx, info_dict) - return True + return self._finish_frag_download(ctx, info_dict) diff --git a/plugins/youtube_download/yt_dlp/downloader/hls.py b/plugins/youtube_download/yt_dlp/downloader/hls.py index e932fd6..29d6f62 100644 --- a/plugins/youtube_download/yt_dlp/downloader/hls.py +++ b/plugins/youtube_download/yt_dlp/downloader/hls.py @@ -1,23 +1,21 @@ -from __future__ import unicode_literals - -import re -import io import binascii +import io +import re +import urllib.parse -from ..downloader import get_suitable_downloader -from .fragment import FragmentFD +from . import get_suitable_downloader from .external import FFmpegFD - -from ..compat import ( - compat_pycrypto_AES, - compat_urlparse, -) -from ..utils import ( - parse_m3u8_attributes, - update_url_query, - bug_reports_message, -) +from .fragment import FragmentFD from .. import webvtt +from ..dependencies import Cryptodome +from ..utils import ( + bug_reports_message, + parse_m3u8_attributes, + remove_start, + traverse_obj, + update_url_query, + urljoin, +) class HlsFD(FragmentFD): @@ -70,12 +68,18 @@ class HlsFD(FragmentFD): s = urlh.read().decode('utf-8', 'ignore') can_download, message = self.can_download(s, info_dict, self.params.get('allow_unplayable_formats')), None - if can_download and not compat_pycrypto_AES and '#EXT-X-KEY:METHOD=AES-128' in s: - if FFmpegFD.available(): + if can_download: + has_ffmpeg = FFmpegFD.available() + no_crypto = not Cryptodome and '#EXT-X-KEY:METHOD=AES-128' in s + if no_crypto and has_ffmpeg: can_download, message = False, 'The stream has AES-128 encryption and pycryptodomex is not available' - else: + elif no_crypto: message = ('The stream has AES-128 encryption and neither ffmpeg nor pycryptodomex are available; ' 'Decryption will be performed natively, but will be extremely slow') + elif info_dict.get('extractor_key') == 'Generic' and re.search(r'(?m)#EXT-X-MEDIA-SEQUENCE:(?!0$)', s): + install_ffmpeg = '' if has_ffmpeg else 'install ffmpeg and ' + message = ('Live HLS streams are not supported by the native downloader. 
If this is a livestream, ' + f'please {install_ffmpeg}add "--downloader ffmpeg --hls-use-mpegts" to your command') if not can_download: has_drm = re.search('|'.join([ r'#EXT-X-FAXS-CM:', # Adobe Flash Access @@ -102,8 +106,7 @@ class HlsFD(FragmentFD): if real_downloader and not real_downloader.supports_manifest(s): real_downloader = None if real_downloader: - self.to_screen( - '[%s] Fragment downloads will be delegated to %s' % (self.FD_NAME, real_downloader.get_basename())) + self.to_screen(f'[{self.FD_NAME}] Fragment downloads will be delegated to {real_downloader.get_basename()}') def is_ad_fragment_start(s): return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s @@ -150,10 +153,17 @@ class HlsFD(FragmentFD): extra_query = None extra_param_to_segment_url = info_dict.get('extra_param_to_segment_url') if extra_param_to_segment_url: - extra_query = compat_urlparse.parse_qs(extra_param_to_segment_url) + extra_query = urllib.parse.parse_qs(extra_param_to_segment_url) i = 0 media_sequence = 0 decrypt_info = {'METHOD': 'NONE'} + external_aes_key = traverse_obj(info_dict, ('hls_aes', 'key')) + if external_aes_key: + external_aes_key = binascii.unhexlify(remove_start(external_aes_key, '0x')) + assert len(external_aes_key) in (16, 24, 32), 'Invalid length for HLS AES-128 key' + external_aes_iv = traverse_obj(info_dict, ('hls_aes', 'iv')) + if external_aes_iv: + external_aes_iv = binascii.unhexlify(remove_start(external_aes_iv, '0x').zfill(32)) byte_range = {} discontinuity_count = 0 frag_index = 0 @@ -169,10 +179,7 @@ class HlsFD(FragmentFD): frag_index += 1 if frag_index <= ctx['fragment_index']: continue - frag_url = ( - line - if re.match(r'^https?://', line) - else compat_urlparse.urljoin(man_url, line)) + frag_url = urljoin(man_url, line) if extra_query: frag_url = update_url_query(frag_url, extra_query) @@ -194,13 +201,18 @@ class HlsFD(FragmentFD): return False frag_index += 1 map_info = parse_m3u8_attributes(line[11:]) - frag_url = ( - map_info.get('URI') - if re.match(r'^https?://', map_info.get('URI')) - else compat_urlparse.urljoin(man_url, map_info.get('URI'))) + frag_url = urljoin(man_url, map_info.get('URI')) if extra_query: frag_url = update_url_query(frag_url, extra_query) + if map_info.get('BYTERANGE'): + splitted_byte_range = map_info.get('BYTERANGE').split('@') + sub_range_start = int(splitted_byte_range[1]) if len(splitted_byte_range) == 2 else byte_range['end'] + byte_range = { + 'start': sub_range_start, + 'end': sub_range_start + int(splitted_byte_range[0]), + } + fragments.append({ 'frag_index': frag_index, 'url': frag_url, @@ -210,27 +222,22 @@ class HlsFD(FragmentFD): }) media_sequence += 1 - if map_info.get('BYTERANGE'): - splitted_byte_range = map_info.get('BYTERANGE').split('@') - sub_range_start = int(splitted_byte_range[1]) if len(splitted_byte_range) == 2 else byte_range['end'] - byte_range = { - 'start': sub_range_start, - 'end': sub_range_start + int(splitted_byte_range[0]), - } - elif line.startswith('#EXT-X-KEY'): decrypt_url = decrypt_info.get('URI') decrypt_info = parse_m3u8_attributes(line[11:]) if decrypt_info['METHOD'] == 'AES-128': - if 'IV' in decrypt_info: + if external_aes_iv: + decrypt_info['IV'] = external_aes_iv + elif 'IV' in decrypt_info: decrypt_info['IV'] = binascii.unhexlify(decrypt_info['IV'][2:].zfill(32)) - if not re.match(r'^https?://', decrypt_info['URI']): - decrypt_info['URI'] = compat_urlparse.urljoin( - man_url, decrypt_info['URI']) - if extra_query: - decrypt_info['URI'] = update_url_query(decrypt_info['URI'], extra_query) - 
if decrypt_url != decrypt_info['URI']: - decrypt_info['KEY'] = None + if external_aes_key: + decrypt_info['KEY'] = external_aes_key + else: + decrypt_info['URI'] = urljoin(man_url, decrypt_info['URI']) + if extra_query: + decrypt_info['URI'] = update_url_query(decrypt_info['URI'], extra_query) + if decrypt_url != decrypt_info['URI']: + decrypt_info['KEY'] = None elif line.startswith('#EXT-X-MEDIA-SEQUENCE'): media_sequence = int(line[22:]) @@ -339,7 +346,7 @@ class HlsFD(FragmentFD): continue block.write_into(output) - return output.getvalue().encode('utf-8') + return output.getvalue().encode() def fin_fragments(): dedup_window = extra_state.get('webvtt_dedup_window') @@ -350,7 +357,7 @@ class HlsFD(FragmentFD): for cue in dedup_window: webvtt.CueBlock.from_json(cue).write_into(output) - return output.getvalue().encode('utf-8') + return output.getvalue().encode() self.download_and_append_fragments( ctx, fragments, info_dict, pack_func=pack_fragment, finish_func=fin_fragments) diff --git a/plugins/youtube_download/yt_dlp/downloader/http.py b/plugins/youtube_download/yt_dlp/downloader/http.py index 34a1eb5..95c870e 100644 --- a/plugins/youtube_download/yt_dlp/downloader/http.py +++ b/plugins/youtube_download/yt_dlp/downloader/http.py @@ -1,26 +1,32 @@ -from __future__ import unicode_literals - -import errno +import http.client import os -import socket -import time import random -import re +import socket +import ssl +import time +import urllib.error from .common import FileDownloader -from ..compat import ( - compat_str, - compat_urllib_error, -) from ..utils import ( ContentTooShortError, - encodeFilename, - int_or_none, - sanitized_Request, + RetryManager, ThrottledDownload, - write_xattr, XAttrMetadataError, XAttrUnavailableError, + encodeFilename, + int_or_none, + parse_http_range, + sanitized_Request, + try_call, + write_xattr, +) + +RESPONSE_READ_EXCEPTIONS = ( + TimeoutError, + socket.timeout, # compat: py < 3.10 + ConnectionError, + ssl.SSLError, + http.client.HTTPException ) @@ -53,11 +59,11 @@ class HttpFD(FileDownloader): ctx.open_mode = 'wb' ctx.resume_len = 0 - ctx.data_len = None ctx.block_size = self.params.get('buffersize', 1024) ctx.start_time = time.time() - ctx.chunk_size = None - throttle_start = None + + # parse given Range + req_start, req_end, _ = parse_http_range(headers.get('Range')) if self.params.get('continuedl', True): # Establish possible resume length @@ -67,9 +73,6 @@ class HttpFD(FileDownloader): ctx.is_resume = ctx.resume_len > 0 - count = 0 - retries = self.params.get('retries', 0) - class SucceedDownload(Exception): pass @@ -80,43 +83,50 @@ class HttpFD(FileDownloader): class NextFragment(Exception): pass - def set_range(req, start, end): - range_header = 'bytes=%d-' % start - if end: - range_header += compat_str(end) - req.add_header('Range', range_header) - def establish_connection(): ctx.chunk_size = (random.randint(int(chunk_size * 0.95), chunk_size) if not is_test and chunk_size else chunk_size) if ctx.resume_len > 0: range_start = ctx.resume_len + if req_start is not None: + # offset the beginning of Range to be within request + range_start += req_start if ctx.is_resume: self.report_resuming_byte(ctx.resume_len) ctx.open_mode = 'ab' + elif req_start is not None: + range_start = req_start elif ctx.chunk_size > 0: range_start = 0 else: range_start = None ctx.is_resume = False - range_end = range_start + ctx.chunk_size - 1 if ctx.chunk_size else None - if range_end and ctx.data_len is not None and range_end >= ctx.data_len: - range_end = ctx.data_len - 1 
- has_range = range_start is not None - ctx.has_range = has_range + + if ctx.chunk_size: + chunk_aware_end = range_start + ctx.chunk_size - 1 + # we're not allowed to download outside Range + range_end = chunk_aware_end if req_end is None else min(chunk_aware_end, req_end) + elif req_end is not None: + # there's no need for chunked downloads, so download until the end of Range + range_end = req_end + else: + range_end = None + + if try_call(lambda: range_start > range_end): + ctx.resume_len = 0 + ctx.open_mode = 'wb' + raise RetryDownload(Exception(f'Conflicting range. (start={range_start} > end={range_end})')) + + if try_call(lambda: range_end >= ctx.content_len): + range_end = ctx.content_len - 1 + request = sanitized_Request(url, request_data, headers) + has_range = range_start is not None if has_range: - set_range(request, range_start, range_end) + request.add_header('Range', f'bytes={int(range_start)}-{int_or_none(range_end) or ""}') # Establish connection try: - try: - ctx.data = self.ydl.urlopen(request) - except (compat_urllib_error.URLError, ) as err: - # reason may not be available, e.g. for urllib2.HTTPError on python 2.6 - reason = getattr(err, 'reason', None) - if isinstance(reason, socket.timeout): - raise RetryDownload(err) - raise err + ctx.data = self.ydl.urlopen(request) # When trying to resume, Content-Range HTTP header of response has to be checked # to match the value of requested Range HTTP header. This is due to a webservers # that don't support resuming and serve a whole file with no Content-Range @@ -124,32 +134,27 @@ class HttpFD(FileDownloader): # https://github.com/ytdl-org/youtube-dl/issues/6057#issuecomment-126129799) if has_range: content_range = ctx.data.headers.get('Content-Range') - if content_range: - content_range_m = re.search(r'bytes (\d+)-(\d+)?(?:/(\d+))?', content_range) - # Content-Range is present and matches requested Range, resume is possible - if content_range_m: - if range_start == int(content_range_m.group(1)): - content_range_end = int_or_none(content_range_m.group(2)) - content_len = int_or_none(content_range_m.group(3)) - accept_content_len = ( - # Non-chunked download - not ctx.chunk_size - # Chunked download and requested piece or - # its part is promised to be served - or content_range_end == range_end - or content_len < range_end) - if accept_content_len: - ctx.data_len = content_len - return + content_range_start, content_range_end, content_len = parse_http_range(content_range) + # Content-Range is present and matches requested Range, resume is possible + if range_start == content_range_start and ( + # Non-chunked download + not ctx.chunk_size + # Chunked download and requested piece or + # its part is promised to be served + or content_range_end == range_end + or content_len < range_end): + ctx.content_len = content_len + if content_len or req_end: + ctx.data_len = min(content_len or req_end, req_end or content_len) - (req_start or 0) + return # Content-Range is either not present or invalid. 
Assuming remote webserver is # trying to send the whole file, resume is not possible, so wiping the local file # and performing entire redownload self.report_unable_to_resume() ctx.resume_len = 0 ctx.open_mode = 'wb' - ctx.data_len = int_or_none(ctx.data.info().get('Content-length', None)) - return - except (compat_urllib_error.HTTPError, ) as err: + ctx.data_len = ctx.content_len = int_or_none(ctx.data.info().get('Content-length', None)) + except urllib.error.HTTPError as err: if err.code == 416: # Unable to resume (requested range not satisfiable) try: @@ -157,7 +162,7 @@ class HttpFD(FileDownloader): ctx.data = self.ydl.urlopen( sanitized_Request(url, request_data, headers)) content_length = ctx.data.info()['Content-Length'] - except (compat_urllib_error.HTTPError, ) as err: + except urllib.error.HTTPError as err: if err.code < 500 or err.code >= 600: raise else: @@ -190,16 +195,22 @@ class HttpFD(FileDownloader): # Unexpected HTTP error raise raise RetryDownload(err) - except socket.timeout as err: + except urllib.error.URLError as err: + if isinstance(err.reason, ssl.CertificateError): + raise raise RetryDownload(err) - except socket.error as err: - if err.errno in (errno.ECONNRESET, errno.ETIMEDOUT): - # Connection reset is no problem, just retry - raise RetryDownload(err) - raise + # In urllib.request.AbstractHTTPHandler, the response is partially read on request. + # Any errors that occur during this will not be wrapped by URLError + except RESPONSE_READ_EXCEPTIONS as err: + raise RetryDownload(err) + + def close_stream(): + if ctx.stream is not None: + if not ctx.tmpfilename == '-': + ctx.stream.close() + ctx.stream = None def download(): - nonlocal throttle_start data_len = ctx.data.info().get('Content-length', None) # Range HTTP header may be ignored/unsupported by a webserver @@ -215,10 +226,12 @@ class HttpFD(FileDownloader): min_data_len = self.params.get('min_filesize') max_data_len = self.params.get('max_filesize') if min_data_len is not None and data_len < min_data_len: - self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len)) + self.to_screen( + f'\r[download] File is smaller than min-filesize ({data_len} bytes < {min_data_len} bytes). Aborting.') return False if max_data_len is not None and data_len > max_data_len: - self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len)) + self.to_screen( + f'\r[download] File is larger than max-filesize ({data_len} bytes > {max_data_len} bytes). 
Aborting.') return False byte_counter = 0 + ctx.resume_len @@ -230,28 +243,17 @@ class HttpFD(FileDownloader): before = start # start measuring def retry(e): - to_stdout = ctx.tmpfilename == '-' - if ctx.stream is not None: - if not to_stdout: - ctx.stream.close() - ctx.stream = None - ctx.resume_len = byte_counter if to_stdout else os.path.getsize(encodeFilename(ctx.tmpfilename)) + close_stream() + ctx.resume_len = (byte_counter if ctx.tmpfilename == '-' + else os.path.getsize(encodeFilename(ctx.tmpfilename))) raise RetryDownload(e) while True: try: # Download and write data_block = ctx.data.read(block_size if not is_test else min(block_size, data_len - byte_counter)) - # socket.timeout is a subclass of socket.error but may not have - # errno set - except socket.timeout as e: - retry(e) - except socket.error as e: - # SSLError on python 2 (inherits socket.error) may have - # no errno set but this error message - if e.errno in (errno.ECONNRESET, errno.ETIMEDOUT) or getattr(e, 'message', None) == 'The read operation timed out': - retry(e) - raise + except RESPONSE_READ_EXCEPTIONS as err: + retry(err) byte_counter += len(data_block) @@ -267,19 +269,19 @@ class HttpFD(FileDownloader): assert ctx.stream is not None ctx.filename = self.undo_temp_name(ctx.tmpfilename) self.report_destination(ctx.filename) - except (OSError, IOError) as err: + except OSError as err: self.report_error('unable to open for writing: %s' % str(err)) return False if self.params.get('xattr_set_filesize', False) and data_len is not None: try: - write_xattr(ctx.tmpfilename, 'user.ytdl.filesize', str(data_len).encode('utf-8')) + write_xattr(ctx.tmpfilename, 'user.ytdl.filesize', str(data_len).encode()) except (XAttrUnavailableError, XAttrMetadataError) as err: self.report_error('unable to set filesize xattr: %s' % str(err)) try: ctx.stream.write(data_block) - except (IOError, OSError) as err: + except OSError as err: self.to_stderr('\n') self.report_error('unable to write data: %s' % str(err)) return False @@ -322,16 +324,16 @@ class HttpFD(FileDownloader): if speed and speed < (self.params.get('throttledratelimit') or 0): # The speed must stay below the limit for 3 seconds # This prevents raising error when the speed temporarily goes down - if throttle_start is None: - throttle_start = now - elif now - throttle_start > 3: + if ctx.throttle_start is None: + ctx.throttle_start = now + elif now - ctx.throttle_start > 3: if ctx.stream is not None and ctx.tmpfilename != '-': ctx.stream.close() raise ThrottledDownload() elif speed: - throttle_start = None + ctx.throttle_start = None - if not is_test and ctx.chunk_size and ctx.data_len is not None and byte_counter < ctx.data_len: + if not is_test and ctx.chunk_size and ctx.content_len is not None and byte_counter < ctx.content_len: ctx.resume_len = byte_counter # ctx.block_size = block_size raise NextFragment() @@ -345,9 +347,7 @@ class HttpFD(FileDownloader): if data_len is not None and byte_counter != data_len: err = ContentTooShortError(byte_counter, int(data_len)) - if count <= retries: - retry(err) - raise err + retry(err) self.try_rename(ctx.tmpfilename, ctx.filename) @@ -366,21 +366,20 @@ class HttpFD(FileDownloader): return True - while count <= retries: + for retry in RetryManager(self.params.get('retries'), self.report_retry): try: establish_connection() return download() - except RetryDownload as e: - count += 1 - if count <= retries: - self.report_retry(e.source_error, count, retries) - else: - self.to_screen(f'[download] Got server HTTP error: {e.source_error}') + 
except RetryDownload as err: + retry.error = err.source_error continue except NextFragment: + retry.error = None + retry.attempt -= 1 continue except SucceedDownload: return True - - self.report_error('giving up after %s retries' % retries) + except: # noqa: E722 + close_stream() + raise return False diff --git a/plugins/youtube_download/yt_dlp/downloader/ism.py b/plugins/youtube_download/yt_dlp/downloader/ism.py index 09516ab..a157a8a 100644 --- a/plugins/youtube_download/yt_dlp/downloader/ism.py +++ b/plugins/youtube_download/yt_dlp/downloader/ism.py @@ -1,27 +1,23 @@ -from __future__ import unicode_literals - -import time import binascii import io +import struct +import time +import urllib.error from .fragment import FragmentFD -from ..compat import ( - compat_Struct, - compat_urllib_error, -) +from ..utils import RetryManager +u8 = struct.Struct('>B') +u88 = struct.Struct('>Bx') +u16 = struct.Struct('>H') +u1616 = struct.Struct('>Hxx') +u32 = struct.Struct('>I') +u64 = struct.Struct('>Q') -u8 = compat_Struct('>B') -u88 = compat_Struct('>Bx') -u16 = compat_Struct('>H') -u1616 = compat_Struct('>Hxx') -u32 = compat_Struct('>I') -u64 = compat_Struct('>Q') - -s88 = compat_Struct('>bx') -s16 = compat_Struct('>h') -s1616 = compat_Struct('>hxx') -s32 = compat_Struct('>i') +s88 = struct.Struct('>bx') +s16 = struct.Struct('>h') +s1616 = struct.Struct('>hxx') +s32 = struct.Struct('>i') unity_matrix = (s32.pack(0x10000) + s32.pack(0) * 3) * 2 + s32.pack(0x40000000) @@ -142,6 +138,8 @@ def write_piff_header(stream, params): if fourcc == 'AACL': sample_entry_box = box(b'mp4a', sample_entry_payload) + if fourcc == 'EC-3': + sample_entry_box = box(b'ec-3', sample_entry_payload) elif stream_type == 'video': sample_entry_payload += u16.pack(0) # pre defined sample_entry_payload += u16.pack(0) # reserved @@ -156,7 +154,7 @@ def write_piff_header(stream, params): sample_entry_payload += u16.pack(0x18) # depth sample_entry_payload += s16.pack(-1) # pre defined - codec_private_data = binascii.unhexlify(params['codec_private_data'].encode('utf-8')) + codec_private_data = binascii.unhexlify(params['codec_private_data'].encode()) if fourcc in ('H264', 'AVC1'): sps, pps = codec_private_data.split(u32.pack(1))[1:] avcc_payload = u8.pack(1) # configuration version @@ -235,8 +233,6 @@ class IsmFD(FragmentFD): Download segments in a ISM manifest """ - FD_NAME = 'ism' - def real_download(self, filename, info_dict): segments = info_dict['fragments'][:1] if self.params.get( 'test', False) else info_dict['fragments'] @@ -252,7 +248,6 @@ class IsmFD(FragmentFD): 'ism_track_written': False, }) - fragment_retries = self.params.get('fragment_retries', 0) skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True) frag_index = 0 @@ -260,30 +255,29 @@ class IsmFD(FragmentFD): frag_index += 1 if frag_index <= ctx['fragment_index']: continue - count = 0 - while count <= fragment_retries: + + retry_manager = RetryManager(self.params.get('fragment_retries'), self.report_retry, + frag_index=frag_index, fatal=not skip_unavailable_fragments) + for retry in retry_manager: try: - success, frag_content = self._download_fragment(ctx, segment['url'], info_dict) + success = self._download_fragment(ctx, segment['url'], info_dict) if not success: return False + frag_content = self._read_fragment(ctx) + if not extra_state['ism_track_written']: tfhd_data = extract_box_data(frag_content, [b'moof', b'traf', b'tfhd']) info_dict['_download_params']['track_id'] = u32.unpack(tfhd_data[4:8])[0] 
write_piff_header(ctx['dest_stream'], info_dict['_download_params']) extra_state['ism_track_written'] = True self._append_fragment(ctx, frag_content) - break - except compat_urllib_error.HTTPError as err: - count += 1 - if count <= fragment_retries: - self.report_retry_fragment(err, frag_index, count, fragment_retries) - if count > fragment_retries: - if skip_unavailable_fragments: - self.report_skip_fragment(frag_index) + except urllib.error.HTTPError as err: + retry.error = err continue - self.report_error('giving up after %s fragment retries' % fragment_retries) - return False - self._finish_frag_download(ctx, info_dict) + if retry_manager.error: + if not skip_unavailable_fragments: + return False + self.report_skip_fragment(frag_index) - return True + return self._finish_frag_download(ctx, info_dict) diff --git a/plugins/youtube_download/yt_dlp/downloader/mhtml.py b/plugins/youtube_download/yt_dlp/downloader/mhtml.py index 1477f65..d977dce 100644 --- a/plugins/youtube_download/yt_dlp/downloader/mhtml.py +++ b/plugins/youtube_download/yt_dlp/downloader/mhtml.py @@ -1,24 +1,15 @@ -# coding: utf-8 -from __future__ import unicode_literals - import io import quopri import re import uuid from .fragment import FragmentFD -from ..utils import ( - escapeHTML, - formatSeconds, - srt_subtitles_timecode, - urljoin, -) +from ..compat import imghdr +from ..utils import escapeHTML, formatSeconds, srt_subtitles_timecode, urljoin from ..version import __version__ as YT_DLP_VERSION class MhtmlFD(FragmentFD): - FD_NAME = 'mhtml' - _STYLESHEET = """\ html, body { margin: 0; @@ -62,7 +53,7 @@ body > figure > img { def _escape_mime(s): return '=?utf-8?Q?' + (b''.join( bytes((b,)) if b >= 0x20 else b'=%02X' % b - for b in quopri.encodestring(s.encode('utf-8'), header=True) + for b in quopri.encodestring(s.encode(), header=True) )).decode('us-ascii') + '?=' def _gen_cid(self, i, fragment, frag_boundary): @@ -159,25 +150,22 @@ body > figure > img { length=len(stub), title=self._escape_mime(title), stub=stub - ).encode('utf-8')) + ).encode()) extra_state['header_written'] = True for i, fragment in enumerate(fragments): if (i + 1) <= ctx['fragment_index']: continue - fragment_url = urljoin(fragment_base_url, fragment['path']) - success, frag_content = self._download_fragment(ctx, fragment_url, info_dict) + fragment_url = fragment.get('url') + if not fragment_url: + assert fragment_base_url + fragment_url = urljoin(fragment_base_url, fragment['path']) + + success = self._download_fragment(ctx, fragment_url, info_dict) if not success: continue - - mime_type = b'image/jpeg' - if frag_content.startswith(b'\x89PNG\r\n\x1a\n'): - mime_type = b'image/png' - if frag_content.startswith((b'GIF87a', b'GIF89a')): - mime_type = b'image/gif' - if frag_content.startswith(b'RIFF') and frag_content[8:12] == 'WEBP': - mime_type = b'image/webp' + frag_content = self._read_fragment(ctx) frag_header = io.BytesIO() frag_header.write( @@ -185,7 +173,7 @@ body > figure > img { frag_header.write( b'Content-ID: <%b>\r\n' % self._gen_cid(i, fragment, frag_boundary).encode('us-ascii')) frag_header.write( - b'Content-type: %b\r\n' % mime_type) + b'Content-type: %b\r\n' % f'image/{imghdr.what(h=frag_content) or "jpeg"}'.encode()) frag_header.write( b'Content-length: %u\r\n' % len(frag_content)) frag_header.write( @@ -198,5 +186,4 @@ body > figure > img { ctx['dest_stream'].write( b'--%b--\r\n\r\n' % frag_boundary.encode('us-ascii')) - self._finish_frag_download(ctx, info_dict) - return True + return self._finish_frag_download(ctx, info_dict) 
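For context on the mhtml change above: the hand-rolled PNG/GIF/WEBP magic-byte checks are replaced by the imghdr sniffer vendored in yt_dlp.compat (stdlib imghdr is available through Python 3.12). A rough standalone equivalent of what the new Content-type line computes; the function name here is illustrative only:

    import imghdr  # stdlib through Python 3.12; yt-dlp vendors a copy as yt_dlp.compat.imghdr

    def frag_mime_type(frag_content):
        # imghdr.what() sniffs magic bytes and returns e.g. 'png', 'gif' or 'webp';
        # on None the patched line falls back to JPEG, as the old code did
        return f'image/{imghdr.what(None, h=frag_content) or "jpeg"}'.encode()

    assert frag_mime_type(b'\x89PNG\r\n\x1a\n' + b'\x00' * 16) == b'image/png'
    assert frag_mime_type(b'not an image') == b'image/jpeg'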
diff --git a/plugins/youtube_download/yt_dlp/downloader/niconico.py b/plugins/youtube_download/yt_dlp/downloader/niconico.py index 521dfec..77ed39e 100644 --- a/plugins/youtube_download/yt_dlp/downloader/niconico.py +++ b/plugins/youtube_download/yt_dlp/downloader/niconico.py @@ -1,22 +1,17 @@ -# coding: utf-8 -from __future__ import unicode_literals - import threading +from . import get_suitable_downloader from .common import FileDownloader -from ..downloader import get_suitable_downloader -from ..extractor.niconico import NiconicoIE from ..utils import sanitized_Request class NiconicoDmcFD(FileDownloader): """ Downloading niconico douga from DMC with heartbeat """ - FD_NAME = 'niconico_dmc' - def real_download(self, filename, info_dict): - self.to_screen('[%s] Downloading from DMC' % self.FD_NAME) + from ..extractor.niconico import NiconicoIE + self.to_screen('[%s] Downloading from DMC' % self.FD_NAME) ie = NiconicoIE(self.ydl) info_dict, heartbeat_info_dict = ie._get_heartbeat_info(info_dict) @@ -54,4 +49,4 @@ class NiconicoDmcFD(FileDownloader): with heartbeat_lock: timer[0].cancel() download_complete = True - return success + return success diff --git a/plugins/youtube_download/yt_dlp/downloader/rtmp.py b/plugins/youtube_download/yt_dlp/downloader/rtmp.py index 90f1acf..0e09525 100644 --- a/plugins/youtube_download/yt_dlp/downloader/rtmp.py +++ b/plugins/youtube_download/yt_dlp/downloader/rtmp.py @@ -1,18 +1,15 @@ -from __future__ import unicode_literals - import os import re import subprocess import time from .common import FileDownloader -from ..compat import compat_str from ..utils import ( - check_executable, - encodeFilename, - encodeArgument, - get_exe_version, Popen, + check_executable, + encodeArgument, + encodeFilename, + get_exe_version, ) @@ -94,8 +91,7 @@ class RtmpFD(FileDownloader): self.to_screen('') return proc.wait() except BaseException: # Including KeyboardInterrupt - proc.kill() - proc.wait() + proc.kill(timeout=None) raise url = info_dict['url'] @@ -146,7 +142,7 @@ class RtmpFD(FileDownloader): if isinstance(conn, list): for entry in conn: basic_args += ['--conn', entry] - elif isinstance(conn, compat_str): + elif isinstance(conn, str): basic_args += ['--conn', conn] if protocol is not None: basic_args += ['--protocol', protocol] diff --git a/plugins/youtube_download/yt_dlp/downloader/rtsp.py b/plugins/youtube_download/yt_dlp/downloader/rtsp.py index 7815d59..e89269f 100644 --- a/plugins/youtube_download/yt_dlp/downloader/rtsp.py +++ b/plugins/youtube_download/yt_dlp/downloader/rtsp.py @@ -1,13 +1,8 @@ -from __future__ import unicode_literals - import os import subprocess from .common import FileDownloader -from ..utils import ( - check_executable, - encodeFilename, -) +from ..utils import check_executable, encodeFilename class RtspFD(FileDownloader): @@ -32,7 +27,7 @@ class RtspFD(FileDownloader): retval = subprocess.call(args) if retval == 0: fsize = os.path.getsize(encodeFilename(tmpfilename)) - self.to_screen('\r[%s] %s bytes' % (args[0], fsize)) + self.to_screen(f'\r[{args[0]}] {fsize} bytes') self.try_rename(tmpfilename, filename) self._hook_progress({ 'downloaded_bytes': fsize, diff --git a/plugins/youtube_download/yt_dlp/downloader/websocket.py b/plugins/youtube_download/yt_dlp/downloader/websocket.py index daac348..6837ff1 100644 --- a/plugins/youtube_download/yt_dlp/downloader/websocket.py +++ b/plugins/youtube_download/yt_dlp/downloader/websocket.py @@ -1,19 +1,12 @@ +import asyncio +import contextlib import os import signal -import asyncio import 
threading -try: - import websockets -except (ImportError, SyntaxError): - # websockets 3.10 on python 3.6 causes SyntaxError - # See https://github.com/yt-dlp/yt-dlp/issues/2633 - has_websockets = False -else: - has_websockets = True - from .common import FileDownloader from .external import FFmpegFD +from ..dependencies import websockets class FFmpegSinkFD(FileDownloader): @@ -26,14 +19,12 @@ class FFmpegSinkFD(FileDownloader): async def call_conn(proc, stdin): try: await self.real_connection(stdin, info_dict) - except (BrokenPipeError, OSError): + except OSError: pass finally: - try: + with contextlib.suppress(OSError): stdin.flush() stdin.close() - except OSError: - pass os.kill(os.getpid(), signal.SIGINT) class FFmpegStdinFD(FFmpegFD): diff --git a/plugins/youtube_download/yt_dlp/downloader/youtube_live_chat.py b/plugins/youtube_download/yt_dlp/downloader/youtube_live_chat.py index ef4205e..5928fec 100644 --- a/plugins/youtube_download/yt_dlp/downloader/youtube_live_chat.py +++ b/plugins/youtube_download/yt_dlp/downloader/youtube_live_chat.py @@ -1,29 +1,27 @@ -from __future__ import division, unicode_literals - import json import time +import urllib.error from .fragment import FragmentFD -from ..compat import compat_urllib_error from ..utils import ( - try_get, + RegexNotFoundError, + RetryManager, dict_get, int_or_none, - RegexNotFoundError, + try_get, ) -from ..extractor.youtube import YoutubeBaseInfoExtractor as YT_BaseIE class YoutubeLiveChatFD(FragmentFD): """ Downloads YouTube live chats fragment by fragment """ - FD_NAME = 'youtube_live_chat' - def real_download(self, filename, info_dict): video_id = info_dict['video_id'] self.to_screen('[%s] Downloading live chat' % self.FD_NAME) + if not self.params.get('skip_download') and info_dict['protocol'] == 'youtube_live_chat': + self.report_warning('Live chat download runs until the livestream ends. 
' + 'If you wish to download the video simultaneously, run a separate yt-dlp instance') - fragment_retries = self.params.get('fragment_retries', 0) test = self.params.get('test', False) ctx = { @@ -32,7 +30,9 @@ class YoutubeLiveChatFD(FragmentFD): 'total_frags': None, } - ie = YT_BaseIE(self.ydl) + from ..extractor.youtube import YoutubeBaseInfoExtractor + + ie = YoutubeBaseInfoExtractor(self.ydl) start_time = int(time.time() * 1000) @@ -51,7 +51,7 @@ class YoutubeLiveChatFD(FragmentFD): replay_chat_item_action = action['replayChatItemAction'] offset = int(replay_chat_item_action['videoOffsetTimeMsec']) processed_fragment.extend( - json.dumps(action, ensure_ascii=False).encode('utf-8') + b'\n') + json.dumps(action, ensure_ascii=False).encode() + b'\n') if offset is not None: continuation = try_get( live_chat_continuation, @@ -93,7 +93,7 @@ class YoutubeLiveChatFD(FragmentFD): 'isLive': True, } processed_fragment.extend( - json.dumps(pseudo_action, ensure_ascii=False).encode('utf-8') + b'\n') + json.dumps(pseudo_action, ensure_ascii=False).encode() + b'\n') continuation_data_getters = [ lambda x: x['continuations'][0]['invalidationContinuationData'], lambda x: x['continuations'][0]['timedContinuationData'], @@ -109,12 +109,12 @@ class YoutubeLiveChatFD(FragmentFD): return continuation_id, live_offset, click_tracking_params def download_and_parse_fragment(url, frag_index, request_data=None, headers=None): - count = 0 - while count <= fragment_retries: + for retry in RetryManager(self.params.get('fragment_retries'), self.report_retry, frag_index=frag_index): try: - success, raw_fragment = dl_fragment(url, request_data, headers) + success = dl_fragment(url, request_data, headers) if not success: return False, None, None, None + raw_fragment = self._read_fragment(ctx) try: data = ie.extract_yt_initial_data(video_id, raw_fragment.decode('utf-8', 'replace')) except RegexNotFoundError: @@ -124,27 +124,22 @@ class YoutubeLiveChatFD(FragmentFD): live_chat_continuation = try_get( data, lambda x: x['continuationContents']['liveChatContinuation'], dict) or {} - if info_dict['protocol'] == 'youtube_live_chat_replay': - if frag_index == 1: - continuation_id, offset, click_tracking_params = try_refresh_replay_beginning(live_chat_continuation) - else: - continuation_id, offset, click_tracking_params = parse_actions_replay(live_chat_continuation) - elif info_dict['protocol'] == 'youtube_live_chat': - continuation_id, offset, click_tracking_params = parse_actions_live(live_chat_continuation) - return True, continuation_id, offset, click_tracking_params - except compat_urllib_error.HTTPError as err: - count += 1 - if count <= fragment_retries: - self.report_retry_fragment(err, frag_index, count, fragment_retries) - if count > fragment_retries: - self.report_error('giving up after %s fragment retries' % fragment_retries) - return False, None, None, None + + func = (info_dict['protocol'] == 'youtube_live_chat' and parse_actions_live + or frag_index == 1 and try_refresh_replay_beginning + or parse_actions_replay) + return (True, *func(live_chat_continuation)) + except urllib.error.HTTPError as err: + retry.error = err + continue + return False, None, None, None self._prepare_and_start_frag_download(ctx, info_dict) - success, raw_fragment = dl_fragment(info_dict['url']) + success = dl_fragment(info_dict['url']) if not success: return False + raw_fragment = self._read_fragment(ctx) try: data = ie.extract_yt_initial_data(video_id, raw_fragment.decode('utf-8', 'replace')) except RegexNotFoundError: @@ -185,7 +180,7 
@@ class YoutubeLiveChatFD(FragmentFD): request_data['context']['clickTracking'] = {'clickTrackingParams': click_tracking_params} headers = ie.generate_api_headers(ytcfg=ytcfg, visitor_data=visitor_data) headers.update({'content-type': 'application/json'}) - fragment_request_data = json.dumps(request_data, ensure_ascii=False).encode('utf-8') + b'\n' + fragment_request_data = json.dumps(request_data, ensure_ascii=False).encode() + b'\n' success, continuation_id, offset, click_tracking_params = download_and_parse_fragment( url, frag_index, fragment_request_data, headers) else: @@ -196,8 +191,7 @@ class YoutubeLiveChatFD(FragmentFD): if test: break - self._finish_frag_download(ctx, info_dict) - return True + return self._finish_frag_download(ctx, info_dict) @staticmethod def parse_live_timestamp(action): diff --git a/plugins/youtube_download/yt_dlp/extractor/__init__.py b/plugins/youtube_download/yt_dlp/extractor/__init__.py index b354842..6bfa4bd 100644 --- a/plugins/youtube_download/yt_dlp/extractor/__init__.py +++ b/plugins/youtube_download/yt_dlp/extractor/__init__.py @@ -1,33 +1,15 @@ -import os +from ..compat.compat_utils import passthrough_module -from ..utils import load_plugins - -_LAZY_LOADER = False -if not os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'): - try: - from .lazy_extractors import * - from .lazy_extractors import _ALL_CLASSES - _LAZY_LOADER = True - except ImportError: - pass - -if not _LAZY_LOADER: - from .extractors import * - _ALL_CLASSES = [ - klass - for name, klass in globals().items() - if name.endswith('IE') and name != 'GenericIE' - ] - _ALL_CLASSES.append(GenericIE) - -_PLUGIN_CLASSES = load_plugins('extractor', 'IE', globals()) -_ALL_CLASSES = list(_PLUGIN_CLASSES.values()) + _ALL_CLASSES +passthrough_module(__name__, '.extractors') +del passthrough_module def gen_extractor_classes(): """ Return a list of supported extractors. The order does matter; the first extractor matched is the one handling the URL. """ + from .extractors import _ALL_CLASSES + return _ALL_CLASSES @@ -38,17 +20,23 @@ def gen_extractors(): return [klass() for klass in gen_extractor_classes()] -def list_extractors(age_limit): - """ - Return a list of extractors that are suitable for the given age, - sorted by extractor ID. - """ +def list_extractor_classes(age_limit=None): + """Return a list of extractors that are suitable for the given age, sorted by extractor name""" + from .generic import GenericIE - return sorted( - filter(lambda ie: ie.is_suitable(age_limit), gen_extractors()), - key=lambda ie: ie.IE_NAME.lower()) + yield from sorted(filter( + lambda ie: ie.is_suitable(age_limit) and ie != GenericIE, + gen_extractor_classes()), key=lambda ie: ie.IE_NAME.lower()) + yield GenericIE + + +def list_extractors(age_limit=None): + """Return a list of extractor instances that are suitable for the given age, sorted by extractor name""" + return [ie() for ie in list_extractor_classes(age_limit)] def get_info_extractor(ie_name): """Returns the info extractor class with the given ie_name""" - return globals()[ie_name + 'IE'] + from . 
import extractors + + return getattr(extractors, f'{ie_name}IE') diff --git a/plugins/youtube_download/yt_dlp/extractor/_extractors.py b/plugins/youtube_download/yt_dlp/extractor/_extractors.py new file mode 100644 index 0000000..061a25a --- /dev/null +++ b/plugins/youtube_download/yt_dlp/extractor/_extractors.py @@ -0,0 +1,2404 @@ +# flake8: noqa: F401 + +from .youtube import ( # Youtube is moved to the top to improve performance + YoutubeIE, + YoutubeClipIE, + YoutubeFavouritesIE, + YoutubeNotificationsIE, + YoutubeHistoryIE, + YoutubeTabIE, + YoutubeLivestreamEmbedIE, + YoutubePlaylistIE, + YoutubeRecommendedIE, + YoutubeSearchDateIE, + YoutubeSearchIE, + YoutubeSearchURLIE, + YoutubeMusicSearchURLIE, + YoutubeSubscriptionsIE, + YoutubeStoriesIE, + YoutubeTruncatedIDIE, + YoutubeTruncatedURLIE, + YoutubeYtBeIE, + YoutubeYtUserIE, + YoutubeWatchLaterIE, + YoutubeShortsAudioPivotIE, + YoutubeConsentRedirectIE, +) + +from .abc import ( + ABCIE, + ABCIViewIE, + ABCIViewShowSeriesIE, +) +from .abcnews import ( + AbcNewsIE, + AbcNewsVideoIE, +) +from .abcotvs import ( + ABCOTVSIE, + ABCOTVSClipsIE, +) +from .abematv import ( + AbemaTVIE, + AbemaTVTitleIE, +) +from .academicearth import AcademicEarthCourseIE +from .acast import ( + ACastIE, + ACastChannelIE, +) +from .acfun import AcFunVideoIE, AcFunBangumiIE +from .adn import ADNIE +from .adobeconnect import AdobeConnectIE +from .adobetv import ( + AdobeTVEmbedIE, + AdobeTVIE, + AdobeTVShowIE, + AdobeTVChannelIE, + AdobeTVVideoIE, +) +from .adultswim import AdultSwimIE +from .aenetworks import ( + AENetworksIE, + AENetworksCollectionIE, + AENetworksShowIE, + HistoryTopicIE, + HistoryPlayerIE, + BiographyIE, +) +from .aeonco import AeonCoIE +from .afreecatv import ( + AfreecaTVIE, + AfreecaTVLiveIE, + AfreecaTVUserIE, +) +from .agora import ( + TokFMAuditionIE, + TokFMPodcastIE, + WyborczaPodcastIE, + WyborczaVideoIE, +) +from .airmozilla import AirMozillaIE +from .airtv import AirTVIE +from .aitube import AitubeKZVideoIE +from .aljazeera import AlJazeeraIE +from .alphaporno import AlphaPornoIE +from .amara import AmaraIE +from .alura import ( + AluraIE, + AluraCourseIE +) +from .amcnetworks import AMCNetworksIE +from .amazon import ( + AmazonStoreIE, + AmazonReviewsIE, +) +from .amazonminitv import ( + AmazonMiniTVIE, + AmazonMiniTVSeasonIE, + AmazonMiniTVSeriesIE, +) +from .americastestkitchen import ( + AmericasTestKitchenIE, + AmericasTestKitchenSeasonIE, +) +from .angel import AngelIE +from .anvato import AnvatoIE +from .aol import AolIE +from .allocine import AllocineIE +from .aliexpress import AliExpressLiveIE +from .alsace20tv import ( + Alsace20TVIE, + Alsace20TVEmbedIE, +) +from .apa import APAIE +from .aparat import AparatIE +from .appleconnect import AppleConnectIE +from .appletrailers import ( + AppleTrailersIE, + AppleTrailersSectionIE, +) +from .applepodcasts import ApplePodcastsIE +from .archiveorg import ( + ArchiveOrgIE, + YoutubeWebArchiveIE, + VLiveWebArchiveIE, +) +from .arcpublishing import ArcPublishingIE +from .arkena import ArkenaIE +from .ard import ( + ARDBetaMediathekIE, + ARDIE, + ARDMediathekIE, +) +from .arte import ( + ArteTVIE, + ArteTVEmbedIE, + ArteTVPlaylistIE, + ArteTVCategoryIE, +) +from .arnes import ArnesIE +from .asiancrush import ( + AsianCrushIE, + AsianCrushPlaylistIE, +) +from .atresplayer import AtresPlayerIE +from .atscaleconf import AtScaleConfEventIE +from .atttechchannel import ATTTechChannelIE +from .atvat import ATVAtIE +from .audimedia import AudiMediaIE +from .audioboom import AudioBoomIE 
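+# NOTE (annotation, not part of the upstream module): this file is the flat
+# import registry that replaces the old globals()-scan in extractor/__init__.py.
+# Name lookup is deferred through passthrough_module; a minimal sketch of the
+# resolution path, mirroring get_info_extractor() in __init__.py above, is:
+#
+#     from . import extractors             # only imported on first lookup
+#     klass = getattr(extractors, f'{ie_name}IE')
+#
+# so `import yt_dlp.extractor` no longer builds every extractor class eagerly.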
+from .audiodraft import (
+    AudiodraftCustomIE,
+    AudiodraftGenericIE,
+)
+from .audiomack import AudiomackIE, AudiomackAlbumIE
+from .audius import (
+    AudiusIE,
+    AudiusTrackIE,
+    AudiusPlaylistIE,
+    AudiusProfileIE,
+)
+from .awaan import (
+    AWAANIE,
+    AWAANVideoIE,
+    AWAANLiveIE,
+    AWAANSeasonIE,
+)
+from .azmedien import AZMedienIE
+from .baidu import BaiduVideoIE
+from .banbye import (
+    BanByeIE,
+    BanByeChannelIE,
+)
+from .bandaichannel import BandaiChannelIE
+from .bandcamp import (
+    BandcampIE,
+    BandcampAlbumIE,
+    BandcampWeeklyIE,
+    BandcampUserIE,
+)
+from .bannedvideo import BannedVideoIE
+from .bbc import (
+    BBCCoUkIE,
+    BBCCoUkArticleIE,
+    BBCCoUkIPlayerEpisodesIE,
+    BBCCoUkIPlayerGroupIE,
+    BBCCoUkPlaylistIE,
+    BBCIE,
+)
+from .beeg import BeegIE
+from .behindkink import BehindKinkIE
+from .bellmedia import BellMediaIE
+from .beatbump import (
+    BeatBumpVideoIE,
+    BeatBumpPlaylistIE,
+)
+from .beatport import BeatportIE
+from .berufetv import BerufeTVIE
+from .bet import BetIE
+from .bfi import BFIPlayerIE
+from .bfmtv import (
+    BFMTVIE,
+    BFMTVLiveIE,
+    BFMTVArticleIE,
+)
+from .bibeltv import BibelTVIE
+from .bigflix import BigflixIE
+from .bigo import BigoIE
+from .bild import BildIE
+from .bilibili import (
+    BiliBiliIE,
+    BiliBiliBangumiIE,
+    BiliBiliBangumiMediaIE,
+    BiliBiliSearchIE,
+    BilibiliCategoryIE,
+    BilibiliAudioIE,
+    BilibiliAudioAlbumIE,
+    BiliBiliPlayerIE,
+    BilibiliSpaceVideoIE,
+    BilibiliSpaceAudioIE,
+    BilibiliSpacePlaylistIE,
+    BiliIntlIE,
+    BiliIntlSeriesIE,
+    BiliLiveIE,
+)
+from .biobiochiletv import BioBioChileTVIE
+from .bitchute import (
+    BitChuteIE,
+    BitChuteChannelIE,
+)
+from .bitwave import (
+    BitwaveReplayIE,
+    BitwaveStreamIE,
+)
+from .biqle import BIQLEIE
+from .blackboardcollaborate import BlackboardCollaborateIE
+from .bleacherreport import (
+    BleacherReportIE,
+    BleacherReportCMSIE,
+)
+from .blogger import BloggerIE
+from .bloomberg import BloombergIE
+from .bokecc import BokeCCIE
+from .bongacams import BongaCamsIE
+from .bostonglobe import BostonGlobeIE
+from .box import BoxIE
+from .booyah import BooyahClipsIE
+from .bpb import BpbIE
+from .br import (
+    BRIE,
+    BRMediathekIE,
+)
+from .bravotv import BravoTVIE
+from .breakcom import BreakIE
+from .breitbart import BreitBartIE
+from .brightcove import (
+    BrightcoveLegacyIE,
+    BrightcoveNewIE,
+)
+from .businessinsider import BusinessInsiderIE
+from .bundesliga import BundesligaIE
+from .buzzfeed import BuzzFeedIE
+from .byutv import BYUtvIE
+from .c56 import C56IE
+from .cableav import CableAVIE
+from .callin import CallinIE
+from .caltrans import CaltransIE
+from .cam4 import CAM4IE
+from .camdemy import (
+    CamdemyIE,
+    CamdemyFolderIE
+)
+from .cammodels import CamModelsIE
+from .camsoda import CamsodaIE
+from .camtasia import CamtasiaEmbedIE
+from .camwithher import CamWithHerIE
+from .canalalpha import CanalAlphaIE
+from .canalplus import CanalplusIE
+from .canalc2 import Canalc2IE
+from .canvas import (
+    CanvasIE,
+    CanvasEenIE,
+    VrtNUIE,
+    DagelijkseKostIE,
+)
+from .carambatv import (
+    CarambaTVIE,
+    CarambaTVPageIE,
+)
+from .cartoonnetwork import CartoonNetworkIE
+from .cbc import (
+    CBCIE,
+    CBCPlayerIE,
+    CBCGemIE,
+    CBCGemPlaylistIE,
+    CBCGemLiveIE,
+)
+from .cbs import CBSIE
+from .cbslocal import (
+    CBSLocalIE,
+    CBSLocalArticleIE,
+)
+from .cbsinteractive import CBSInteractiveIE
+from .cbsnews import (
+    CBSNewsEmbedIE,
+    CBSNewsIE,
+    CBSNewsLiveVideoIE,
+)
+from .cbssports import (
+    CBSSportsEmbedIE,
+    CBSSportsIE,
+    TwentyFourSevenSportsIE,
+)
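+# NOTE (annotation, not part of the upstream module): every class imported in
+# this registry follows the `<Name>IE` convention and carries its own
+# _VALID_URL pattern. A hedged sketch of how a URL is routed, assuming the
+# gen_extractor_classes() helper from __init__.py:
+#
+#     for ie in gen_extractor_classes():
+#         if ie.suitable(url):             # regex test against ie._VALID_URL
+#             return ie                    # first match wins
+#
+# which is why the ordering here matters (see the Youtube block at the top).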
+from .ccc import ( + CCCIE, + CCCPlaylistIE, +) +from .ccma import CCMAIE +from .cctv import CCTVIE +from .cda import CDAIE +from .cellebrite import CellebriteIE +from .ceskatelevize import CeskaTelevizeIE +from .cgtn import CGTNIE +from .channel9 import Channel9IE +from .charlierose import CharlieRoseIE +from .chaturbate import ChaturbateIE +from .chilloutzone import ChilloutzoneIE +from .chingari import ( + ChingariIE, + ChingariUserIE, +) +from .chirbit import ( + ChirbitIE, + ChirbitProfileIE, +) +from .cinchcast import CinchcastIE +from .cinemax import CinemaxIE +from .cinetecamilano import CinetecaMilanoIE +from .ciscolive import ( + CiscoLiveSessionIE, + CiscoLiveSearchIE, +) +from .ciscowebex import CiscoWebexIE +from .cjsw import CJSWIE +from .cliphunter import CliphunterIE +from .clippit import ClippitIE +from .cliprs import ClipRsIE +from .clipsyndicate import ClipsyndicateIE +from .closertotruth import CloserToTruthIE +from .cloudflarestream import CloudflareStreamIE +from .cloudy import CloudyIE +from .clubic import ClubicIE +from .clyp import ClypIE +from .cmt import CMTIE +from .cnbc import ( + CNBCIE, + CNBCVideoIE, +) +from .cnn import ( + CNNIE, + CNNBlogsIE, + CNNArticleIE, + CNNIndonesiaIE, +) +from .coub import CoubIE +from .comedycentral import ( + ComedyCentralIE, + ComedyCentralTVIE, +) +from .commonmistakes import CommonMistakesIE, UnicodeBOMIE +from .commonprotocols import ( + MmsIE, + RtmpIE, + ViewSourceIE, +) +from .condenast import CondeNastIE +from .contv import CONtvIE +from .corus import CorusIE +from .cpac import ( + CPACIE, + CPACPlaylistIE, +) +from .cozytv import CozyTVIE +from .cracked import CrackedIE +from .crackle import CrackleIE +from .craftsy import CraftsyIE +from .crooksandliars import CrooksAndLiarsIE +from .crowdbunker import ( + CrowdBunkerIE, + CrowdBunkerChannelIE, +) +from .crunchyroll import ( + CrunchyrollBetaIE, + CrunchyrollBetaShowIE, +) +from .cspan import CSpanIE, CSpanCongressIE +from .ctsnews import CtsNewsIE +from .ctv import CTVIE +from .ctvnews import CTVNewsIE +from .cultureunplugged import CultureUnpluggedIE +from .curiositystream import ( + CuriosityStreamIE, + CuriosityStreamCollectionsIE, + CuriosityStreamSeriesIE, +) +from .cwtv import CWTVIE +from .cybrary import ( + CybraryIE, + CybraryCourseIE +) +from .daftsex import DaftsexIE +from .dailymail import DailyMailIE +from .dailymotion import ( + DailymotionIE, + DailymotionPlaylistIE, + DailymotionUserIE, +) +from .dailywire import ( + DailyWireIE, + DailyWirePodcastIE, +) +from .damtomo import ( + DamtomoRecordIE, + DamtomoVideoIE, +) +from .daum import ( + DaumIE, + DaumClipIE, + DaumPlaylistIE, + DaumUserIE, +) +from .daystar import DaystarClipIE +from .dbtv import DBTVIE +from .dctp import DctpTvIE +from .deezer import ( + DeezerPlaylistIE, + DeezerAlbumIE, +) +from .democracynow import DemocracynowIE +from .detik import DetikEmbedIE +from .dfb import DFBIE +from .dhm import DHMIE +from .digg import DiggIE +from .dotsub import DotsubIE +from .douyutv import ( + DouyuShowIE, + DouyuTVIE, +) +from .dplay import ( + DPlayIE, + DiscoveryPlusIE, + HGTVDeIE, + GoDiscoveryIE, + TravelChannelIE, + CookingChannelIE, + HGTVUsaIE, + FoodNetworkIE, + InvestigationDiscoveryIE, + DestinationAmericaIE, + AmHistoryChannelIE, + ScienceChannelIE, + DIYNetworkIE, + DiscoveryLifeIE, + AnimalPlanetIE, + TLCIE, + MotorTrendIE, + MotorTrendOnDemandIE, + DiscoveryPlusIndiaIE, + DiscoveryNetworksDeIE, + DiscoveryPlusItalyIE, + DiscoveryPlusItalyShowIE, + DiscoveryPlusIndiaShowIE, +) +from 
.dreisat import DreiSatIE +from .drbonanza import DRBonanzaIE +from .drtuber import DrTuberIE +from .drtv import ( + DRTVIE, + DRTVLiveIE, + DRTVSeasonIE, + DRTVSeriesIE, +) +from .dtube import DTubeIE +from .dvtv import DVTVIE +from .duboku import ( + DubokuIE, + DubokuPlaylistIE +) +from .dumpert import DumpertIE +from .defense import DefenseGouvFrIE +from .deuxm import ( + DeuxMIE, + DeuxMNewsIE +) +from .digitalconcerthall import DigitalConcertHallIE +from .discovery import DiscoveryIE +from .disney import DisneyIE +from .dispeak import DigitallySpeakingIE +from .dropbox import DropboxIE +from .dropout import ( + DropoutSeasonIE, + DropoutIE +) +from .dw import ( + DWIE, + DWArticleIE, +) +from .eagleplatform import EaglePlatformIE, ClipYouEmbedIE +from .ebaumsworld import EbaumsWorldIE +from .echomsk import EchoMskIE +from .egghead import ( + EggheadCourseIE, + EggheadLessonIE, +) +from .ehow import EHowIE +from .eighttracks import EightTracksIE +from .einthusan import EinthusanIE +from .eitb import EitbIE +from .ellentube import ( + EllenTubeIE, + EllenTubeVideoIE, + EllenTubePlaylistIE, +) +from .elonet import ElonetIE +from .elpais import ElPaisIE +from .embedly import EmbedlyIE +from .engadget import EngadgetIE +from .epicon import ( + EpiconIE, + EpiconSeriesIE, +) +from .epoch import EpochIE +from .eporner import EpornerIE +from .eroprofile import ( + EroProfileIE, + EroProfileAlbumIE, +) +from .ertgr import ( + ERTFlixCodenameIE, + ERTFlixIE, + ERTWebtvEmbedIE, +) +from .escapist import EscapistIE +from .espn import ( + ESPNIE, + WatchESPNIE, + ESPNArticleIE, + FiveThirtyEightIE, + ESPNCricInfoIE, +) +from .esri import EsriVideoIE +from .europa import EuropaIE, EuroParlWebstreamIE +from .europeantour import EuropeanTourIE +from .eurosport import EurosportIE +from .euscreen import EUScreenIE +from .expotv import ExpoTVIE +from .expressen import ExpressenIE +from .extremetube import ExtremeTubeIE +from .eyedotv import EyedoTVIE +from .facebook import ( + FacebookIE, + FacebookPluginsVideoIE, + FacebookRedirectURLIE, + FacebookReelIE, +) +from .fancode import ( + FancodeVodIE, + FancodeLiveIE +) + +from .faz import FazIE +from .fc2 import ( + FC2IE, + FC2EmbedIE, + FC2LiveIE, +) +from .fczenit import FczenitIE +from .fifa import FifaIE +from .filmmodu import FilmmoduIE +from .filmon import ( + FilmOnIE, + FilmOnChannelIE, +) +from .filmweb import FilmwebIE +from .firsttv import FirstTVIE +from .fivetv import FiveTVIE +from .flickr import FlickrIE +from .folketinget import FolketingetIE +from .footyroom import FootyRoomIE +from .formula1 import Formula1IE +from .fourtube import ( + FourTubeIE, + PornTubeIE, + PornerBrosIE, + FuxIE, +) +from .fourzerostudio import ( + FourZeroStudioArchiveIE, + FourZeroStudioClipIE, +) +from .fox import FOXIE +from .fox9 import ( + FOX9IE, + FOX9NewsIE, +) +from .foxgay import FoxgayIE +from .foxnews import ( + FoxNewsIE, + FoxNewsArticleIE, + FoxNewsVideoIE, +) +from .foxsports import FoxSportsIE +from .fptplay import FptplayIE +from .franceinter import FranceInterIE +from .francetv import ( + FranceTVIE, + FranceTVSiteIE, + FranceTVInfoIE, +) +from .freesound import FreesoundIE +from .freespeech import FreespeechIE +from .frontendmasters import ( + FrontendMastersIE, + FrontendMastersLessonIE, + FrontendMastersCourseIE +) +from .freetv import ( + FreeTvIE, + FreeTvMoviesIE, +) +from .fujitv import FujiTVFODPlus7IE +from .funimation import ( + FunimationIE, + FunimationPageIE, + FunimationShowIE, +) +from .funk import FunkIE +from .fusion import 
FusionIE +from .fuyintv import FuyinTVIE +from .gab import ( + GabTVIE, + GabIE, +) +from .gaia import GaiaIE +from .gameinformer import GameInformerIE +from .gamejolt import ( + GameJoltIE, + GameJoltUserIE, + GameJoltGameIE, + GameJoltGameSoundtrackIE, + GameJoltCommunityIE, + GameJoltSearchIE, +) +from .gamespot import GameSpotIE +from .gamestar import GameStarIE +from .gaskrank import GaskrankIE +from .gazeta import GazetaIE +from .gdcvault import GDCVaultIE +from .gedidigital import GediDigitalIE +from .generic import GenericIE +from .genius import ( + GeniusIE, + GeniusLyricsIE, +) +from .gettr import ( + GettrIE, + GettrStreamingIE, +) +from .gfycat import GfycatIE +from .giantbomb import GiantBombIE +from .giga import GigaIE +from .glide import GlideIE +from .globo import ( + GloboIE, + GloboArticleIE, +) +from .go import GoIE +from .godtube import GodTubeIE +from .gofile import GofileIE +from .golem import GolemIE +from .goodgame import GoodGameIE +from .googledrive import ( + GoogleDriveIE, + GoogleDriveFolderIE, +) +from .googlepodcasts import ( + GooglePodcastsIE, + GooglePodcastsFeedIE, +) +from .googlesearch import GoogleSearchIE +from .gopro import GoProIE +from .goplay import GoPlayIE +from .goshgay import GoshgayIE +from .gotostage import GoToStageIE +from .gputechconf import GPUTechConfIE +from .gronkh import ( + GronkhIE, + GronkhFeedIE, + GronkhVodsIE +) +from .groupon import GrouponIE +from .harpodeon import HarpodeonIE +from .hbo import HBOIE +from .hearthisat import HearThisAtIE +from .heise import HeiseIE +from .hellporno import HellPornoIE +from .helsinki import HelsinkiIE +from .hentaistigma import HentaiStigmaIE +from .hgtv import HGTVComShowIE +from .hketv import HKETVIE +from .hidive import HiDiveIE +from .historicfilms import HistoricFilmsIE +from .hitbox import HitboxIE, HitboxLiveIE +from .hitrecord import HitRecordIE +from .holodex import HolodexIE +from .hotnewhiphop import HotNewHipHopIE +from .hotstar import ( + HotStarIE, + HotStarPrefixIE, + HotStarPlaylistIE, + HotStarSeasonIE, + HotStarSeriesIE, +) +from .howcast import HowcastIE +from .howstuffworks import HowStuffWorksIE +from .hrfensehen import HRFernsehenIE +from .hrti import ( + HRTiIE, + HRTiPlaylistIE, +) +from .hse import ( + HSEShowIE, + HSEProductIE, +) +from .genericembeds import ( + HTML5MediaEmbedIE, + QuotedHTMLIE, +) +from .huajiao import HuajiaoIE +from .huya import HuyaLiveIE +from .huffpost import HuffPostIE +from .hungama import ( + HungamaIE, + HungamaSongIE, + HungamaAlbumPlaylistIE, +) +from .hypem import HypemIE +from .hytale import HytaleIE +from .icareus import IcareusIE +from .ichinanalive import ( + IchinanaLiveIE, + IchinanaLiveClipIE, +) +from .ign import ( + IGNIE, + IGNVideoIE, + IGNArticleIE, +) +from .iheart import ( + IHeartRadioIE, + IHeartRadioPodcastIE, +) +from .iltalehti import IltalehtiIE +from .imdb import ( + ImdbIE, + ImdbListIE +) +from .imgur import ( + ImgurIE, + ImgurAlbumIE, + ImgurGalleryIE, +) +from .ina import InaIE +from .inc import IncIE +from .indavideo import IndavideoEmbedIE +from .infoq import InfoQIE +from .instagram import ( + InstagramIE, + InstagramIOSIE, + InstagramUserIE, + InstagramTagIE, + InstagramStoryIE, +) +from .internazionale import InternazionaleIE +from .internetvideoarchive import InternetVideoArchiveIE +from .iprima import ( + IPrimaIE, + IPrimaCNNIE +) +from .iqiyi import ( + IqiyiIE, + IqIE, + IqAlbumIE +) +from .islamchannel import ( + IslamChannelIE, + IslamChannelSeriesIE, +) +from .israelnationalnews import 
IsraelNationalNewsIE +from .itprotv import ( + ITProTVIE, + ITProTVCourseIE +) +from .itv import ( + ITVIE, + ITVBTCCIE, +) +from .ivi import ( + IviIE, + IviCompilationIE +) +from .ivideon import IvideonIE +from .iwara import ( + IwaraIE, + IwaraPlaylistIE, + IwaraUserIE, +) +from .ixigua import IxiguaIE +from .izlesene import IzleseneIE +from .jable import ( + JableIE, + JablePlaylistIE, +) +from .jamendo import ( + JamendoIE, + JamendoAlbumIE, +) +from .japandiet import ( + ShugiinItvLiveIE, + ShugiinItvLiveRoomIE, + ShugiinItvVodIE, + SangiinInstructionIE, + SangiinIE, +) +from .jeuxvideo import JeuxVideoIE +from .jove import JoveIE +from .joj import JojIE +from .jwplatform import JWPlatformIE +from .kakao import KakaoIE +from .kaltura import KalturaIE +from .kanal2 import Kanal2IE +from .kankanews import KankaNewsIE +from .karaoketv import KaraoketvIE +from .karrierevideos import KarriereVideosIE +from .keezmovies import KeezMoviesIE +from .kelbyone import KelbyOneIE +from .ketnet import KetnetIE +from .khanacademy import ( + KhanAcademyIE, + KhanAcademyUnitIE, +) +from .kick import ( + KickIE, + KickVODIE, +) +from .kicker import KickerIE +from .kickstarter import KickStarterIE +from .kinja import KinjaEmbedIE +from .kinopoisk import KinoPoiskIE +from .kompas import KompasVideoIE +from .konserthusetplay import KonserthusetPlayIE +from .koo import KooIE +from .kth import KTHIE +from .krasview import KrasViewIE +from .ku6 import Ku6IE +from .kusi import KUSIIE +from .kuwo import ( + KuwoIE, + KuwoAlbumIE, + KuwoChartIE, + KuwoSingerIE, + KuwoCategoryIE, + KuwoMvIE, +) +from .la7 import ( + LA7IE, + LA7PodcastEpisodeIE, + LA7PodcastIE, +) +from .laola1tv import ( + Laola1TvEmbedIE, + Laola1TvIE, + EHFTVIE, + ITTFIE, +) +from .lastfm import ( + LastFMIE, + LastFMPlaylistIE, + LastFMUserIE, +) +from .lbry import ( + LBRYIE, + LBRYChannelIE, +) +from .lci import LCIIE +from .lcp import ( + LcpPlayIE, + LcpIE, +) +from .lecture2go import Lecture2GoIE +from .lecturio import ( + LecturioIE, + LecturioCourseIE, + LecturioDeCourseIE, +) +from .leeco import ( + LeIE, + LePlaylistIE, + LetvCloudIE, +) +from .lego import LEGOIE +from .lemonde import LemondeIE +from .lenta import LentaIE +from .libraryofcongress import LibraryOfCongressIE +from .libsyn import LibsynIE +from .lifenews import ( + LifeNewsIE, + LifeEmbedIE, +) +from .likee import ( + LikeeIE, + LikeeUserIE +) +from .limelight import ( + LimelightMediaIE, + LimelightChannelIE, + LimelightChannelListIE, +) +from .line import ( + LineLiveIE, + LineLiveChannelIE, +) +from .linkedin import ( + LinkedInIE, + LinkedInLearningIE, + LinkedInLearningCourseIE, +) +from .linuxacademy import LinuxAcademyIE +from .liputan6 import Liputan6IE +from .listennotes import ListenNotesIE +from .litv import LiTVIE +from .livejournal import LiveJournalIE +from .livestream import ( + LivestreamIE, + LivestreamOriginalIE, + LivestreamShortenerIE, +) +from .livestreamfails import LivestreamfailsIE +from .lnkgo import ( + LnkGoIE, + LnkIE, +) +from .localnews8 import LocalNews8IE +from .lovehomeporn import LoveHomePornIE +from .lrt import ( + LRTVODIE, + LRTStreamIE +) +from .lynda import ( + LyndaIE, + LyndaCourseIE +) +from .m6 import M6IE +from .magentamusik360 import MagentaMusik360IE +from .mailru import ( + MailRuIE, + MailRuMusicIE, + MailRuMusicSearchIE, +) +from .mainstreaming import MainStreamingIE +from .malltv import MallTVIE +from .mangomolo import ( + MangomoloVideoIE, + MangomoloLiveIE, +) +from .manoto import ( + ManotoTVIE, + ManotoTVShowIE, + 
ManotoTVLiveIE, +) +from .manyvids import ManyVidsIE +from .maoritv import MaoriTVIE +from .markiza import ( + MarkizaIE, + MarkizaPageIE, +) +from .massengeschmacktv import MassengeschmackTVIE +from .masters import MastersIE +from .matchtv import MatchTVIE +from .mdr import MDRIE +from .medaltv import MedalTVIE +from .mediaite import MediaiteIE +from .mediaklikk import MediaKlikkIE +from .mediaset import ( + MediasetIE, + MediasetShowIE, +) +from .mediasite import ( + MediasiteIE, + MediasiteCatalogIE, + MediasiteNamedCatalogIE, +) +from .mediastream import ( + MediaStreamIE, + WinSportsVideoIE, +) +from .mediaworksnz import MediaWorksNZVODIE +from .medici import MediciIE +from .megaphone import MegaphoneIE +from .meipai import MeipaiIE +from .melonvod import MelonVODIE +from .meta import METAIE +from .metacafe import MetacafeIE +from .metacritic import MetacriticIE +from .mgoon import MgoonIE +from .mgtv import MGTVIE +from .miaopai import MiaoPaiIE +from .microsoftstream import MicrosoftStreamIE +from .microsoftvirtualacademy import ( + MicrosoftVirtualAcademyIE, + MicrosoftVirtualAcademyCourseIE, +) +from .microsoftembed import MicrosoftEmbedIE +from .mildom import ( + MildomIE, + MildomVodIE, + MildomClipIE, + MildomUserVodIE, +) +from .minds import ( + MindsIE, + MindsChannelIE, + MindsGroupIE, +) +from .ministrygrid import MinistryGridIE +from .minoto import MinotoIE +from .miomio import MioMioIE +from .mirrativ import ( + MirrativIE, + MirrativUserIE, +) +from .mirrorcouk import MirrorCoUKIE +from .mit import TechTVMITIE, OCWMITIE +from .mitele import MiTeleIE +from .mixch import ( + MixchIE, + MixchArchiveIE, +) +from .mixcloud import ( + MixcloudIE, + MixcloudUserIE, + MixcloudPlaylistIE, +) +from .mlb import ( + MLBIE, + MLBVideoIE, + MLBTVIE, + MLBArticleIE, +) +from .mlssoccer import MLSSoccerIE +from .mnet import MnetIE +from .mocha import MochaVideoIE +from .moevideo import MoeVideoIE +from .mofosex import ( + MofosexIE, + MofosexEmbedIE, +) +from .mojvideo import MojvideoIE +from .morningstar import MorningstarIE +from .motherless import ( + MotherlessIE, + MotherlessGroupIE +) +from .motorsport import MotorsportIE +from .movieclips import MovieClipsIE +from .moviepilot import MoviepilotIE +from .moview import MoviewPlayIE +from .moviezine import MoviezineIE +from .movingimage import MovingImageIE +from .msn import MSNIE +from .mtv import ( + MTVIE, + MTVVideoIE, + MTVServicesEmbeddedIE, + MTVDEIE, + MTVJapanIE, + MTVItaliaIE, + MTVItaliaProgrammaIE, +) +from .muenchentv import MuenchenTVIE +from .murrtube import MurrtubeIE, MurrtubeUserIE +from .musescore import MuseScoreIE +from .musicdex import ( + MusicdexSongIE, + MusicdexAlbumIE, + MusicdexArtistIE, + MusicdexPlaylistIE, +) +from .mwave import MwaveIE, MwaveMeetGreetIE +from .mxplayer import ( + MxplayerIE, + MxplayerShowIE, +) +from .mychannels import MyChannelsIE +from .myspace import MySpaceIE, MySpaceAlbumIE +from .myspass import MySpassIE +from .myvi import ( + MyviIE, + MyviEmbedIE, +) +from .myvideoge import MyVideoGeIE +from .myvidster import MyVidsterIE +from .n1 import ( + N1InfoAssetIE, + N1InfoIIE, +) +from .nate import ( + NateIE, + NateProgramIE, +) +from .nationalgeographic import ( + NationalGeographicVideoIE, + NationalGeographicTVIE, +) +from .naver import ( + NaverIE, + NaverLiveIE, + NaverNowIE, +) +from .nba import ( + NBAWatchEmbedIE, + NBAWatchIE, + NBAWatchCollectionIE, + NBAEmbedIE, + NBAIE, + NBAChannelIE, +) +from .nbc import ( + NBCIE, + NBCNewsIE, + NBCOlympicsIE, + NBCOlympicsStreamIE, + 
NBCSportsIE, + NBCSportsStreamIE, + NBCSportsVPlayerIE, + NBCStationsIE, +) +from .ndr import ( + NDRIE, + NJoyIE, + NDREmbedBaseIE, + NDREmbedIE, + NJoyEmbedIE, +) +from .ndtv import NDTVIE +from .nebula import ( + NebulaIE, + NebulaSubscriptionsIE, + NebulaChannelIE, +) +from .nerdcubed import NerdCubedFeedIE +from .netzkino import NetzkinoIE +from .neteasemusic import ( + NetEaseMusicIE, + NetEaseMusicAlbumIE, + NetEaseMusicSingerIE, + NetEaseMusicListIE, + NetEaseMusicMvIE, + NetEaseMusicProgramIE, + NetEaseMusicDjRadioIE, +) +from .netverse import ( + NetverseIE, + NetversePlaylistIE, + NetverseSearchIE, +) +from .newgrounds import ( + NewgroundsIE, + NewgroundsPlaylistIE, + NewgroundsUserIE, +) +from .newspicks import NewsPicksIE +from .newstube import NewstubeIE +from .newsy import NewsyIE +from .nextmedia import ( + NextMediaIE, + NextMediaActionNewsIE, + AppleDailyIE, + NextTVIE, +) +from .nexx import ( + NexxIE, + NexxEmbedIE, +) +from .nfb import NFBIE +from .nfhsnetwork import NFHSNetworkIE +from .nfl import ( + NFLIE, + NFLArticleIE, + NFLPlusEpisodeIE, + NFLPlusReplayIE, +) +from .nhk import ( + NhkVodIE, + NhkVodProgramIE, + NhkForSchoolBangumiIE, + NhkForSchoolSubjectIE, + NhkForSchoolProgramListIE, +) +from .nhl import NHLIE +from .nick import ( + NickIE, + NickBrIE, + NickDeIE, + NickNightIE, + NickRuIE, +) +from .niconico import ( + NiconicoIE, + NiconicoPlaylistIE, + NiconicoUserIE, + NiconicoSeriesIE, + NiconicoHistoryIE, + NicovideoSearchDateIE, + NicovideoSearchIE, + NicovideoSearchURLIE, + NicovideoTagURLIE, +) +from .ninecninemedia import ( + NineCNineMediaIE, + CPTwentyFourIE, +) +from .ninegag import NineGagIE +from .ninenow import NineNowIE +from .nintendo import NintendoIE +from .nitter import NitterIE +from .njpwworld import NJPWWorldIE +from .nobelprize import NobelPrizeIE +from .noice import NoicePodcastIE +from .nonktube import NonkTubeIE +from .noodlemagazine import NoodleMagazineIE +from .noovo import NoovoIE +from .normalboots import NormalbootsIE +from .nosvideo import NosVideoIE +from .nosnl import NOSNLArticleIE +from .nova import ( + NovaEmbedIE, + NovaIE, +) +from .novaplay import NovaPlayIE +from .nowness import ( + NownessIE, + NownessPlaylistIE, + NownessSeriesIE, +) +from .noz import NozIE +from .npo import ( + AndereTijdenIE, + NPOIE, + NPOLiveIE, + NPORadioIE, + NPORadioFragmentIE, + SchoolTVIE, + HetKlokhuisIE, + VPROIE, + WNLIE, +) +from .npr import NprIE +from .nrk import ( + NRKIE, + NRKPlaylistIE, + NRKSkoleIE, + NRKTVIE, + NRKTVDirekteIE, + NRKRadioPodkastIE, + NRKTVEpisodeIE, + NRKTVEpisodesIE, + NRKTVSeasonIE, + NRKTVSeriesIE, +) +from .nrl import NRLTVIE +from .ntvcojp import NTVCoJpCUIE +from .ntvde import NTVDeIE +from .ntvru import NTVRuIE +from .nytimes import ( + NYTimesIE, + NYTimesArticleIE, + NYTimesCookingIE, +) +from .nuvid import NuvidIE +from .nzherald import NZHeraldIE +from .nzonscreen import NZOnScreenIE +from .nzz import NZZIE +from .odatv import OdaTVIE +from .odnoklassniki import OdnoklassnikiIE +from .oftv import ( + OfTVIE, + OfTVPlaylistIE +) +from .oktoberfesttv import OktoberfestTVIE +from .olympics import OlympicsReplayIE +from .on24 import On24IE +from .ondemandkorea import OnDemandKoreaIE +from .onefootball import OneFootballIE +from .onenewsnz import OneNewsNZIE +from .oneplace import OnePlacePodcastIE +from .onet import ( + OnetIE, + OnetChannelIE, + OnetMVPIE, + OnetPlIE, +) +from .onionstudios import OnionStudiosIE +from .ooyala import ( + OoyalaIE, + OoyalaExternalIE, +) +from .opencast import ( + 
OpencastIE, + OpencastPlaylistIE, +) +from .openrec import ( + OpenRecIE, + OpenRecCaptureIE, + OpenRecMovieIE, +) +from .ora import OraTVIE +from .orf import ( + ORFTVthekIE, + ORFFM4StoryIE, + ORFRadioIE, + ORFIPTVIE, +) +from .outsidetv import OutsideTVIE +from .packtpub import ( + PacktPubIE, + PacktPubCourseIE, +) +from .palcomp3 import ( + PalcoMP3IE, + PalcoMP3ArtistIE, + PalcoMP3VideoIE, +) +from .pandoratv import PandoraTVIE +from .panopto import ( + PanoptoIE, + PanoptoListIE, + PanoptoPlaylistIE +) +from .paramountplus import ( + ParamountPlusIE, + ParamountPlusSeriesIE, +) +from .parler import ParlerIE +from .parlview import ParlviewIE +from .patreon import ( + PatreonIE, + PatreonCampaignIE +) +from .pbs import PBSIE +from .pearvideo import PearVideoIE +from .peekvids import PeekVidsIE, PlayVidsIE +from .peertube import ( + PeerTubeIE, + PeerTubePlaylistIE, +) +from .peertv import PeerTVIE +from .peloton import ( + PelotonIE, + PelotonLiveIE +) +from .people import PeopleIE +from .performgroup import PerformGroupIE +from .periscope import ( + PeriscopeIE, + PeriscopeUserIE, +) +from .philharmoniedeparis import PhilharmonieDeParisIE +from .phoenix import PhoenixIE +from .photobucket import PhotobucketIE +from .piapro import PiaproIE +from .picarto import ( + PicartoIE, + PicartoVodIE, +) +from .piksel import PikselIE +from .pinkbike import PinkbikeIE +from .pinterest import ( + PinterestIE, + PinterestCollectionIE, +) +from .pixivsketch import ( + PixivSketchIE, + PixivSketchUserIE, +) +from .pladform import PladformIE +from .planetmarathi import PlanetMarathiIE +from .platzi import ( + PlatziIE, + PlatziCourseIE, +) +from .playfm import PlayFMIE +from .playplustv import PlayPlusTVIE +from .plays import PlaysTVIE +from .playstuff import PlayStuffIE +from .playsuisse import PlaySuisseIE +from .playtvak import PlaytvakIE +from .playvid import PlayvidIE +from .playwire import PlaywireIE +from .plutotv import PlutoTVIE +from .pluralsight import ( + PluralsightIE, + PluralsightCourseIE, +) +from .podbayfm import PodbayFMIE, PodbayFMChannelIE +from .podchaser import PodchaserIE +from .podomatic import PodomaticIE +from .pokemon import ( + PokemonIE, + PokemonWatchIE, +) +from .pokergo import ( + PokerGoIE, + PokerGoCollectionIE, +) +from .polsatgo import PolsatGoIE +from .polskieradio import ( + PolskieRadioIE, + PolskieRadioLegacyIE, + PolskieRadioAuditionIE, + PolskieRadioCategoryIE, + PolskieRadioPlayerIE, + PolskieRadioPodcastIE, + PolskieRadioPodcastListIE, + PolskieRadioRadioKierowcowIE, +) +from .popcorntimes import PopcorntimesIE +from .popcorntv import PopcornTVIE +from .porn91 import Porn91IE +from .porncom import PornComIE +from .pornflip import PornFlipIE +from .pornhd import PornHdIE +from .pornhub import ( + PornHubIE, + PornHubUserIE, + PornHubPlaylistIE, + PornHubPagedVideoListIE, + PornHubUserVideosUploadIE, +) +from .pornotube import PornotubeIE +from .pornovoisines import PornoVoisinesIE +from .pornoxo import PornoXOIE +from .pornez import PornezIE +from .puhutv import ( + PuhuTVIE, + PuhuTVSerieIE, +) +from .prankcast import PrankCastIE +from .premiershiprugby import PremiershipRugbyIE +from .presstv import PressTVIE +from .projectveritas import ProjectVeritasIE +from .prosiebensat1 import ProSiebenSat1IE +from .prx import ( + PRXStoryIE, + PRXSeriesIE, + PRXAccountIE, + PRXStoriesSearchIE, + PRXSeriesSearchIE +) +from .puls4 import Puls4IE +from .pyvideo import PyvideoIE +from .qingting import QingTingIE +from .qqmusic import ( + QQMusicIE, + QQMusicSingerIE, + 
QQMusicAlbumIE, + QQMusicToplistIE, + QQMusicPlaylistIE, +) +from .r7 import ( + R7IE, + R7ArticleIE, +) +from .radiko import RadikoIE, RadikoRadioIE +from .radiocanada import ( + RadioCanadaIE, + RadioCanadaAudioVideoIE, +) +from .radiode import RadioDeIE +from .radiojavan import RadioJavanIE +from .radiobremen import RadioBremenIE +from .radiofrance import FranceCultureIE, RadioFranceIE +from .radiozet import RadioZetPodcastIE +from .radiokapital import ( + RadioKapitalIE, + RadioKapitalShowIE, +) +from .radlive import ( + RadLiveIE, + RadLiveChannelIE, + RadLiveSeasonIE, +) +from .rai import ( + RaiPlayIE, + RaiPlayLiveIE, + RaiPlayPlaylistIE, + RaiPlaySoundIE, + RaiPlaySoundLiveIE, + RaiPlaySoundPlaylistIE, + RaiNewsIE, + RaiSudtirolIE, + RaiIE, +) +from .raywenderlich import ( + RayWenderlichIE, + RayWenderlichCourseIE, +) +from .rbmaradio import RBMARadioIE +from .rcs import ( + RCSIE, + RCSEmbedsIE, + RCSVariousIE, +) +from .rcti import ( + RCTIPlusIE, + RCTIPlusSeriesIE, + RCTIPlusTVIE, +) +from .rds import RDSIE +from .redbee import ParliamentLiveUKIE, RTBFIE +from .redbulltv import ( + RedBullTVIE, + RedBullEmbedIE, + RedBullTVRrnContentIE, + RedBullIE, +) +from .reddit import RedditIE +from .redgifs import ( + RedGifsIE, + RedGifsSearchIE, + RedGifsUserIE, +) +from .redtube import RedTubeIE +from .regiotv import RegioTVIE +from .rentv import ( + RENTVIE, + RENTVArticleIE, +) +from .restudy import RestudyIE +from .reuters import ReutersIE +from .reverbnation import ReverbNationIE +from .rice import RICEIE +from .rmcdecouverte import RMCDecouverteIE +from .rockstargames import RockstarGamesIE +from .rokfin import ( + RokfinIE, + RokfinStackIE, + RokfinChannelIE, + RokfinSearchIE, +) +from .roosterteeth import RoosterTeethIE, RoosterTeethSeriesIE +from .rottentomatoes import RottenTomatoesIE +from .rozhlas import ( + RozhlasIE, + RozhlasVltavaIE, +) +from .rte import RteIE, RteRadioIE +from .rtlnl import ( + RtlNlIE, + RTLLuTeleVODIE, + RTLLuArticleIE, + RTLLuLiveIE, + RTLLuRadioIE, +) +from .rtl2 import ( + RTL2IE, + RTL2YouIE, + RTL2YouSeriesIE, +) +from .rtnews import ( + RTNewsIE, + RTDocumentryIE, + RTDocumentryPlaylistIE, + RuptlyIE, +) +from .rtp import RTPIE +from .rtrfm import RTRFMIE +from .rts import RTSIE +from .rtve import ( + RTVEALaCartaIE, + RTVEAudioIE, + RTVELiveIE, + RTVEInfantilIE, + RTVETelevisionIE, +) +from .rtvnh import RTVNHIE +from .rtvs import RTVSIE +from .rtvslo import RTVSLOIE +from .ruhd import RUHDIE +from .rule34video import Rule34VideoIE +from .rumble import ( + RumbleEmbedIE, + RumbleIE, + RumbleChannelIE, +) +from .rutube import ( + RutubeIE, + RutubeChannelIE, + RutubeEmbedIE, + RutubeMovieIE, + RutubePersonIE, + RutubePlaylistIE, + RutubeTagsIE, +) +from .glomex import ( + GlomexIE, + GlomexEmbedIE, +) +from .megatvcom import ( + MegaTVComIE, + MegaTVComEmbedIE, +) +from .ant1newsgr import ( + Ant1NewsGrWatchIE, + Ant1NewsGrArticleIE, + Ant1NewsGrEmbedIE, +) +from .rutv import RUTVIE +from .ruutu import RuutuIE +from .ruv import ( + RuvIE, + RuvSpilaIE +) +from .safari import ( + SafariIE, + SafariApiIE, + SafariCourseIE, +) +from .saitosan import SaitosanIE +from .samplefocus import SampleFocusIE +from .sapo import SapoIE +from .savefrom import SaveFromIE +from .sbs import SBSIE +from .screen9 import Screen9IE +from .screencast import ScreencastIE +from .screencastify import ScreencastifyIE +from .screencastomatic import ScreencastOMaticIE +from .scrippsnetworks import ( + ScrippsNetworksWatchIE, + ScrippsNetworksIE, +) +from .scte import ( + 
SCTEIE, + SCTECourseIE, +) +from .scrolller import ScrolllerIE +from .seeker import SeekerIE +from .senategov import SenateISVPIE, SenateGovIE +from .sendtonews import SendtoNewsIE +from .servus import ServusIE +from .sevenplus import SevenPlusIE +from .sexu import SexuIE +from .seznamzpravy import ( + SeznamZpravyIE, + SeznamZpravyArticleIE, +) +from .shahid import ( + ShahidIE, + ShahidShowIE, +) +from .shared import ( + SharedIE, + VivoIE, +) +from .sharevideos import ShareVideosEmbedIE +from .sibnet import SibnetEmbedIE +from .shemaroome import ShemarooMeIE +from .showroomlive import ShowRoomLiveIE +from .simplecast import ( + SimplecastIE, + SimplecastEpisodeIE, + SimplecastPodcastIE, +) +from .sina import SinaIE +from .sixplay import SixPlayIE +from .skeb import SkebIE +from .skyit import ( + SkyItPlayerIE, + SkyItVideoIE, + SkyItVideoLiveIE, + SkyItIE, + SkyItArteIE, + CieloTVItIE, + TV8ItIE, +) +from .skylinewebcams import SkylineWebcamsIE +from .skynewsarabia import ( + SkyNewsArabiaIE, + SkyNewsArabiaArticleIE, +) +from .skynewsau import SkyNewsAUIE +from .sky import ( + SkyNewsIE, + SkyNewsStoryIE, + SkySportsIE, + SkySportsNewsIE, +) +from .slideshare import SlideshareIE +from .slideslive import SlidesLiveIE +from .slutload import SlutloadIE +from .smotrim import SmotrimIE +from .snotr import SnotrIE +from .sohu import SohuIE +from .sonyliv import ( + SonyLIVIE, + SonyLIVSeriesIE, +) +from .soundcloud import ( + SoundcloudEmbedIE, + SoundcloudIE, + SoundcloudSetIE, + SoundcloudRelatedIE, + SoundcloudUserIE, + SoundcloudUserPermalinkIE, + SoundcloudTrackStationIE, + SoundcloudPlaylistIE, + SoundcloudSearchIE, +) +from .soundgasm import ( + SoundgasmIE, + SoundgasmProfileIE +) +from .southpark import ( + SouthParkIE, + SouthParkDeIE, + SouthParkDkIE, + SouthParkEsIE, + SouthParkLatIE, + SouthParkNlIE +) +from .sovietscloset import ( + SovietsClosetIE, + SovietsClosetPlaylistIE +) +from .spankbang import ( + SpankBangIE, + SpankBangPlaylistIE, +) +from .spankwire import SpankwireIE +from .spiegel import SpiegelIE +from .spike import ( + BellatorIE, + ParamountNetworkIE, +) +from .startrek import StarTrekIE +from .stitcher import ( + StitcherIE, + StitcherShowIE, +) +from .sport5 import Sport5IE +from .sportbox import SportBoxIE +from .sportdeutschland import SportDeutschlandIE +from .spotify import ( + SpotifyIE, + SpotifyShowIE, +) +from .spreaker import ( + SpreakerIE, + SpreakerPageIE, + SpreakerShowIE, + SpreakerShowPageIE, +) +from .springboardplatform import SpringboardPlatformIE +from .sprout import SproutIE +from .srgssr import ( + SRGSSRIE, + SRGSSRPlayIE, +) +from .srmediathek import SRMediathekIE +from .stanfordoc import StanfordOpenClassroomIE +from .startv import StarTVIE +from .steam import ( + SteamIE, + SteamCommunityBroadcastIE, +) +from .storyfire import ( + StoryFireIE, + StoryFireUserIE, + StoryFireSeriesIE, +) +from .streamable import StreamableIE +from .streamanity import StreamanityIE +from .streamcloud import StreamcloudIE +from .streamcz import StreamCZIE +from .streamff import StreamFFIE +from .streetvoice import StreetVoiceIE +from .stretchinternet import StretchInternetIE +from .stripchat import StripchatIE +from .stv import STVPlayerIE +from .substack import SubstackIE +from .sunporno import SunPornoIE +from .sverigesradio import ( + SverigesRadioEpisodeIE, + SverigesRadioPublicationIE, +) +from .svt import ( + SVTIE, + SVTPageIE, + SVTPlayIE, + SVTSeriesIE, +) +from .swearnet import SwearnetEpisodeIE +from .swrmediathek import SWRMediathekIE +from 
.syvdk import SYVDKIE +from .syfy import SyfyIE +from .sztvhu import SztvHuIE +from .tagesschau import TagesschauIE +from .tass import TassIE +from .tbs import TBSIE +from .tdslifeway import TDSLifewayIE +from .teachable import ( + TeachableIE, + TeachableCourseIE, +) +from .teachertube import ( + TeacherTubeIE, + TeacherTubeUserIE, +) +from .teachingchannel import TeachingChannelIE +from .teamcoco import TeamcocoIE +from .teamtreehouse import TeamTreeHouseIE +from .techtalks import TechTalksIE +from .ted import ( + TedEmbedIE, + TedPlaylistIE, + TedSeriesIE, + TedTalkIE, +) +from .tele5 import Tele5IE +from .tele13 import Tele13IE +from .telebruxelles import TeleBruxellesIE +from .telecinco import TelecincoIE +from .telegraaf import TelegraafIE +from .telegram import TelegramEmbedIE +from .telemb import TeleMBIE +from .telemundo import TelemundoIE +from .telequebec import ( + TeleQuebecIE, + TeleQuebecSquatIE, + TeleQuebecEmissionIE, + TeleQuebecLiveIE, + TeleQuebecVideoIE, +) +from .teletask import TeleTaskIE +from .telewebion import TelewebionIE +from .tempo import TempoIE +from .tencent import ( + IflixEpisodeIE, + IflixSeriesIE, + VQQSeriesIE, + VQQVideoIE, + WeTvEpisodeIE, + WeTvSeriesIE, +) +from .tennistv import TennisTVIE +from .tenplay import TenPlayIE +from .testurl import TestURLIE +from .tf1 import TF1IE +from .tfo import TFOIE +from .theholetv import TheHoleTvIE +from .theintercept import TheInterceptIE +from .theplatform import ( + ThePlatformIE, + ThePlatformFeedIE, +) +from .thestar import TheStarIE +from .thesun import TheSunIE +from .theta import ( + ThetaVideoIE, + ThetaStreamIE, +) +from .theweatherchannel import TheWeatherChannelIE +from .thisamericanlife import ThisAmericanLifeIE +from .thisav import ThisAVIE +from .thisoldhouse import ThisOldHouseIE +from .thisvid import ( + ThisVidIE, + ThisVidMemberIE, + ThisVidPlaylistIE, +) +from .threespeak import ( + ThreeSpeakIE, + ThreeSpeakUserIE, +) +from .threeqsdn import ThreeQSDNIE +from .tiktok import ( + TikTokIE, + TikTokUserIE, + TikTokSoundIE, + TikTokEffectIE, + TikTokTagIE, + TikTokVMIE, + TikTokLiveIE, + DouyinIE, +) +from .tinypic import TinyPicIE +from .tmz import TMZIE +from .tnaflix import ( + TNAFlixNetworkEmbedIE, + TNAFlixIE, + EMPFlixIE, + MovieFapIE, +) +from .toggle import ( + ToggleIE, + MeWatchIE, +) +from .toggo import ( + ToggoIE, +) +from .tokentube import ( + TokentubeIE, + TokentubeChannelIE +) +from .tonline import TOnlineIE +from .toongoggles import ToonGogglesIE +from .toutv import TouTvIE +from .toypics import ToypicsUserIE, ToypicsIE +from .traileraddict import TrailerAddictIE +from .triller import ( + TrillerIE, + TrillerUserIE, +) +from .trilulilu import TriluliluIE +from .trovo import ( + TrovoIE, + TrovoVodIE, + TrovoChannelVodIE, + TrovoChannelClipIE, +) +from .trtcocuk import TrtCocukVideoIE +from .trueid import TrueIDIE +from .trunews import TruNewsIE +from .truth import TruthIE +from .trutv import TruTVIE +from .tube8 import Tube8IE +from .tubetugraz import TubeTuGrazIE, TubeTuGrazSeriesIE +from .tubitv import ( + TubiTvIE, + TubiTvShowIE, +) +from .tumblr import TumblrIE +from .tunein import ( + TuneInClipIE, + TuneInStationIE, + TuneInProgramIE, + TuneInTopicIE, + TuneInShortenerIE, +) +from .tunepk import TunePkIE +from .turbo import TurboIE +from .tv2 import ( + TV2IE, + TV2ArticleIE, + KatsomoIE, + MTVUutisetArticleIE, +) +from .tv24ua import ( + TV24UAVideoIE, +) +from .tv2dk import ( + TV2DKIE, + TV2DKBornholmPlayIE, +) +from .tv2hu import ( + TV2HuIE, + TV2HuSeriesIE, +) 
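+# NOTE (annotation, not part of the upstream module): user-facing listings do
+# not rely on this file order; list_extractor_classes() in __init__.py re-sorts
+# by IE_NAME and keeps the catch-all extractor last, roughly:
+#
+#     yield from sorted(suitable_ies, key=lambda ie: ie.IE_NAME.lower())
+#     yield GenericIE                      # catch-all stays at the end
+#
+# (suitable_ies is shorthand here for the age-filtered classes.) URL matching,
+# by contrast, walks the registry in the order defined in this file.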
+from .tv4 import TV4IE +from .tv5mondeplus import TV5MondePlusIE +from .tv5unis import ( + TV5UnisVideoIE, + TV5UnisIE, +) +from .tva import ( + TVAIE, + QubIE, +) +from .tvanouvelles import ( + TVANouvellesIE, + TVANouvellesArticleIE, +) +from .tvc import ( + TVCIE, + TVCArticleIE, +) +from .tver import TVerIE +from .tvigle import TvigleIE +from .tviplayer import TVIPlayerIE +from .tvland import TVLandIE +from .tvn24 import TVN24IE +from .tvnet import TVNetIE +from .tvnoe import TVNoeIE +from .tvnow import ( + TVNowIE, + TVNowFilmIE, + TVNowNewIE, + TVNowSeasonIE, + TVNowAnnualIE, + TVNowShowIE, +) +from .tvopengr import ( + TVOpenGrWatchIE, + TVOpenGrEmbedIE, +) +from .tvp import ( + TVPEmbedIE, + TVPIE, + TVPStreamIE, + TVPVODSeriesIE, + TVPVODVideoIE, +) +from .tvplay import ( + TVPlayIE, + ViafreeIE, + TVPlayHomeIE, +) +from .tvplayer import TVPlayerIE +from .tweakers import TweakersIE +from .twentyfourvideo import TwentyFourVideoIE +from .twentymin import TwentyMinutenIE +from .twentythreevideo import TwentyThreeVideoIE +from .twitcasting import ( + TwitCastingIE, + TwitCastingLiveIE, + TwitCastingUserIE, +) +from .twitch import ( + TwitchVodIE, + TwitchCollectionIE, + TwitchVideosIE, + TwitchVideosClipsIE, + TwitchVideosCollectionsIE, + TwitchStreamIE, + TwitchClipsIE, +) +from .twitter import ( + TwitterCardIE, + TwitterIE, + TwitterAmplifyIE, + TwitterBroadcastIE, + TwitterSpacesIE, + TwitterShortenerIE, +) +from .txxx import ( + TxxxIE, + PornTopIE, +) +from .udemy import ( + UdemyIE, + UdemyCourseIE +) +from .udn import UDNEmbedIE +from .ufctv import ( + UFCTVIE, + UFCArabiaIE, +) +from .ukcolumn import UkColumnIE +from .uktvplay import UKTVPlayIE +from .digiteka import DigitekaIE +from .dlive import ( + DLiveVODIE, + DLiveStreamIE, +) +from .drooble import DroobleIE +from .umg import UMGDeIE +from .unistra import UnistraIE +from .unity import UnityIE +from .unscripted import UnscriptedNewsVideoIE +from .unsupported import KnownDRMIE, KnownPiracyIE +from .uol import UOLIE +from .uplynk import ( + UplynkIE, + UplynkPreplayIE, +) +from .urort import UrortIE +from .urplay import URPlayIE +from .usanetwork import USANetworkIE +from .usatoday import USATodayIE +from .ustream import UstreamIE, UstreamChannelIE +from .ustudio import ( + UstudioIE, + UstudioEmbedIE, +) +from .utreon import UtreonIE +from .varzesh3 import Varzesh3IE +from .vbox7 import Vbox7IE +from .veehd import VeeHDIE +from .veo import VeoIE +from .veoh import ( + VeohIE, + VeohUserIE +) +from .vesti import VestiIE +from .vevo import ( + VevoIE, + VevoPlaylistIE, +) +from .vgtv import ( + BTArticleIE, + BTVestlendingenIE, + VGTVIE, +) +from .vh1 import VH1IE +from .vice import ( + ViceIE, + ViceArticleIE, + ViceShowIE, +) +from .vidbit import VidbitIE +from .viddler import ViddlerIE +from .videa import VideaIE +from .videocampus_sachsen import ( + VideocampusSachsenIE, + ViMPPlaylistIE, +) +from .videodetective import VideoDetectiveIE +from .videofyme import VideofyMeIE +from .videoken import ( + VideoKenIE, + VideoKenPlayerIE, + VideoKenPlaylistIE, + VideoKenCategoryIE, + VideoKenTopicIE, +) +from .videomore import ( + VideomoreIE, + VideomoreVideoIE, + VideomoreSeasonIE, +) +from .videopress import VideoPressIE +from .vidio import ( + VidioIE, + VidioPremierIE, + VidioLiveIE +) +from .vidlii import VidLiiIE +from .viewlift import ( + ViewLiftIE, + ViewLiftEmbedIE, +) +from .viidea import ViideaIE +from .vimeo import ( + VimeoIE, + VimeoAlbumIE, + VimeoChannelIE, + VimeoGroupsIE, + VimeoLikesIE, + VimeoOndemandIE, + 
VimeoProIE, + VimeoReviewIE, + VimeoUserIE, + VimeoWatchLaterIE, + VHXEmbedIE, +) +from .vimm import ( + VimmIE, + VimmRecordingIE, +) +from .vimple import VimpleIE +from .vine import ( + VineIE, + VineUserIE, +) +from .viki import ( + VikiIE, + VikiChannelIE, +) +from .viqeo import ViqeoIE +from .viu import ( + ViuIE, + ViuPlaylistIE, + ViuOTTIE, +) +from .vk import ( + VKIE, + VKUserVideosIE, + VKWallPostIE, +) +from .vodlocker import VodlockerIE +from .vodpl import VODPlIE +from .vodplatform import VODPlatformIE +from .voicerepublic import VoiceRepublicIE +from .voicy import ( + VoicyIE, + VoicyChannelIE, +) +from .volejtv import VolejTVIE +from .voot import ( + VootIE, + VootSeriesIE, +) +from .voxmedia import ( + VoxMediaVolumeIE, + VoxMediaIE, +) +from .vrt import VRTIE +from .vrak import VrakIE +from .vrv import ( + VRVIE, + VRVSeriesIE, +) +from .vshare import VShareIE +from .vtm import VTMIE +from .medialaan import MedialaanIE +from .vuclip import VuClipIE +from .vupload import VuploadIE +from .vvvvid import ( + VVVVIDIE, + VVVVIDShowIE, +) +from .vyborymos import VyboryMosIE +from .vzaar import VzaarIE +from .wakanim import WakanimIE +from .walla import WallaIE +from .washingtonpost import ( + WashingtonPostIE, + WashingtonPostArticleIE, +) +from .wasdtv import ( + WASDTVStreamIE, + WASDTVRecordIE, + WASDTVClipIE, +) +from .wat import WatIE +from .watchbox import WatchBoxIE +from .watchindianporn import WatchIndianPornIE +from .wdr import ( + WDRIE, + WDRPageIE, + WDRElefantIE, + WDRMobileIE, +) +from .webcamerapl import WebcameraplIE +from .webcaster import ( + WebcasterIE, + WebcasterFeedIE, +) +from .webofstories import ( + WebOfStoriesIE, + WebOfStoriesPlaylistIE, +) +from .weibo import ( + WeiboIE, + WeiboMobileIE +) +from .weiqitv import WeiqiTVIE +from .wikimedia import WikimediaIE +from .willow import WillowIE +from .wimtv import WimTVIE +from .whowatch import WhoWatchIE +from .wistia import ( + WistiaIE, + WistiaPlaylistIE, + WistiaChannelIE, +) +from .wordpress import ( + WordpressPlaylistEmbedIE, + WordpressMiniAudioPlayerEmbedIE, +) +from .worldstarhiphop import WorldStarHipHopIE +from .wppilot import ( + WPPilotIE, + WPPilotChannelsIE, +) +from .wrestleuniverse import ( + WrestleUniverseVODIE, + WrestleUniversePPVIE, +) +from .wsj import ( + WSJIE, + WSJArticleIE, +) +from .wwe import WWEIE +from .xanimu import XanimuIE +from .xbef import XBefIE +from .xboxclips import XboxClipsIE +from .xfileshare import XFileShareIE +from .xhamster import ( + XHamsterIE, + XHamsterEmbedIE, + XHamsterUserIE, +) +from .ximalaya import ( + XimalayaIE, + XimalayaAlbumIE +) +from .xinpianchang import XinpianchangIE +from .xminus import XMinusIE +from .xnxx import XNXXIE +from .xstream import XstreamIE +from .xtube import XTubeUserIE, XTubeIE +from .xuite import XuiteIE +from .xvideos import XVideosIE +from .xxxymovies import XXXYMoviesIE +from .yahoo import ( + YahooIE, + YahooSearchIE, + YahooGyaOPlayerIE, + YahooGyaOIE, + YahooJapanNewsIE, +) +from .yandexdisk import YandexDiskIE +from .yandexmusic import ( + YandexMusicTrackIE, + YandexMusicAlbumIE, + YandexMusicPlaylistIE, + YandexMusicArtistTracksIE, + YandexMusicArtistAlbumsIE, +) +from .yandexvideo import ( + YandexVideoIE, + YandexVideoPreviewIE, + ZenYandexIE, + ZenYandexChannelIE, +) +from .yapfiles import YapFilesIE +from .yesjapan import YesJapanIE +from .yinyuetai import YinYueTaiIE +from .yle_areena import YleAreenaIE +from .ynet import YnetIE +from .youjizz import YouJizzIE +from .youku import ( + YoukuIE, + YoukuShowIE, 
+) +from .younow import ( + YouNowLiveIE, + YouNowChannelIE, + YouNowMomentIE, +) +from .youporn import YouPornIE +from .yourporn import YourPornIE +from .yourupload import YourUploadIE +from .zapiks import ZapiksIE +from .zattoo import ( + BBVTVIE, + BBVTVLiveIE, + BBVTVRecordingsIE, + EinsUndEinsTVIE, + EinsUndEinsTVLiveIE, + EinsUndEinsTVRecordingsIE, + EWETVIE, + EWETVLiveIE, + EWETVRecordingsIE, + GlattvisionTVIE, + GlattvisionTVLiveIE, + GlattvisionTVRecordingsIE, + MNetTVIE, + MNetTVLiveIE, + MNetTVRecordingsIE, + NetPlusTVIE, + NetPlusTVLiveIE, + NetPlusTVRecordingsIE, + OsnatelTVIE, + OsnatelTVLiveIE, + OsnatelTVRecordingsIE, + QuantumTVIE, + QuantumTVLiveIE, + QuantumTVRecordingsIE, + SaltTVIE, + SaltTVLiveIE, + SaltTVRecordingsIE, + SAKTVIE, + SAKTVLiveIE, + SAKTVRecordingsIE, + VTXTVIE, + VTXTVLiveIE, + VTXTVRecordingsIE, + WalyTVIE, + WalyTVLiveIE, + WalyTVRecordingsIE, + ZattooIE, + ZattooLiveIE, + ZattooMoviesIE, + ZattooRecordingsIE, +) +from .zdf import ZDFIE, ZDFChannelIE +from .zee5 import ( + Zee5IE, + Zee5SeriesIE, +) +from .zeenews import ZeeNewsIE +from .zhihu import ZhihuIE +from .zingmp3 import ( + ZingMp3IE, + ZingMp3AlbumIE, + ZingMp3ChartHomeIE, + ZingMp3WeekChartIE, + ZingMp3ChartMusicVideoIE, + ZingMp3UserIE, +) +from .zoom import ZoomIE +from .zype import ZypeIE diff --git a/plugins/youtube_download/yt_dlp/extractor/abc.py b/plugins/youtube_download/yt_dlp/extractor/abc.py index 9d6f5a4..0ca76b8 100644 --- a/plugins/youtube_download/yt_dlp/extractor/abc.py +++ b/plugins/youtube_download/yt_dlp/extractor/abc.py @@ -1,5 +1,3 @@ -from __future__ import unicode_literals - import hashlib import hmac import re @@ -157,8 +155,6 @@ class ABCIE(InfoExtractor): 'format_id': format_id }) - self._sort_formats(formats) - return { 'id': video_id, 'title': self._og_search_title(webpage), @@ -213,7 +209,7 @@ class ABCIViewIE(InfoExtractor): 'hdnea': token, }) - for sd in ('720', 'sd', 'sd-low'): + for sd in ('1080', '720', 'sd', 'sd-low'): sd_url = try_get( stream, lambda x: x['streams']['hls'][sd], compat_str) if not sd_url: @@ -223,7 +219,6 @@ class ABCIViewIE(InfoExtractor): entry_protocol='m3u8_native', m3u8_id='hls', fatal=False) if formats: break - self._sort_formats(formats) subtitles = {} src_vtt = stream.get('captions', {}).get('src-vtt') diff --git a/plugins/youtube_download/yt_dlp/extractor/abcnews.py b/plugins/youtube_download/yt_dlp/extractor/abcnews.py index 296b8ce..a57295b 100644 --- a/plugins/youtube_download/yt_dlp/extractor/abcnews.py +++ b/plugins/youtube_download/yt_dlp/extractor/abcnews.py @@ -1,7 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - - from .amp import AMPIE from .common import InfoExtractor from ..utils import ( diff --git a/plugins/youtube_download/yt_dlp/extractor/abcotvs.py b/plugins/youtube_download/yt_dlp/extractor/abcotvs.py index 5bff466..6dca19d 100644 --- a/plugins/youtube_download/yt_dlp/extractor/abcotvs.py +++ b/plugins/youtube_download/yt_dlp/extractor/abcotvs.py @@ -1,7 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - - from .common import InfoExtractor from ..compat import compat_str from ..utils import ( @@ -82,7 +78,6 @@ class ABCOTVSIE(InfoExtractor): 'url': mp4_url, 'width': 640, }) - self._sort_formats(formats) image = video.get('image') or {} @@ -123,7 +118,6 @@ class ABCOTVSClipsIE(InfoExtractor): title = video_data['title'] formats = self._extract_m3u8_formats( video_data['videoURL'].split('?')[0], video_id, 'mp4') - self._sort_formats(formats) return { 'id': video_id, diff --git 
a/plugins/youtube_download/yt_dlp/extractor/abematv.py b/plugins/youtube_download/yt_dlp/extractor/abematv.py
new file mode 100644
index 0000000..f611c1f
--- /dev/null
+++ b/plugins/youtube_download/yt_dlp/extractor/abematv.py
@@ -0,0 +1,522 @@
+import base64
+import binascii
+import functools
+import hashlib
+import hmac
+import io
+import json
+import re
+import struct
+import time
+import urllib.parse
+import urllib.request
+import urllib.response
+import uuid
+
+from .common import InfoExtractor
+from ..aes import aes_ecb_decrypt
+from ..utils import (
+    ExtractorError,
+    bytes_to_intlist,
+    decode_base_n,
+    int_or_none,
+    intlist_to_bytes,
+    OnDemandPagedList,
+    request_to_url,
+    time_seconds,
+    traverse_obj,
+    update_url_query,
+)
+
+# NOTE: network handler related code is a temporary thing until network stack overhaul PRs are merged (#2861/#2862)
+
+
+def add_opener(ydl, handler):
+    ''' Add a handler for opening URLs, like _download_webpage '''
+    # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L426
+    # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L605
+    assert isinstance(ydl._opener, urllib.request.OpenerDirector)
+    ydl._opener.add_handler(handler)
+
+
+def remove_opener(ydl, handler):
+    '''
+    Remove handler(s) for opening URLs
+    @param handler Either the handler object itself or a handler type.
+    Specifying a handler type will remove all handlers for which isinstance returns True.
+    '''
+    # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L426
+    # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L605
+    opener = ydl._opener
+    assert isinstance(ydl._opener, urllib.request.OpenerDirector)
+    if isinstance(handler, (type, tuple)):
+        find_cp = lambda x: isinstance(x, handler)
+    else:
+        find_cp = lambda x: x is handler
+
+    removed = []
+    for meth in dir(handler):
+        if meth in ["redirect_request", "do_open", "proxy_open"]:
+            # oops, coincidental match
+            continue
+
+        i = meth.find("_")
+        protocol = meth[:i]
+        condition = meth[i + 1:]
+
+        if condition.startswith("error"):
+            j = condition.find("_") + i + 1
+            kind = meth[j + 1:]
+            try:
+                kind = int(kind)
+            except ValueError:
+                pass
+            lookup = opener.handle_error.get(protocol, {})
+            opener.handle_error[protocol] = lookup
+        elif condition == "open":
+            kind = protocol
+            lookup = opener.handle_open
+        elif condition == "response":
+            kind = protocol
+            lookup = opener.process_response
+        elif condition == "request":
+            kind = protocol
+            lookup = opener.process_request
+        else:
+            continue
+
+        handlers = lookup.setdefault(kind, [])
+        if handlers:
+            # collect the matching handlers before they are filtered out of the
+            # list; appending a lazy generator after the filtering step would
+            # always record nothing
+            removed.extend(x for x in handlers if find_cp(x))
+            handlers[:] = [x for x in handlers if not find_cp(x)]
+
+    if removed:
+        for x in opener.handlers:
+            if find_cp(x):
+                x.add_parent(None)
+        opener.handlers[:] = [x for x in opener.handlers if not find_cp(x)]
+
+
+class AbemaLicenseHandler(urllib.request.BaseHandler):
+    handler_order = 499
+    STRTABLE = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
+    HKEY = b'3AF0298C219469522A313570E8583005A642E73EDD58E3EA2FB7339D3DF1597E'
+
+    def __init__(self, ie: 'AbemaTVIE'):
+        # the protocol that this should really handle is 'abematv-license://'
+        # abematv_license_open is just a placeholder for development purposes
+        # ref.
https://github.com/python/cpython/blob/f4c03484da59049eb62a9bf7777b963e2267d187/Lib/urllib/request.py#L510 + setattr(self, 'abematv-license_open', getattr(self, 'abematv_license_open')) + self.ie = ie + + def _get_videokey_from_ticket(self, ticket): + to_show = self.ie.get_param('verbose', False) + media_token = self.ie._get_media_token(to_show=to_show) + + license_response = self.ie._download_json( + 'https://license.abema.io/abematv-hls', None, note='Requesting playback license' if to_show else False, + query={'t': media_token}, + data=json.dumps({ + 'kv': 'a', + 'lt': ticket + }).encode('utf-8'), + headers={ + 'Content-Type': 'application/json', + }) + + res = decode_base_n(license_response['k'], table=self.STRTABLE) + encvideokey = bytes_to_intlist(struct.pack('>QQ', res >> 64, res & 0xffffffffffffffff)) + + h = hmac.new( + binascii.unhexlify(self.HKEY), + (license_response['cid'] + self.ie._DEVICE_ID).encode('utf-8'), + digestmod=hashlib.sha256) + enckey = bytes_to_intlist(h.digest()) + + return intlist_to_bytes(aes_ecb_decrypt(encvideokey, enckey)) + + def abematv_license_open(self, url): + url = request_to_url(url) + ticket = urllib.parse.urlparse(url).netloc + response_data = self._get_videokey_from_ticket(ticket) + return urllib.response.addinfourl(io.BytesIO(response_data), headers={ + 'Content-Length': len(response_data), + }, url=url, code=200) + + +class AbemaTVBaseIE(InfoExtractor): + _USERTOKEN = None + _DEVICE_ID = None + _MEDIATOKEN = None + + _SECRETKEY = b'v+Gjs=25Aw5erR!J8ZuvRrCx*rGswhB&qdHd_SYerEWdU&a?3DzN9BRbp5KwY4hEmcj5#fykMjJ=AuWz5GSMY-d@H7DMEh3M@9n2G552Us$$k9cD=3TxwWe86!x#Zyhe' + + @classmethod + def _generate_aks(cls, deviceid): + deviceid = deviceid.encode('utf-8') + # add 1 hour and then drop minute and secs + ts_1hour = int((time_seconds() // 3600 + 1) * 3600) + time_struct = time.gmtime(ts_1hour) + ts_1hour_str = str(ts_1hour).encode('utf-8') + + tmp = None + + def mix_once(nonce): + nonlocal tmp + h = hmac.new(cls._SECRETKEY, digestmod=hashlib.sha256) + h.update(nonce) + tmp = h.digest() + + def mix_tmp(count): + nonlocal tmp + for i in range(count): + mix_once(tmp) + + def mix_twist(nonce): + nonlocal tmp + mix_once(base64.urlsafe_b64encode(tmp).rstrip(b'=') + nonce) + + mix_once(cls._SECRETKEY) + mix_tmp(time_struct.tm_mon) + mix_twist(deviceid) + mix_tmp(time_struct.tm_mday % 5) + mix_twist(ts_1hour_str) + mix_tmp(time_struct.tm_hour % 5) + + return base64.urlsafe_b64encode(tmp).rstrip(b'=').decode('utf-8') + + def _get_device_token(self): + if self._USERTOKEN: + return self._USERTOKEN + + username, _ = self._get_login_info() + AbemaTVBaseIE._USERTOKEN = username and self.cache.load(self._NETRC_MACHINE, username) + if AbemaTVBaseIE._USERTOKEN: + # try authentication with locally stored token + try: + self._get_media_token(True) + return + except ExtractorError as e: + self.report_warning(f'Failed to login with cached user token; obtaining a fresh one ({e})') + + AbemaTVBaseIE._DEVICE_ID = str(uuid.uuid4()) + aks = self._generate_aks(self._DEVICE_ID) + user_data = self._download_json( + 'https://api.abema.io/v1/users', None, note='Authorizing', + data=json.dumps({ + 'deviceId': self._DEVICE_ID, + 'applicationKeySecret': aks, + }).encode('utf-8'), + headers={ + 'Content-Type': 'application/json', + }) + AbemaTVBaseIE._USERTOKEN = user_data['token'] + + # don't allow adding it 2 times or more, though it's guarded + remove_opener(self._downloader, AbemaLicenseHandler) + add_opener(self._downloader, AbemaLicenseHandler(self)) + + return self._USERTOKEN + + def 
_get_media_token(self, invalidate=False, to_show=True): + if not invalidate and self._MEDIATOKEN: + return self._MEDIATOKEN + + AbemaTVBaseIE._MEDIATOKEN = self._download_json( + 'https://api.abema.io/v1/media/token', None, note='Fetching media token' if to_show else False, + query={ + 'osName': 'android', + 'osVersion': '6.0.1', + 'osLang': 'ja_JP', + 'osTimezone': 'Asia/Tokyo', + 'appId': 'tv.abema', + 'appVersion': '3.27.1' + }, headers={ + 'Authorization': f'bearer {self._get_device_token()}', + })['token'] + + return self._MEDIATOKEN + + def _call_api(self, endpoint, video_id, query=None, note='Downloading JSON metadata'): + return self._download_json( + f'https://api.abema.io/{endpoint}', video_id, query=query or {}, + note=note, + headers={ + 'Authorization': f'bearer {self._get_device_token()}', + }) + + def _extract_breadcrumb_list(self, webpage, video_id): + for jld in re.finditer( + r'(?is)]+type=(["\']?)application/ld\+json\1[^>]*>(?P.+?)', + webpage): + jsonld = self._parse_json(jld.group('json_ld'), video_id, fatal=False) + if traverse_obj(jsonld, '@type') != 'BreadcrumbList': + continue + items = traverse_obj(jsonld, ('itemListElement', ..., 'name')) + if items: + return items + return [] + + +class AbemaTVIE(AbemaTVBaseIE): + _VALID_URL = r'https?://abema\.tv/(?Pnow-on-air|video/episode|channels/.+?/slots)/(?P[^?/]+)' + _NETRC_MACHINE = 'abematv' + _TESTS = [{ + 'url': 'https://abema.tv/video/episode/194-25_s2_p1', + 'info_dict': { + 'id': '194-25_s2_p1', + 'title': '第1話 「チーズケーキ」 「モーニング再び」', + 'series': '異世界食堂2', + 'series_number': 2, + 'episode': '第1話 「チーズケーキ」 「モーニング再び」', + 'episode_number': 1, + }, + 'skip': 'expired', + }, { + 'url': 'https://abema.tv/channels/anime-live2/slots/E8tvAnMJ7a9a5d', + 'info_dict': { + 'id': 'E8tvAnMJ7a9a5d', + 'title': 'ゆるキャン△ SEASON2 全話一挙【無料ビデオ72時間】', + 'series': 'ゆるキャン△ SEASON2', + 'episode': 'ゆるキャン△ SEASON2 全話一挙【無料ビデオ72時間】', + 'series_number': 2, + 'episode_number': 1, + 'description': 'md5:9c5a3172ae763278f9303922f0ea5b17', + }, + 'skip': 'expired', + }, { + 'url': 'https://abema.tv/video/episode/87-877_s1282_p31047', + 'info_dict': { + 'id': 'E8tvAnMJ7a9a5d', + 'title': '第5話『光射す』', + 'description': 'md5:56d4fc1b4f7769ded5f923c55bb4695d', + 'thumbnail': r're:https://hayabusa\.io/.+', + 'series': '相棒', + 'episode': '第5話『光射す』', + }, + 'skip': 'expired', + }, { + 'url': 'https://abema.tv/now-on-air/abema-anime', + 'info_dict': { + 'id': 'abema-anime', + # this varies + # 'title': '女子高生の無駄づかい 全話一挙【無料ビデオ72時間】', + 'description': 'md5:55f2e61f46a17e9230802d7bcc913d5f', + 'is_live': True, + }, + 'skip': 'Not supported until yt-dlp implements native live downloader OR AbemaTV can start a local HTTP server', + }] + _TIMETABLE = None + + def _perform_login(self, username, password): + self._get_device_token() + if self.cache.load(self._NETRC_MACHINE, username) and self._get_media_token(): + self.write_debug('Skipping logging in') + return + + if '@' in username: # don't strictly check if it's email address or not + ep, method = 'user/email', 'email' + else: + ep, method = 'oneTimePassword', 'userId' + + login_response = self._download_json( + f'https://api.abema.io/v1/auth/{ep}', None, note='Logging in', + data=json.dumps({ + method: username, + 'password': password + }).encode('utf-8'), headers={ + 'Authorization': f'bearer {self._get_device_token()}', + 'Origin': 'https://abema.tv', + 'Referer': 'https://abema.tv/', + 'Content-Type': 'application/json', + }) + + AbemaTVBaseIE._USERTOKEN = login_response['token'] + self._get_media_token(True) + 
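# An illustrative, standalone sketch of the two-step handshake that
# _get_device_token() and _get_media_token() above perform: register a device
# to obtain a user token, then trade it for a media token. The endpoints,
# query parameters and headers are taken from the extractor code; the
# applicationKeySecret derivation is not repeated here, so a callable
# mirroring AbemaTVBaseIE._generate_aks has to be supplied by the caller.
import json
import urllib.parse
import urllib.request
import uuid


def fetch_media_token(generate_aks):
    device_id = str(uuid.uuid4())
    req = urllib.request.Request(
        'https://api.abema.io/v1/users',
        data=json.dumps({
            'deviceId': device_id,
            'applicationKeySecret': generate_aks(device_id),
        }).encode(),
        headers={'Content-Type': 'application/json'})
    device_token = json.load(urllib.request.urlopen(req))['token']

    query = urllib.parse.urlencode({
        'osName': 'android', 'osVersion': '6.0.1', 'osLang': 'ja_JP',
        'osTimezone': 'Asia/Tokyo', 'appId': 'tv.abema', 'appVersion': '3.27.1',
    })
    req = urllib.request.Request(
        'https://api.abema.io/v1/media/token?' + query,
        headers={'Authorization': 'bearer ' + device_token})
    return json.load(urllib.request.urlopen(req))['token']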
self.cache.store(self._NETRC_MACHINE, username, AbemaTVBaseIE._USERTOKEN) + + def _real_extract(self, url): + # starting download using infojson from this extractor is undefined behavior, + # and never be fixed in the future; you must trigger downloads by directly specifying URL. + # (unless there's a way to hook before downloading by extractor) + video_id, video_type = self._match_valid_url(url).group('id', 'type') + headers = { + 'Authorization': 'Bearer ' + self._get_device_token(), + } + video_type = video_type.split('/')[-1] + + webpage = self._download_webpage(url, video_id) + canonical_url = self._search_regex( + r'(.+?)', webpage, 'title', default=None) + if not title: + jsonld = None + for jld in re.finditer( + r'(?is)(?:)?]+type=(["\']?)application/ld\+json\1[^>]*>(?P.+?)', + webpage): + jsonld = self._parse_json(jld.group('json_ld'), video_id, fatal=False) + if jsonld: + break + if jsonld: + title = jsonld.get('caption') + if not title and video_type == 'now-on-air': + if not self._TIMETABLE: + # cache the timetable because it goes to 5MiB in size (!!) + self._TIMETABLE = self._download_json( + 'https://api.abema.io/v1/timetable/dataSet?debug=false', video_id, + headers=headers) + now = time_seconds(hours=9) + for slot in self._TIMETABLE.get('slots', []): + if slot.get('channelId') != video_id: + continue + if slot['startAt'] <= now and now < slot['endAt']: + title = slot['title'] + break + + # read breadcrumb on top of page + breadcrumb = self._extract_breadcrumb_list(webpage, video_id) + if breadcrumb: + # breadcrumb list translates to: (e.g. 1st test for this IE) + # Home > Anime (genre) > Isekai Shokudo 2 (series name) > Episode 1 "Cheese cakes" "Morning again" (episode title) + # hence this works + info['series'] = breadcrumb[-2] + info['episode'] = breadcrumb[-1] + if not title: + title = info['episode'] + + description = self._html_search_regex( + (r'(.+?)

(.+?)[^?/]+)' + _PAGE_SIZE = 25 + + _TESTS = [{ + 'url': 'https://abema.tv/video/title/90-1597', + 'info_dict': { + 'id': '90-1597', + 'title': 'シャッフルアイランド', + }, + 'playlist_mincount': 2, + }, { + 'url': 'https://abema.tv/video/title/193-132', + 'info_dict': { + 'id': '193-132', + 'title': '真心が届く~僕とスターのオフィス・ラブ!?~', + }, + 'playlist_mincount': 16, + }, { + 'url': 'https://abema.tv/video/title/25-102', + 'info_dict': { + 'id': '25-102', + 'title': 'ソードアート・オンライン アリシゼーション', + }, + 'playlist_mincount': 24, + }] + + def _fetch_page(self, playlist_id, series_version, page): + programs = self._call_api( + f'v1/video/series/{playlist_id}/programs', playlist_id, + note=f'Downloading page {page + 1}', + query={ + 'seriesVersion': series_version, + 'offset': str(page * self._PAGE_SIZE), + 'order': 'seq', + 'limit': str(self._PAGE_SIZE), + }) + yield from ( + self.url_result(f'https://abema.tv/video/episode/{x}') + for x in traverse_obj(programs, ('programs', ..., 'id'))) + + def _entries(self, playlist_id, series_version): + return OnDemandPagedList( + functools.partial(self._fetch_page, playlist_id, series_version), + self._PAGE_SIZE) + + def _real_extract(self, url): + playlist_id = self._match_id(url) + series_info = self._call_api(f'v1/video/series/{playlist_id}', playlist_id) + + return self.playlist_result( + self._entries(playlist_id, series_info['version']), playlist_id=playlist_id, + playlist_title=series_info.get('title'), + playlist_description=series_info.get('content')) diff --git a/plugins/youtube_download/yt_dlp/extractor/academicearth.py b/plugins/youtube_download/yt_dlp/extractor/academicearth.py index 3409550..d9691cb 100644 --- a/plugins/youtube_download/yt_dlp/extractor/academicearth.py +++ b/plugins/youtube_download/yt_dlp/extractor/academicearth.py @@ -1,5 +1,3 @@ -from __future__ import unicode_literals - import re from .common import InfoExtractor diff --git a/plugins/youtube_download/yt_dlp/extractor/acast.py b/plugins/youtube_download/yt_dlp/extractor/acast.py index 63587c5..f2f828f 100644 --- a/plugins/youtube_download/yt_dlp/extractor/acast.py +++ b/plugins/youtube_download/yt_dlp/extractor/acast.py @@ -1,7 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - - from .common import InfoExtractor from ..utils import ( clean_html, diff --git a/plugins/youtube_download/yt_dlp/extractor/acfun.py b/plugins/youtube_download/yt_dlp/extractor/acfun.py new file mode 100644 index 0000000..dc57929 --- /dev/null +++ b/plugins/youtube_download/yt_dlp/extractor/acfun.py @@ -0,0 +1,199 @@ +from .common import InfoExtractor +from ..utils import ( + float_or_none, + format_field, + int_or_none, + traverse_obj, + parse_codecs, + parse_qs, +) + + +class AcFunVideoBaseIE(InfoExtractor): + def _extract_metadata(self, video_id, video_info): + playjson = self._parse_json(video_info['ksPlayJson'], video_id) + + formats, subtitles = [], {} + for video in traverse_obj(playjson, ('adaptationSet', 0, 'representation')): + fmts, subs = self._extract_m3u8_formats_and_subtitles(video['url'], video_id, 'mp4', fatal=False) + formats.extend(fmts) + self._merge_subtitles(subs, target=subtitles) + for f in fmts: + f.update({ + 'fps': float_or_none(video.get('frameRate')), + 'width': int_or_none(video.get('width')), + 'height': int_or_none(video.get('height')), + 'tbr': float_or_none(video.get('avgBitrate')), + **parse_codecs(video.get('codecs', '')) + }) + + return { + 'id': video_id, + 'formats': formats, + 'subtitles': subtitles, + 'duration': float_or_none(video_info.get('durationMillis'), 
1000), + 'timestamp': int_or_none(video_info.get('uploadTime'), 1000), + 'http_headers': {'Referer': 'https://www.acfun.cn/'}, + } + + +class AcFunVideoIE(AcFunVideoBaseIE): + _VALID_URL = r'https?://www\.acfun\.cn/v/ac(?P[_\d]+)' + + _TESTS = [{ + 'url': 'https://www.acfun.cn/v/ac35457073', + 'info_dict': { + 'id': '35457073', + 'ext': 'mp4', + 'duration': 174.208, + 'timestamp': 1656403967, + 'title': '1 8 岁 现 状', + 'description': '“赶紧回去!班主任查班了!”', + 'uploader': '锤子game', + 'uploader_id': '51246077', + 'thumbnail': r're:^https?://.*\.(jpg|jpeg)', + 'upload_date': '20220628', + 'like_count': int, + 'view_count': int, + 'comment_count': int, + 'tags': list, + }, + }, { + # example for len(video_list) > 1 + 'url': 'https://www.acfun.cn/v/ac35468952_2', + 'info_dict': { + 'id': '35468952_2', + 'ext': 'mp4', + 'title': '【动画剧集】Rocket & Groot Season 1(2022)/火箭浣熊与格鲁特第1季 P02 S01E02 十拿九穩', + 'duration': 90.459, + 'uploader': '比令', + 'uploader_id': '37259967', + 'upload_date': '20220629', + 'timestamp': 1656479962, + 'tags': list, + 'like_count': int, + 'view_count': int, + 'comment_count': int, + 'thumbnail': r're:^https?://.*\.(jpg|jpeg)', + 'description': 'md5:67583aaf3a0f933bd606bc8a2d3ebb17', + } + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + json_all = self._search_json(r'window.videoInfo\s*=', webpage, 'videoInfo', video_id) + + title = json_all.get('title') + video_list = json_all.get('videoList') or [] + video_internal_id = traverse_obj(json_all, ('currentVideoInfo', 'id')) + if video_internal_id and len(video_list) > 1: + part_idx, part_video_info = next( + (idx + 1, v) for (idx, v) in enumerate(video_list) + if v['id'] == video_internal_id) + title = f'{title} P{part_idx:02d} {part_video_info["title"]}' + + return { + **self._extract_metadata(video_id, json_all['currentVideoInfo']), + 'title': title, + 'thumbnail': json_all.get('coverUrl'), + 'description': json_all.get('description'), + 'uploader': traverse_obj(json_all, ('user', 'name')), + 'uploader_id': traverse_obj(json_all, ('user', 'href')), + 'tags': traverse_obj(json_all, ('tagList', ..., 'name')), + 'view_count': int_or_none(json_all.get('viewCount')), + 'like_count': int_or_none(json_all.get('likeCountShow')), + 'comment_count': int_or_none(json_all.get('commentCountShow')), + } + + +class AcFunBangumiIE(AcFunVideoBaseIE): + _VALID_URL = r'https?://www\.acfun\.cn/bangumi/(?Paa[_\d]+)' + + _TESTS = [{ + 'url': 'https://www.acfun.cn/bangumi/aa6002917_36188_1745457?ac=2', + 'info_dict': { + 'id': 'aa6002917_36188_1745457__2', + 'ext': 'mp4', + 'title': '【7月】租借女友 水原千鹤角色曲『DATE』特别PV', + 'upload_date': '20200916', + 'timestamp': 1600243813, + 'duration': 92.091, + }, + }, { + 'url': 'https://www.acfun.cn/bangumi/aa5023171_36188_1750645', + 'info_dict': { + 'id': 'aa5023171_36188_1750645', + 'ext': 'mp4', + 'title': '红孩儿之趴趴蛙寻石记 第5话 ', + 'duration': 760.0, + 'season': '红孩儿之趴趴蛙寻石记', + 'season_id': 5023171, + 'season_number': 1, # series has only 1 season + 'episode': 'Episode 5', + 'episode_number': 5, + 'upload_date': '20181223', + 'timestamp': 1545552185, + 'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)', + 'comment_count': int, + }, + }, { + 'url': 'https://www.acfun.cn/bangumi/aa6065485_36188_1885061', + 'info_dict': { + 'id': 'aa6065485_36188_1885061', + 'ext': 'mp4', + 'title': '叽歪老表(第二季) 第5话 坚不可摧', + 'season': '叽歪老表(第二季)', + 'season_number': 2, + 'season_id': 6065485, + 'episode': '坚不可摧', + 'episode_number': 5, + 'upload_date': '20220324', + 
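# A minimal sketch of the shape of AcFun's ksPlayJson payload that
# _extract_metadata() above walks. The sample URLs and values are hypothetical;
# only the key layout (adaptationSet -> representation -> url/avgBitrate/...)
# is taken from the extractor code.
import json

ks_play_json = json.dumps({
    'adaptationSet': [{
        'representation': [
            {'url': 'https://example.invalid/low.m3u8', 'avgBitrate': 800,
             'width': 852, 'height': 480, 'frameRate': 30, 'codecs': 'avc1.64001e,mp4a.40.2'},
            {'url': 'https://example.invalid/high.m3u8', 'avgBitrate': 2500,
             'width': 1920, 'height': 1080, 'frameRate': 30, 'codecs': 'avc1.640028,mp4a.40.2'},
        ],
    }],
})

# pick the highest-bitrate representation, like a format sort would
representations = json.loads(ks_play_json)['adaptationSet'][0]['representation']
best = max(representations, key=lambda r: r.get('avgBitrate') or 0)
print(best['url'], '%dx%d@%dfps' % (best['width'], best['height'], best['frameRate']))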
'timestamp': 1648082786, + 'duration': 105.002, + 'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)', + 'comment_count': int, + }, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + ac_idx = parse_qs(url).get('ac', [None])[-1] + video_id = f'{video_id}{format_field(ac_idx, None, "__%s")}' + + webpage = self._download_webpage(url, video_id) + json_bangumi_data = self._search_json(r'window.bangumiData\s*=', webpage, 'bangumiData', video_id) + + if ac_idx: + video_info = json_bangumi_data['hlVideoInfo'] + return { + **self._extract_metadata(video_id, video_info), + 'title': video_info.get('title'), + } + + video_info = json_bangumi_data['currentVideoInfo'] + + season_id = json_bangumi_data.get('bangumiId') + season_number = season_id and next(( + idx for idx, v in enumerate(json_bangumi_data.get('relatedBangumis') or [], 1) + if v.get('id') == season_id), 1) + + json_bangumi_list = self._search_json( + r'window\.bangumiList\s*=', webpage, 'bangumiList', video_id, fatal=False) + video_internal_id = int_or_none(traverse_obj(json_bangumi_data, ('currentVideoInfo', 'id'))) + episode_number = video_internal_id and next(( + idx for idx, v in enumerate(json_bangumi_list.get('items') or [], 1) + if v.get('videoId') == video_internal_id), None) + + return { + **self._extract_metadata(video_id, video_info), + 'title': json_bangumi_data.get('showTitle'), + 'thumbnail': json_bangumi_data.get('image'), + 'season': json_bangumi_data.get('bangumiTitle'), + 'season_id': season_id, + 'season_number': season_number, + 'episode': json_bangumi_data.get('title'), + 'episode_number': episode_number, + 'comment_count': int_or_none(json_bangumi_data.get('commentCount')), + } diff --git a/plugins/youtube_download/yt_dlp/extractor/adn.py b/plugins/youtube_download/yt_dlp/extractor/adn.py index 0863e0d..f1f55e8 100644 --- a/plugins/youtube_download/yt_dlp/extractor/adn.py +++ b/plugins/youtube_download/yt_dlp/extractor/adn.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - import base64 import binascii import json @@ -31,30 +28,34 @@ from ..utils import ( class ADNIE(InfoExtractor): - IE_DESC = 'Anime Digital Network' - _VALID_URL = r'https?://(?:www\.)?animedigitalnetwork\.fr/video/[^/]+/(?P\d+)' - _TEST = { - 'url': 'http://animedigitalnetwork.fr/video/blue-exorcist-kyoto-saga/7778-episode-1-debut-des-hostilites', - 'md5': '0319c99885ff5547565cacb4f3f9348d', + IE_DESC = 'Animation Digital Network' + _VALID_URL = r'https?://(?:www\.)?(?:animation|anime)digitalnetwork\.fr/video/[^/]+/(?P\d+)' + _TESTS = [{ + 'url': 'https://animationdigitalnetwork.fr/video/fruits-basket/9841-episode-1-a-ce-soir', + 'md5': '1c9ef066ceb302c86f80c2b371615261', 'info_dict': { - 'id': '7778', + 'id': '9841', 'ext': 'mp4', - 'title': 'Blue Exorcist - Kyôto Saga - Episode 1', - 'description': 'md5:2f7b5aa76edbc1a7a92cedcda8a528d5', - 'series': 'Blue Exorcist - Kyôto Saga', - 'duration': 1467, - 'release_date': '20170106', + 'title': 'Fruits Basket - Episode 1', + 'description': 'md5:14be2f72c3c96809b0ca424b0097d336', + 'series': 'Fruits Basket', + 'duration': 1437, + 'release_date': '20190405', 'comment_count': int, 'average_rating': float, - 'season_number': 2, - 'episode': 'Début des hostilités', + 'season_number': 1, + 'episode': 'À ce soir !', 'episode_number': 1, - } - } + }, + 'skip': 'Only available in region (FR, ...)', + }, { + 'url': 'http://animedigitalnetwork.fr/video/blue-exorcist-kyoto-saga/7778-episode-1-debut-des-hostilites', + 'only_matching': True, + }] - _NETRC_MACHINE = 
'animedigitalnetwork' - _BASE_URL = 'http://animedigitalnetwork.fr' - _API_BASE_URL = 'https://gw.api.animedigitalnetwork.fr/' + _NETRC_MACHINE = 'animationdigitalnetwork' + _BASE = 'animationdigitalnetwork.fr' + _API_BASE_URL = 'https://gw.api.' + _BASE + '/' _PLAYER_BASE_URL = _API_BASE_URL + 'player/' _HEADERS = {} _LOGIN_ERR_MESSAGE = 'Unable to log in' @@ -78,14 +79,14 @@ class ADNIE(InfoExtractor): if subtitle_location: enc_subtitles = self._download_webpage( subtitle_location, video_id, 'Downloading subtitles data', - fatal=False, headers={'Origin': 'https://animedigitalnetwork.fr'}) + fatal=False, headers={'Origin': 'https://' + self._BASE}) if not enc_subtitles: return None - # http://animedigitalnetwork.fr/components/com_vodvideo/videojs/adn-vjs.min.js + # http://animationdigitalnetwork.fr/components/com_vodvideo/videojs/adn-vjs.min.js dec_subtitles = unpad_pkcs7(aes_cbc_decrypt_bytes( compat_b64decode(enc_subtitles[24:]), - binascii.unhexlify(self._K + 'ab9f52f5baae7c72'), + binascii.unhexlify(self._K + '7fac1178830cfe0c'), compat_b64decode(enc_subtitles[:24]))) subtitles_json = self._parse_json(dec_subtitles.decode(), None, fatal=False) if not subtitles_json: @@ -126,10 +127,7 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text''' }]) return subtitles - def _real_initialize(self): - username, password = self._get_login_info() - if not username: - return + def _perform_login(self, username, password): try: access_token = (self._download_json( self._API_BASE_URL + 'authentication/login', None, @@ -170,7 +168,7 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text''' }, data=b'')['token'] links_url = try_get(options, lambda x: x['video']['url']) or (video_base_url + 'link') - self._K = ''.join([random.choice('0123456789abcdef') for _ in range(16)]) + self._K = ''.join(random.choices('0123456789abcdef', k=16)) message = bytes_to_intlist(json.dumps({ 'k': self._K, 't': token, @@ -237,7 +235,6 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text''' for f in m3u8_formats: f['language'] = 'fr' formats.extend(m3u8_formats) - self._sort_formats(formats) video = (self._download_json( self._API_BASE_URL + 'video/%s' % video_id, video_id, diff --git a/plugins/youtube_download/yt_dlp/extractor/adobeconnect.py b/plugins/youtube_download/yt_dlp/extractor/adobeconnect.py index e688ddd..8963b12 100644 --- a/plugins/youtube_download/yt_dlp/extractor/adobeconnect.py +++ b/plugins/youtube_download/yt_dlp/extractor/adobeconnect.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - from .common import InfoExtractor from ..compat import ( compat_parse_qs, @@ -14,7 +11,7 @@ class AdobeConnectIE(InfoExtractor): def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) - title = self._html_search_regex(r'(.+?)', webpage, 'title') + title = self._html_extract_title(webpage) qs = compat_parse_qs(self._search_regex(r"swfUrl\s*=\s*'([^']+)'", webpage, 'swf url').split('?')[1]) is_live = qs.get('isLive', ['false'])[0] == 'true' formats = [] diff --git a/plugins/youtube_download/yt_dlp/extractor/adobepass.py b/plugins/youtube_download/yt_dlp/extractor/adobepass.py index bebcafa..e5944f7 100644 --- a/plugins/youtube_download/yt_dlp/extractor/adobepass.py +++ b/plugins/youtube_download/yt_dlp/extractor/adobepass.py @@ -1,26 +1,20 @@ -# coding: utf-8 -from __future__ import unicode_literals - +import getpass import json import re import time +import urllib.error import 
xml.etree.ElementTree as etree from .common import InfoExtractor -from ..compat import ( - compat_kwargs, - compat_urlparse, - compat_getpass -) +from ..compat import compat_urlparse from ..utils import ( - unescapeHTML, - urlencode_postdata, - unified_timestamp, - ExtractorError, NO_DEFAULT, + ExtractorError, + unescapeHTML, + unified_timestamp, + urlencode_postdata, ) - MSO_INFO = { 'DTV': { 'name': 'DIRECTV', @@ -1345,10 +1339,20 @@ MSO_INFO = { 'username_field': 'username', 'password_field': 'password', }, + 'Suddenlink': { + 'name': 'Suddenlink', + 'username_field': 'username', + 'password_field': 'password', + }, + 'AlticeOne': { + 'name': 'Optimum TV', + 'username_field': 'j_username', + 'password_field': 'j_password', + }, } -class AdobePassIE(InfoExtractor): +class AdobePassIE(InfoExtractor): # XXX: Conventionally, base classes should end with BaseIE/InfoExtractor _SERVICE_PROVIDER_TEMPLATE = 'https://sp.auth.adobe.com/adobe-services/%s' _USER_AGENT = 'Mozilla/5.0 (X11; Linux i686; rv:47.0) Gecko/20100101 Firefox/47.0' _MVPD_CACHE = 'ap-mvpd' @@ -1360,7 +1364,7 @@ class AdobePassIE(InfoExtractor): headers.update(kwargs.get('headers', {})) kwargs['headers'] = headers return super(AdobePassIE, self)._download_webpage_handle( - *args, **compat_kwargs(kwargs)) + *args, **kwargs) @staticmethod def _get_mvpd_resource(provider_id, title, guid, rating): @@ -1429,32 +1433,34 @@ class AdobePassIE(InfoExtractor): guid = xml_text(resource, 'guid') if '<' in resource else resource count = 0 while count < 2: - requestor_info = self._downloader.cache.load(self._MVPD_CACHE, requestor_id) or {} + requestor_info = self.cache.load(self._MVPD_CACHE, requestor_id) or {} authn_token = requestor_info.get('authn_token') if authn_token and is_expired(authn_token, 'simpleTokenExpires'): authn_token = None if not authn_token: - # TODO add support for other TV Providers mso_id = self.get_param('ap_mso') + if mso_id: + username, password = self._get_login_info('ap_username', 'ap_password', mso_id) + if not username or not password: + raise_mvpd_required() + mso_info = MSO_INFO[mso_id] + + provider_redirect_page_res = self._download_webpage_handle( + self._SERVICE_PROVIDER_TEMPLATE % 'authenticate/saml', video_id, + 'Downloading Provider Redirect Page', query={ + 'noflash': 'true', + 'mso_id': mso_id, + 'requestor_id': requestor_id, + 'no_iframe': 'false', + 'domain_name': 'adobe.com', + 'redirect_url': url, + }) + elif not self._cookies_passed: + raise_mvpd_required() + if not mso_id: - raise_mvpd_required() - username, password = self._get_login_info('ap_username', 'ap_password', mso_id) - if not username or not password: - raise_mvpd_required() - mso_info = MSO_INFO[mso_id] - - provider_redirect_page_res = self._download_webpage_handle( - self._SERVICE_PROVIDER_TEMPLATE % 'authenticate/saml', video_id, - 'Downloading Provider Redirect Page', query={ - 'noflash': 'true', - 'mso_id': mso_id, - 'requestor_id': requestor_id, - 'no_iframe': 'false', - 'domain_name': 'adobe.com', - 'redirect_url': url, - }) - - if mso_id == 'Comcast_SSO': + pass + elif mso_id == 'Comcast_SSO': # Comcast page flow varies by video site and whether you # are on Comcast's network. 
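# A small sketch of how the MSO_INFO table above is meant to be consumed: the
# per-provider field names decide which form keys carry the credentials when
# the provider login form is posted. The form post itself is stubbed out; the
# field names are the ones added for Suddenlink and Optimum TV (AlticeOne) in
# this patch, and the credentials below are placeholders.
MSO_INFO = {
    'Suddenlink': {'name': 'Suddenlink', 'username_field': 'username', 'password_field': 'password'},
    'AlticeOne': {'name': 'Optimum TV', 'username_field': 'j_username', 'password_field': 'j_password'},
}


def build_login_form(mso_id, username, password):
    mso_info = MSO_INFO[mso_id]
    form_data = {
        mso_info.get('username_field', 'username'): username,
        mso_info.get('password_field', 'password'): password,
    }
    # Cablevision/AlticeOne additionally require this field (see the hunk below)
    if mso_id in ('Cablevision', 'AlticeOne'):
        form_data['_eventId_proceed'] = ''
    return form_data


print(build_login_form('AlticeOne', 'user@example.com', 's3cret'))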
                     provider_redirect_page, urlh = provider_redirect_page_res
@@ -1502,7 +1508,7 @@
                             'send_confirm_link': False,
                             'send_token': True
                         }))
-                    philo_code = compat_getpass('Type auth code you have received [Return]: ')
+                    philo_code = getpass.getpass('Type auth code you have received [Return]: ')
                     self._download_webpage(
                         'https://idp.philo.com/auth/update/login_code', video_id, 'Submitting token', data=urlencode_postdata({
                             'token': philo_code
@@ -1635,6 +1641,58 @@
                         urlh.geturl(), video_id, 'Sending final bookend',
                         query=hidden_data)
 
+                    post_form(mvpd_confirm_page_res, 'Confirming Login')
+                elif mso_id == 'Suddenlink':
+                    # Suddenlink is similar to SlingTV in using a tab history count and a meta refresh,
+                    # but they also do a dynamic redirect using javascript that has to be followed as well
+                    first_bookend_page, urlh = post_form(
+                        provider_redirect_page_res, 'Pressing Continue...')
+
+                    hidden_data = self._hidden_inputs(first_bookend_page)
+                    hidden_data['history_val'] = 1
+
+                    provider_login_redirect_page_res = self._download_webpage_handle(
+                        urlh.geturl(), video_id, 'Sending First Bookend',
+                        query=hidden_data)
+
+                    provider_login_redirect_page, urlh = provider_login_redirect_page_res
+
+                    # Some website partners seem not to have the extra ajaxurl redirect step, so we check if we already
+                    # have the login prompt or not
+                    if 'id="password" type="password" name="password"' in provider_login_redirect_page:
+                        provider_login_page_res = provider_login_redirect_page_res
+                    else:
+                        provider_tryauth_url = self._html_search_regex(
+                            r'url:\s*[\'"]([^\'"]+)', provider_login_redirect_page, 'ajaxurl')
+                        provider_tryauth_page = self._download_webpage(
+                            provider_tryauth_url, video_id, 'Submitting TryAuth',
+                            query=hidden_data)
+
+                        provider_login_page_res = self._download_webpage_handle(
+                            f'https://authorize.suddenlink.net/saml/module.php/authSynacor/login.php?AuthState={provider_tryauth_page}',
+                            video_id, 'Getting Login Page',
+                            query=hidden_data)
+
+                    provider_association_redirect, urlh = post_form(
+                        provider_login_page_res, 'Logging in', {
+                            mso_info['username_field']: username,
+                            mso_info['password_field']: password
+                        })
+
+                    provider_refresh_redirect_url = extract_redirect_url(
+                        provider_association_redirect, url=urlh.geturl())
+
+                    last_bookend_page, urlh = self._download_webpage_handle(
+                        provider_refresh_redirect_url, video_id,
+                        'Downloading Auth Association Redirect Page')
+
+                    hidden_data = self._hidden_inputs(last_bookend_page)
+                    hidden_data['history_val'] = 3
+
+                    mvpd_confirm_page_res = self._download_webpage_handle(
+                        urlh.geturl(), video_id, 'Sending Final Bookend',
+                        query=hidden_data)
+
+                    post_form(mvpd_confirm_page_res, 'Confirming Login')
                 else:
                     # Some providers (e.g.
DIRECTV NOW) have another meta refresh @@ -1652,25 +1710,30 @@ class AdobePassIE(InfoExtractor): mso_info.get('username_field', 'username'): username, mso_info.get('password_field', 'password'): password } - if mso_id == 'Cablevision': + if mso_id in ('Cablevision', 'AlticeOne'): form_data['_eventId_proceed'] = '' mvpd_confirm_page_res = post_form(provider_login_page_res, 'Logging in', form_data) if mso_id != 'Rogers': post_form(mvpd_confirm_page_res, 'Confirming Login') - session = self._download_webpage( - self._SERVICE_PROVIDER_TEMPLATE % 'session', video_id, - 'Retrieving Session', data=urlencode_postdata({ - '_method': 'GET', - 'requestor_id': requestor_id, - }), headers=mvpd_headers) + try: + session = self._download_webpage( + self._SERVICE_PROVIDER_TEMPLATE % 'session', video_id, + 'Retrieving Session', data=urlencode_postdata({ + '_method': 'GET', + 'requestor_id': requestor_id, + }), headers=mvpd_headers) + except ExtractorError as e: + if not mso_id and isinstance(e.cause, urllib.error.HTTPError) and e.cause.code == 401: + raise_mvpd_required() + raise if '\d+)' + _EMBED_REGEX = [r']+src=[\'"](?P(?:https?:)?//video\.tv\.adobe\.com/v/\d+[^"]+)[\'"]'] _TEST = { # From https://helpx.adobe.com/acrobat/how-to/new-experience-acrobat-dc.html?set=acrobat--get-started--essential-beginners @@ -270,7 +268,6 @@ class AdobeTVVideoIE(AdobeTVBaseIE): 'width': int_or_none(source.get('width') or None), 'url': source_src, }) - self._sort_formats(formats) # For both metadata and downloaded files the duration varies among # formats. I just pick the max one diff --git a/plugins/youtube_download/yt_dlp/extractor/adultswim.py b/plugins/youtube_download/yt_dlp/extractor/adultswim.py index c97cfc1..bd29eb4 100644 --- a/plugins/youtube_download/yt_dlp/extractor/adultswim.py +++ b/plugins/youtube_download/yt_dlp/extractor/adultswim.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - import json from .turner import TurnerBaseIE @@ -183,7 +180,6 @@ class AdultSwimIE(TurnerBaseIE): info['subtitles'].setdefault('en', []).append({ 'url': asset_url, }) - self._sort_formats(info['formats']) return info else: diff --git a/plugins/youtube_download/yt_dlp/extractor/aenetworks.py b/plugins/youtube_download/yt_dlp/extractor/aenetworks.py index 8025de5..d7c4010 100644 --- a/plugins/youtube_download/yt_dlp/extractor/aenetworks.py +++ b/plugins/youtube_download/yt_dlp/extractor/aenetworks.py @@ -1,7 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - - from .theplatform import ThePlatformIE from ..utils import ( ExtractorError, @@ -12,7 +8,7 @@ from ..utils import ( ) -class AENetworksBaseIE(ThePlatformIE): +class AENetworksBaseIE(ThePlatformIE): # XXX: Do not subclass from concrete IE _BASE_URL_REGEX = r'''(?x)https?:// (?:(?:www|play|watch)\.)? 
(?P @@ -32,14 +28,17 @@ class AENetworksBaseIE(ThePlatformIE): } def _extract_aen_smil(self, smil_url, video_id, auth=None): - query = {'mbr': 'true'} + query = { + 'mbr': 'true', + 'formats': 'M3U+none,MPEG-DASH+none,MPEG4,MP3', + } if auth: query['auth'] = auth TP_SMIL_QUERY = [{ 'assetTypes': 'high_video_ak', - 'switch': 'hls_high_ak' + 'switch': 'hls_high_ak', }, { - 'assetTypes': 'high_video_s3' + 'assetTypes': 'high_video_s3', }, { 'assetTypes': 'high_video_s3', 'switch': 'hls_high_fastly', @@ -63,7 +62,6 @@ class AENetworksBaseIE(ThePlatformIE): subtitles = self._merge_subtitles(subtitles, tp_subtitles) if last_e and not formats: raise last_e - self._sort_formats(formats) return { 'id': video_id, 'formats': formats, @@ -305,7 +303,6 @@ class HistoryTopicIE(AENetworksBaseIE): class HistoryPlayerIE(AENetworksBaseIE): IE_NAME = 'history:player' _VALID_URL = r'https?://(?:www\.)?(?P(?:history|biography)\.com)/player/(?P\d+)' - _TESTS = [] def _real_extract(self, url): domain, video_id = self._match_valid_url(url).groups() diff --git a/plugins/youtube_download/yt_dlp/extractor/aeonco.py b/plugins/youtube_download/yt_dlp/extractor/aeonco.py new file mode 100644 index 0000000..4655862 --- /dev/null +++ b/plugins/youtube_download/yt_dlp/extractor/aeonco.py @@ -0,0 +1,40 @@ +from .common import InfoExtractor +from .vimeo import VimeoIE + + +class AeonCoIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?aeon\.co/videos/(?P[^/?]+)' + _TESTS = [{ + 'url': 'https://aeon.co/videos/raw-solar-storm-footage-is-the-punk-rock-antidote-to-sleek-james-webb-imagery', + 'md5': 'e5884d80552c9b6ea8d268a258753362', + 'info_dict': { + 'id': '1284717', + 'ext': 'mp4', + 'title': 'Brilliant Noise', + 'thumbnail': 'https://i.vimeocdn.com/video/21006315-1a1e49da8b07fd908384a982b4ba9ff0268c509a474576ebdf7b1392f4acae3b-d_960', + 'uploader': 'Semiconductor', + 'uploader_id': 'semiconductor', + 'uploader_url': 'https://vimeo.com/semiconductor', + 'duration': 348 + } + }, { + 'url': 'https://aeon.co/videos/dazzling-timelapse-shows-how-microbes-spoil-our-food-and-sometimes-enrich-it', + 'md5': '4e5f3dad9dbda0dbfa2da41a851e631e', + 'info_dict': { + 'id': '728595228', + 'ext': 'mp4', + 'title': 'Wrought', + 'thumbnail': 'https://i.vimeocdn.com/video/1484618528-c91452611f9a4e4497735a533da60d45b2fe472deb0c880f0afaab0cd2efb22a-d_1280', + 'uploader': 'Biofilm Productions', + 'uploader_id': 'user140352216', + 'uploader_url': 'https://vimeo.com/user140352216', + 'duration': 1344 + } + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + vimeo_id = self._search_regex(r'hosterId":\s*"(?P[0-9]+)', webpage, 'vimeo id') + vimeo_url = VimeoIE._smuggle_referrer(f'https://player.vimeo.com/video/{vimeo_id}', 'https://aeon.co') + return self.url_result(vimeo_url, VimeoIE) diff --git a/plugins/youtube_download/yt_dlp/extractor/afreecatv.py b/plugins/youtube_download/yt_dlp/extractor/afreecatv.py index 8085348..9276fe7 100644 --- a/plugins/youtube_download/yt_dlp/extractor/afreecatv.py +++ b/plugins/youtube_download/yt_dlp/extractor/afreecatv.py @@ -1,14 +1,12 @@ -# coding: utf-8 -from __future__ import unicode_literals - +import functools import re from .common import InfoExtractor -from ..compat import compat_xpath from ..utils import ( + ExtractorError, + OnDemandPagedList, date_from_str, determine_ext, - ExtractorError, int_or_none, qualities, traverse_obj, @@ -32,7 +30,7 @@ class AfreecaTVIE(InfoExtractor): /app/(?:index|read_ucc_bbs)\.cgi| 
/player/[Pp]layer\.(?:swf|html) )\?.*?\bnTitleNo=| - vod\.afreecatv\.com/PLAYER/STATION/ + vod\.afreecatv\.com/(PLAYER/STATION|player)/ ) (?P\d+) ''' @@ -170,6 +168,9 @@ class AfreecaTVIE(InfoExtractor): }, { 'url': 'http://vod.afreecatv.com/PLAYER/STATION/15055030', 'only_matching': True, + }, { + 'url': 'http://vod.afreecatv.com/player/15055030', + 'only_matching': True, }] @staticmethod @@ -181,14 +182,7 @@ class AfreecaTVIE(InfoExtractor): video_key['part'] = int(m.group('part')) return video_key - def _real_initialize(self): - self._login() - - def _login(self): - username, password = self._get_login_info() - if username is None: - return - + def _perform_login(self, username, password): login_form = { 'szWork': 'login', 'szType': 'json', @@ -284,7 +278,7 @@ class AfreecaTVIE(InfoExtractor): else: raise ExtractorError('Unable to download video info') - video_element = video_xml.findall(compat_xpath('./track/video'))[-1] + video_element = video_xml.findall('./track/video')[-1] if video_element is None or video_element.text is None: raise ExtractorError( 'Video %s does not exist' % video_id, expected=True) @@ -314,7 +308,7 @@ class AfreecaTVIE(InfoExtractor): if not video_url: entries = [] - file_elements = video_element.findall(compat_xpath('./file')) + file_elements = video_element.findall('./file') one = len(file_elements) == 1 for file_num, file_element in enumerate(file_elements, start=1): file_url = url_or_none(file_element.text) @@ -344,7 +338,6 @@ class AfreecaTVIE(InfoExtractor): }] if not formats and not self.get_param('ignore_no_formats'): continue - self._sort_formats(formats) file_info = common_entry.copy() file_info.update({ 'id': format_id, @@ -386,7 +379,7 @@ class AfreecaTVIE(InfoExtractor): return info -class AfreecaTVLiveIE(AfreecaTVIE): +class AfreecaTVLiveIE(AfreecaTVIE): # XXX: Do not subclass from concrete IE IE_NAME = 'afreecatv:live' _VALID_URL = r'https?://play\.afreeca(?:tv)?\.com/(?P[^/]+)(?:/(?P\d+))?' 
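# The compat_xpath shim was dropped above in favour of calling
# xml.etree.ElementTree path lookups directly; a self-contained illustration of
# the './track/video' and './file' queries on a made-up response document
# (the real AfreecaTV XML is more elaborate).
import xml.etree.ElementTree as etree

video_xml = etree.fromstring('''
<result>
  <track>
    <video><file>https://example.invalid/part1.mp4</file></video>
    <video><file>https://example.invalid/part2.mp4</file></video>
  </track>
</result>''')

# like the extractor, take the last <video> element on the track,
# then walk its <file> children for the part URLs
video_element = video_xml.findall('./track/video')[-1]
for file_element in video_element.findall('./file'):
    print(file_element.text)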
@@ -416,26 +409,35 @@ class AfreecaTVLiveIE(AfreecaTVIE): def _real_extract(self, url): broadcaster_id, broadcast_no = self._match_valid_url(url).group('id', 'bno') + password = self.get_param('videopassword') info = self._download_json(self._LIVE_API_URL, broadcaster_id, fatal=False, data=urlencode_postdata({'bid': broadcaster_id})) or {} channel_info = info.get('CHANNEL') or {} broadcaster_id = channel_info.get('BJID') or broadcaster_id broadcast_no = channel_info.get('BNO') or broadcast_no + password_protected = channel_info.get('BPWD') if not broadcast_no: raise ExtractorError(f'Unable to extract broadcast number ({broadcaster_id} may not be live)', expected=True) + if password_protected == 'Y' and password is None: + raise ExtractorError( + 'This livestream is protected by a password, use the --video-password option', + expected=True) formats = [] quality_key = qualities(self._QUALITIES) for quality_str in self._QUALITIES: + params = { + 'bno': broadcast_no, + 'stream_type': 'common', + 'type': 'aid', + 'quality': quality_str, + } + if password is not None: + params['pwd'] = password aid_response = self._download_json( self._LIVE_API_URL, broadcast_no, fatal=False, - data=urlencode_postdata({ - 'bno': broadcast_no, - 'stream_type': 'common', - 'type': 'aid', - 'quality': quality_str, - }), + data=urlencode_postdata(params), note=f'Downloading access token for {quality_str} stream', errnote=f'Unable to download access token for {quality_str} stream') aid = traverse_obj(aid_response, ('CHANNEL', 'AID')) @@ -461,8 +463,6 @@ class AfreecaTVLiveIE(AfreecaTVIE): 'quality': quality_key(quality_str), }) - self._sort_formats(formats) - station_info = self._download_json( 'https://st.afreecatv.com/api/get_station_status.php', broadcast_no, query={'szBjId': broadcaster_id}, fatal=False, @@ -477,3 +477,57 @@ class AfreecaTVLiveIE(AfreecaTVIE): 'formats': formats, 'is_live': True, } + + +class AfreecaTVUserIE(InfoExtractor): + IE_NAME = 'afreecatv:user' + _VALID_URL = r'https?://bj\.afreeca(?:tv)?\.com/(?P[^/]+)/vods/?(?P[^/]+)?' 
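# A sketch of the access-token request body assembled per quality in the hunk
# above: the password is only attached when --video-password was supplied. The
# field names come from the code; the endpoint handling is stubbed out and the
# sample quality labels below are hypothetical.
from urllib.parse import urlencode


def build_aid_request(broadcast_no, quality, password=None):
    params = {
        'bno': broadcast_no,
        'stream_type': 'common',
        'type': 'aid',
        'quality': quality,
    }
    if password is not None:
        params['pwd'] = password
    return urlencode(params).encode()


print(build_aid_request(12345678, 'original'))
print(build_aid_request(12345678, 'hd', password='s3cret'))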
+ _TESTS = [{ + 'url': 'https://bj.afreecatv.com/ryuryu24/vods/review', + 'info_dict': { + '_type': 'playlist', + 'id': 'ryuryu24', + 'title': 'ryuryu24 - review', + }, + 'playlist_count': 218, + }, { + 'url': 'https://bj.afreecatv.com/parang1995/vods/highlight', + 'info_dict': { + '_type': 'playlist', + 'id': 'parang1995', + 'title': 'parang1995 - highlight', + }, + 'playlist_count': 997, + }, { + 'url': 'https://bj.afreecatv.com/ryuryu24/vods', + 'info_dict': { + '_type': 'playlist', + 'id': 'ryuryu24', + 'title': 'ryuryu24 - all', + }, + 'playlist_count': 221, + }, { + 'url': 'https://bj.afreecatv.com/ryuryu24/vods/balloonclip', + 'info_dict': { + '_type': 'playlist', + 'id': 'ryuryu24', + 'title': 'ryuryu24 - balloonclip', + }, + 'playlist_count': 0, + }] + _PER_PAGE = 60 + + def _fetch_page(self, user_id, user_type, page): + page += 1 + info = self._download_json(f'https://bjapi.afreecatv.com/api/{user_id}/vods/{user_type}', user_id, + query={'page': page, 'per_page': self._PER_PAGE, 'orderby': 'reg_date'}, + note=f'Downloading {user_type} video page {page}') + for item in info['data']: + yield self.url_result( + f'https://vod.afreecatv.com/player/{item["title_no"]}/', AfreecaTVIE, item['title_no']) + + def _real_extract(self, url): + user_id, user_type = self._match_valid_url(url).group('id', 'slug_type') + user_type = user_type or 'all' + entries = OnDemandPagedList(functools.partial(self._fetch_page, user_id, user_type), self._PER_PAGE) + return self.playlist_result(entries, user_id, f'{user_id} - {user_type}') diff --git a/plugins/youtube_download/yt_dlp/extractor/agora.py b/plugins/youtube_download/yt_dlp/extractor/agora.py new file mode 100644 index 0000000..abb2d3f --- /dev/null +++ b/plugins/youtube_download/yt_dlp/extractor/agora.py @@ -0,0 +1,251 @@ +import functools +import uuid + +from .common import InfoExtractor +from ..utils import ( + ExtractorError, + OnDemandPagedList, + int_or_none, + month_by_name, + parse_duration, + try_call, +) + + +class WyborczaVideoIE(InfoExtractor): + # this id is not an article id, it has to be extracted from the article + _VALID_URL = r'(?:wyborcza:video:|https?://wyborcza\.pl/(?:api-)?video/)(?P\d+)' + IE_NAME = 'wyborcza:video' + _TESTS = [{ + 'url': 'wyborcza:video:26207634', + 'info_dict': { + 'id': '26207634', + 'ext': 'mp4', + 'title': '- Polska w 2020 r. jest innym państwem niż w 2015 r. 
Nie zmieniła się konstytucja, ale jest to już inny ustrój - mówi Adam Bodnar', + 'description': ' ', + 'uploader': 'Dorota Roman', + 'duration': 2474, + 'thumbnail': r're:https://.+\.jpg', + }, + }, { + 'url': 'https://wyborcza.pl/video/26207634', + 'only_matching': True, + }, { + 'url': 'https://wyborcza.pl/api-video/26207634', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + meta = self._download_json(f'https://wyborcza.pl/api-video/{video_id}', video_id) + + formats = [] + base_url = meta['redirector'].replace('http://', 'https://') + meta['basePath'] + for quality in ('standard', 'high'): + if not meta['files'].get(quality): + continue + formats.append({ + 'url': base_url + meta['files'][quality], + 'height': int_or_none( + self._search_regex( + r'p(\d+)[a-z]+\.mp4$', meta['files'][quality], + 'mp4 video height', default=None)), + 'format_id': quality, + }) + if meta['files'].get('dash'): + formats.extend(self._extract_mpd_formats(base_url + meta['files']['dash'], video_id)) + + return { + 'id': video_id, + 'formats': formats, + 'title': meta.get('title'), + 'description': meta.get('lead'), + 'uploader': meta.get('signature'), + 'thumbnail': meta.get('imageUrl'), + 'duration': meta.get('duration'), + } + + +class WyborczaPodcastIE(InfoExtractor): + _VALID_URL = r'''(?x) + https?://(?:www\.)?(?: + wyborcza\.pl/podcast(?:/0,172673\.html)?| + wysokieobcasy\.pl/wysokie-obcasy/0,176631\.html + )(?:\?(?:[^&#]+?&)*podcast=(?P\d+))? + ''' + _TESTS = [{ + 'url': 'https://wyborcza.pl/podcast/0,172673.html?podcast=100720#S.main_topic-K.C-B.6-L.1.podcast', + 'info_dict': { + 'id': '100720', + 'ext': 'mp3', + 'title': 'Cyfrodziewczyny. Kim były pionierki polskiej informatyki ', + 'uploader': 'Michał Nogaś ', + 'upload_date': '20210117', + 'description': 'md5:49f0a06ffc4c1931210d3ab1416a651d', + 'duration': 3684.0, + 'thumbnail': r're:https://.+\.jpg', + }, + }, { + 'url': 'https://www.wysokieobcasy.pl/wysokie-obcasy/0,176631.html?podcast=100673', + 'info_dict': { + 'id': '100673', + 'ext': 'mp3', + 'title': 'Czym jest ubóstwo menstruacyjne i dlaczego dotyczy każdej i każdego z nas?', + 'uploader': 'Agnieszka Urazińska ', + 'upload_date': '20210115', + 'description': 'md5:c161dc035f8dbb60077011fc41274899', + 'duration': 1803.0, + 'thumbnail': r're:https://.+\.jpg', + }, + }, { + 'url': 'https://wyborcza.pl/podcast', + 'info_dict': { + 'id': '334', + 'title': 'Gościnnie: Wyborcza, 8:10', + 'series': 'Gościnnie: Wyborcza, 8:10', + }, + 'playlist_mincount': 370, + }, { + 'url': 'https://www.wysokieobcasy.pl/wysokie-obcasy/0,176631.html', + 'info_dict': { + 'id': '395', + 'title': 'Gościnnie: Wysokie Obcasy', + 'series': 'Gościnnie: Wysokie Obcasy', + }, + 'playlist_mincount': 12, + }] + + def _real_extract(self, url): + podcast_id = self._match_id(url) + + if not podcast_id: # playlist + podcast_id = '395' if 'wysokieobcasy.pl/' in url else '334' + return self.url_result(TokFMAuditionIE._create_url(podcast_id), TokFMAuditionIE, podcast_id) + + meta = self._download_json('https://wyborcza.pl/api/podcast', podcast_id, + query={'guid': podcast_id, 'type': 'wo' if 'wysokieobcasy.pl/' in url else None}) + + day, month, year = self._search_regex(r'^(\d\d?) 
(\w+) (\d{4})$', meta.get('publishedDate'), + 'upload date', group=(1, 2, 3), default=(None, None, None)) + return { + 'id': podcast_id, + 'url': meta['url'], + 'title': meta.get('title'), + 'description': meta.get('description'), + 'thumbnail': meta.get('imageUrl'), + 'duration': parse_duration(meta.get('duration')), + 'uploader': meta.get('author'), + 'upload_date': try_call(lambda: f'{year}{month_by_name(month, lang="pl"):0>2}{day:0>2}'), + } + + +class TokFMPodcastIE(InfoExtractor): + _VALID_URL = r'(?:https?://audycje\.tokfm\.pl/podcast/|tokfm:podcast:)(?P\d+),?' + IE_NAME = 'tokfm:podcast' + _TESTS = [{ + 'url': 'https://audycje.tokfm.pl/podcast/91275,-Systemowy-rasizm-Czy-zamieszki-w-USA-po-morderstwie-w-Minneapolis-doprowadza-do-zmian-w-sluzbach-panstwowych', + 'info_dict': { + 'id': '91275', + 'ext': 'aac', + 'title': 'md5:a9b15488009065556900169fb8061cce', + 'episode': 'md5:a9b15488009065556900169fb8061cce', + 'series': 'Analizy', + }, + }] + + def _real_extract(self, url): + media_id = self._match_id(url) + + # in case it breaks see this but it returns a lot of useless data + # https://api.podcast.radioagora.pl/api4/getPodcasts?podcast_id=100091&with_guests=true&with_leaders_for_mobile=true + metadata = self._download_json( + f'https://audycje.tokfm.pl/getp/3{media_id}', media_id, 'Downloading podcast metadata') + if not metadata: + raise ExtractorError('No such podcast', expected=True) + metadata = metadata[0] + + formats = [] + for ext in ('aac', 'mp3'): + url_data = self._download_json( + f'https://api.podcast.radioagora.pl/api4/getSongUrl?podcast_id={media_id}&device_id={uuid.uuid4()}&ppre=false&audio={ext}', + media_id, 'Downloading podcast %s URL' % ext) + # prevents inserting the mp3 (default) multiple times + if 'link_ssl' in url_data and f'.{ext}' in url_data['link_ssl']: + formats.append({ + 'url': url_data['link_ssl'], + 'ext': ext, + 'vcodec': 'none', + 'acodec': ext, + }) + + return { + 'id': media_id, + 'formats': formats, + 'title': metadata.get('podcast_name'), + 'series': metadata.get('series_name'), + 'episode': metadata.get('podcast_name'), + } + + +class TokFMAuditionIE(InfoExtractor): + _VALID_URL = r'(?:https?://audycje\.tokfm\.pl/audycja/|tokfm:audition:)(?P\d+),?' 
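# The aac/mp3 probing loop above queries getSongUrl once per codec and keeps a
# link only when its extension matches the codec being probed, so the mp3
# fallback the API serves for both probes is not added twice. A condensed,
# standalone rendering of that dedup logic, driven here by a canned response
# instead of the live API.
import uuid


def probe_formats(fetch_song_url):
    """fetch_song_url(ext, device_id) -> dict shaped like TokFM's getSongUrl JSON."""
    formats = []
    device_id = uuid.uuid4()
    for ext in ('aac', 'mp3'):
        url_data = fetch_song_url(ext, device_id)
        if 'link_ssl' in url_data and f'.{ext}' in url_data['link_ssl']:
            formats.append({'url': url_data['link_ssl'], 'ext': ext,
                            'vcodec': 'none', 'acodec': ext})
    return formats


# canned example: the API answers the aac probe with an mp3 link, so only the
# mp3 probe produces a format
print(probe_formats(lambda ext, _: {'link_ssl': 'https://example.invalid/ep.mp3'}))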
+ IE_NAME = 'tokfm:audition' + _TESTS = [{ + 'url': 'https://audycje.tokfm.pl/audycja/218,Analizy', + 'info_dict': { + 'id': '218', + 'title': 'Analizy', + 'series': 'Analizy', + }, + 'playlist_count': 1635, + }] + + _PAGE_SIZE = 30 + _HEADERS = { + 'User-Agent': 'Mozilla/5.0 (Linux; Android 9; Redmi 3S Build/PQ3A.190801.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/87.0.4280.101 Mobile Safari/537.36', + } + + @staticmethod + def _create_url(id): + return f'https://audycje.tokfm.pl/audycja/{id}' + + def _real_extract(self, url): + audition_id = self._match_id(url) + + data = self._download_json( + f'https://api.podcast.radioagora.pl/api4/getSeries?series_id={audition_id}', + audition_id, 'Downloading audition metadata', headers=self._HEADERS) + if not data: + raise ExtractorError('No such audition', expected=True) + data = data[0] + + entries = OnDemandPagedList(functools.partial( + self._fetch_page, audition_id, data), self._PAGE_SIZE) + + return { + '_type': 'playlist', + 'id': audition_id, + 'title': data.get('series_name'), + 'series': data.get('series_name'), + 'entries': entries, + } + + def _fetch_page(self, audition_id, data, page): + for retry in self.RetryManager(): + podcast_page = self._download_json( + f'https://api.podcast.radioagora.pl/api4/getPodcasts?series_id={audition_id}&limit=30&offset={page}&with_guests=true&with_leaders_for_mobile=true', + audition_id, f'Downloading podcast list page {page + 1}', headers=self._HEADERS) + if not podcast_page: + retry.error = ExtractorError('Agora returned empty page', expected=True) + + for podcast in podcast_page: + yield { + '_type': 'url_transparent', + 'url': podcast['podcast_sharing_url'], + 'ie_key': TokFMPodcastIE.ie_key(), + 'title': podcast.get('podcast_name'), + 'episode': podcast.get('podcast_name'), + 'description': podcast.get('podcast_description'), + 'timestamp': int_or_none(podcast.get('podcast_timestamp')), + 'series': data.get('series_name'), + } diff --git a/plugins/youtube_download/yt_dlp/extractor/airmozilla.py b/plugins/youtube_download/yt_dlp/extractor/airmozilla.py index 9e38136..669556b 100644 --- a/plugins/youtube_download/yt_dlp/extractor/airmozilla.py +++ b/plugins/youtube_download/yt_dlp/extractor/airmozilla.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - import re from .common import InfoExtractor diff --git a/plugins/youtube_download/yt_dlp/extractor/airtv.py b/plugins/youtube_download/yt_dlp/extractor/airtv.py new file mode 100644 index 0000000..0b73a96 --- /dev/null +++ b/plugins/youtube_download/yt_dlp/extractor/airtv.py @@ -0,0 +1,96 @@ +from .common import InfoExtractor +from .youtube import YoutubeIE +from ..utils import ( + determine_ext, + int_or_none, + mimetype2ext, + parse_iso8601, + traverse_obj +) + + +class AirTVIE(InfoExtractor): + _VALID_URL = r'https?://www\.air\.tv/watch\?v=(?P\w+)' + _TESTS = [{ + # without youtube_id + 'url': 'https://www.air.tv/watch?v=W87jcWleSn2hXZN47zJZsQ', + 'info_dict': { + 'id': 'W87jcWleSn2hXZN47zJZsQ', + 'ext': 'mp4', + 'release_date': '20221003', + 'release_timestamp': 1664792603, + 'channel_id': 'vgfManQlRQKgoFQ8i8peFQ', + 'title': 'md5:c12d49ed367c3dadaa67659aff43494c', + 'upload_date': '20221003', + 'duration': 151, + 'view_count': int, + 'thumbnail': 'https://cdn-sp-gcs.air.tv/videos/W/8/W87jcWleSn2hXZN47zJZsQ/b13fc56464f47d9d62a36d110b9b5a72-4096x2160_9.jpg', + 'timestamp': 1664792603, + } + }, { + # with youtube_id + 'url': 'https://www.air.tv/watch?v=sv57EC8tRXG6h8dNXFUU1Q', + 'info_dict': { + 'id': 
'2ZTqmpee-bQ', + 'ext': 'mp4', + 'comment_count': int, + 'tags': 'count:11', + 'channel_follower_count': int, + 'like_count': int, + 'uploader': 'Newsflare', + 'thumbnail': 'https://i.ytimg.com/vi_webp/2ZTqmpee-bQ/maxresdefault.webp', + 'availability': 'public', + 'title': 'Geese Chase Alligator Across Golf Course', + 'uploader_id': 'NewsflareBreaking', + 'channel_url': 'https://www.youtube.com/channel/UCzSSoloGEz10HALUAbYhngQ', + 'description': 'md5:99b21d9cea59330149efbd9706e208f5', + 'age_limit': 0, + 'channel_id': 'UCzSSoloGEz10HALUAbYhngQ', + 'uploader_url': 'http://www.youtube.com/user/NewsflareBreaking', + 'view_count': int, + 'categories': ['News & Politics'], + 'live_status': 'not_live', + 'playable_in_embed': True, + 'channel': 'Newsflare', + 'duration': 37, + 'upload_date': '20180511', + } + }] + + def _get_formats_and_subtitle(self, json_data, video_id): + formats, subtitles = [], {} + for source in traverse_obj(json_data, 'sources', 'sources_desktop', ...): + ext = determine_ext(source.get('src'), mimetype2ext(source.get('type'))) + if ext == 'm3u8': + fmts, subs = self._extract_m3u8_formats_and_subtitles(source.get('src'), video_id) + formats.extend(fmts) + self._merge_subtitles(subs, target=subtitles) + else: + formats.append({'url': source.get('src'), 'ext': ext}) + return formats, subtitles + + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + + nextjs_json = self._search_nextjs_data(webpage, display_id)['props']['pageProps']['initialState']['videos'][display_id] + if nextjs_json.get('youtube_id'): + return self.url_result( + f'https://www.youtube.com/watch?v={nextjs_json.get("youtube_id")}', YoutubeIE) + + formats, subtitles = self._get_formats_and_subtitle(nextjs_json, display_id) + return { + 'id': display_id, + 'title': nextjs_json.get('title') or self._html_search_meta('og:title', webpage), + 'formats': formats, + 'subtitles': subtitles, + 'description': nextjs_json.get('description') or None, + 'duration': int_or_none(nextjs_json.get('duration')), + 'thumbnails': [ + {'url': thumbnail} + for thumbnail in traverse_obj(nextjs_json, ('default_thumbnails', ...))], + 'channel_id': traverse_obj(nextjs_json, 'channel', 'channel_slug'), + 'timestamp': parse_iso8601(nextjs_json.get('created')), + 'release_timestamp': parse_iso8601(nextjs_json.get('published')), + 'view_count': int_or_none(nextjs_json.get('views')), + } diff --git a/plugins/youtube_download/yt_dlp/extractor/aitube.py b/plugins/youtube_download/yt_dlp/extractor/aitube.py new file mode 100644 index 0000000..89a6450 --- /dev/null +++ b/plugins/youtube_download/yt_dlp/extractor/aitube.py @@ -0,0 +1,60 @@ +from .common import InfoExtractor +from ..utils import int_or_none, merge_dicts + + +class AitubeKZVideoIE(InfoExtractor): + _VALID_URL = r'https?://aitube\.kz/(?:video|embed/)\?(?:[^\?]+)?id=(?P[\w-]+)' + _TESTS = [{ + # id paramater as first parameter + 'url': 'https://aitube.kz/video?id=9291d29b-c038-49a1-ad42-3da2051d353c&playlistId=d55b1f5f-ef2a-4f23-b646-2a86275b86b7&season=1', + 'info_dict': { + 'id': '9291d29b-c038-49a1-ad42-3da2051d353c', + 'ext': 'mp4', + 'duration': 2174.0, + 'channel_id': '94962f73-013b-432c-8853-1bd78ca860fe', + 'like_count': int, + 'channel': 'ASTANA TV', + 'comment_count': int, + 'view_count': int, + 'description': 'Смотреть любимые сериалы и видео, поделиться видео и сериалами с друзьями и близкими', + 'thumbnail': 
diff --git a/plugins/youtube_download/yt_dlp/extractor/aitube.py b/plugins/youtube_download/yt_dlp/extractor/aitube.py new file mode 100644 index 0000000..89a6450 --- /dev/null +++ b/plugins/youtube_download/yt_dlp/extractor/aitube.py @@ -0,0 +1,60 @@ +from .common import InfoExtractor +from ..utils import int_or_none, merge_dicts + + +class AitubeKZVideoIE(InfoExtractor): + _VALID_URL = r'https?://aitube\.kz/(?:video|embed/)\?(?:[^\?]+)?id=(?P<id>[\w-]+)' + _TESTS = [{ + # id parameter as first parameter + 'url': 'https://aitube.kz/video?id=9291d29b-c038-49a1-ad42-3da2051d353c&playlistId=d55b1f5f-ef2a-4f23-b646-2a86275b86b7&season=1', + 'info_dict': { + 'id': '9291d29b-c038-49a1-ad42-3da2051d353c', + 'ext': 'mp4', + 'duration': 2174.0, + 'channel_id': '94962f73-013b-432c-8853-1bd78ca860fe', + 'like_count': int, + 'channel': 'ASTANA TV', + 'comment_count': int, + 'view_count': int, + 'description': 'Смотреть любимые сериалы и видео, поделиться видео и сериалами с друзьями и близкими', + 'thumbnail': 'https://cdn.static02.aitube.kz/kz.aitudala.aitube.staticaccess/files/ddf2a2ff-bee3-409b-b5f2-2a8202bba75b', + 'upload_date': '20221102', + 'timestamp': 1667370519, + 'title': 'Ангел хранитель 1 серия', + 'channel_follower_count': int, + } + }, { + # embed url + 'url': 'https://aitube.kz/embed/?id=9291d29b-c038-49a1-ad42-3da2051d353c', + 'only_matching': True, + }, { + # id parameter is not the first parameter + 'url': 'https://aitube.kz/video?season=1&id=9291d29b-c038-49a1-ad42-3da2051d353c&playlistId=d55b1f5f-ef2a-4f23-b646-2a86275b86b7', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + nextjs_data = self._search_nextjs_data(webpage, video_id)['props']['pageProps']['videoInfo'] + json_ld_data = self._search_json_ld(webpage, video_id) + + formats, subtitles = self._extract_m3u8_formats_and_subtitles( + f'https://api-http.aitube.kz/kz.aitudala.aitube.staticaccess/video/{video_id}/video', video_id) + + return merge_dicts({ + 'id': video_id, + 'title': nextjs_data.get('title') or self._html_search_meta(['name', 'og:title'], webpage), + 'description': nextjs_data.get('description'), + 'formats': formats, + 'subtitles': subtitles, + 'view_count': (nextjs_data.get('viewCount') + or int_or_none(self._html_search_meta('ya:ovs:views_total', webpage))), + 'like_count': nextjs_data.get('likeCount'), + 'channel': nextjs_data.get('channelTitle'), + 'channel_id': nextjs_data.get('channelId'), + 'thumbnail': nextjs_data.get('coverUrl'), + 'comment_count': nextjs_data.get('commentCount'), + 'channel_follower_count': int_or_none(nextjs_data.get('channelSubscriberCount')), + }, json_ld_data)
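The merge_dicts call at the end of AitubeKZVideoIE above layers the JSON-LD data under the fields the extractor resolved itself. A short illustration, assuming yt-dlp's behaviour that the first non-empty value for a key wins, so JSON-LD only backfills gaps:

from yt_dlp.utils import merge_dicts

extracted = {'id': '9291d29b', 'title': 'Ангел хранитель 1 серия', 'description': None}
json_ld = {'title': 'ignored, already set', 'description': 'from JSON-LD', 'timestamp': 1667370519}

info = merge_dicts(extracted, json_ld)
assert info['title'] == 'Ангел хранитель 1 серия'  # extractor value kept
assert info['description'] == 'from JSON-LD'       # empty value backfilled
assert info['timestamp'] == 1667370519             # missing key filled in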
diff --git a/plugins/youtube_download/yt_dlp/extractor/aliexpress.py b/plugins/youtube_download/yt_dlp/extractor/aliexpress.py index 6f241e6..2e83f2e 100644 --- a/plugins/youtube_download/yt_dlp/extractor/aliexpress.py +++ b/plugins/youtube_download/yt_dlp/extractor/aliexpress.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - from .common import InfoExtractor from ..compat import compat_str from ..utils import ( @@ -18,7 +15,7 @@ class AliExpressLiveIE(InfoExtractor): 'id': '2800002704436634', 'ext': 'mp4', 'title': 'CASIMA7.22', - 'thumbnail': r're:http://.*\.jpg', + 'thumbnail': r're:https?://.*\.jpg', 'uploader': 'CASIMA Official Store', 'timestamp': 1500717600, 'upload_date': '20170722', diff --git a/plugins/youtube_download/yt_dlp/extractor/aljazeera.py b/plugins/youtube_download/yt_dlp/extractor/aljazeera.py index 7bcdb7a..124bab0 100644 --- a/plugins/youtube_download/yt_dlp/extractor/aljazeera.py +++ b/plugins/youtube_download/yt_dlp/extractor/aljazeera.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - import json from .common import InfoExtractor diff --git a/plugins/youtube_download/yt_dlp/extractor/allocine.py b/plugins/youtube_download/yt_dlp/extractor/allocine.py index cd533ac..2d342cf 100644 --- a/plugins/youtube_download/yt_dlp/extractor/allocine.py +++ b/plugins/youtube_download/yt_dlp/extractor/allocine.py @@ -1,12 +1,10 @@ -# coding: utf-8 -from __future__ import unicode_literals - from .common import InfoExtractor from ..compat import compat_str from ..utils import ( int_or_none, qualities, remove_end, + strip_or_none, try_get, unified_timestamp, url_basename, @@ -102,10 +100,7 @@ class AllocineIE(InfoExtractor): video_id = display_id media_data = self._download_json( 'http://www.allocine.fr/ws/AcVisiondataV5.ashx?media=%s' % video_id, display_id) - title = remove_end( - self._html_search_regex( - r'(?s)<title>(.+?)</title>', webpage, 'title').strip(), - ' - AlloCiné') + title = remove_end(strip_or_none(self._html_extract_title(webpage)), ' - AlloCiné') for key, value in media_data['video'].items(): if not key.endswith('Path'): continue @@ -117,8 +112,6 @@ class AllocineIE(InfoExtractor): }) duration, view_count, timestamp = [None] * 3 - self._sort_formats(formats) - return { 'id': video_id, 'display_id': display_id,
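One detail worth calling out in the AllocineIE hunk above: ' - AlloCiné' is an argument to remove_end, not to strip_or_none, so the closing parenthesis of strip_or_none must come directly after (webpage). A tiny check with a fabricated title:

from yt_dlp.utils import remove_end, strip_or_none

raw_title = '  Les Tuche 4 Bande-annonce - AlloCiné '  # fabricated example value
title = remove_end(strip_or_none(raw_title), ' - AlloCiné')
assert title == 'Les Tuche 4 Bande-annonce'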
diff --git a/plugins/youtube_download/yt_dlp/extractor/alphaporno.py b/plugins/youtube_download/yt_dlp/extractor/alphaporno.py index 3a6d99f..8d5b472 100644 --- a/plugins/youtube_download/yt_dlp/extractor/alphaporno.py +++ b/plugins/youtube_download/yt_dlp/extractor/alphaporno.py @@ -1,5 +1,3 @@ -from __future__ import unicode_literals - from .common import InfoExtractor from ..utils import ( parse_iso8601, diff --git a/plugins/youtube_download/yt_dlp/extractor/alsace20tv.py b/plugins/youtube_download/yt_dlp/extractor/alsace20tv.py new file mode 100644 index 0000000..ea3332e --- /dev/null +++ b/plugins/youtube_download/yt_dlp/extractor/alsace20tv.py @@ -0,0 +1,83 @@ +from .common import InfoExtractor +from ..utils import ( + clean_html, + dict_get, + get_element_by_class, + int_or_none, + unified_strdate, + url_or_none, +) + + +class Alsace20TVBaseIE(InfoExtractor): + def _extract_video(self, video_id, url=None): + info = self._download_json( + 'https://www.alsace20.tv/visionneuse/visio_v9_js.php?key=%s&habillage=0&mode=html' % (video_id, ), + video_id) or {} + title = info.get('titre') + + formats = [] + for res, fmt_url in (info.get('files') or {}).items(): + formats.extend( + self._extract_smil_formats(fmt_url, video_id, fatal=False) + if '/smil:_' in fmt_url + else self._extract_mpd_formats(fmt_url, video_id, mpd_id=res, fatal=False)) + + webpage = (url and self._download_webpage(url, video_id, fatal=False)) or '' + thumbnail = url_or_none(dict_get(info, ('image', 'preview', )) or self._og_search_thumbnail(webpage)) + upload_date = self._search_regex(r'/(\d{6})_', thumbnail, 'upload_date', default=None) + upload_date = unified_strdate('20%s-%s-%s' % (upload_date[:2], upload_date[2:4], upload_date[4:])) if upload_date else None + return { + 'id': video_id, + 'title': title, + 'formats': formats, + 'description': clean_html(get_element_by_class('wysiwyg', webpage)), + 'upload_date': upload_date, + 'thumbnail': thumbnail, + 'duration': int_or_none(self._og_search_property('video:duration', webpage) if webpage else None), + 'view_count': int_or_none(info.get('nb_vues')), + } + + +class Alsace20TVIE(Alsace20TVBaseIE): + _VALID_URL = r'https?://(?:www\.)?alsace20\.tv/(?:[\w-]+/)+[\w-]+-(?P<id>[\w]+)' + _TESTS = [{ + 'url': 'https://www.alsace20.tv/VOD/Actu/JT/Votre-JT-jeudi-3-fevrier-lyNHCXpYJh.html', + 'info_dict': { + 'id': 'lyNHCXpYJh', + 'ext': 'mp4', + 'description': 'md5:fc0bc4a0692d3d2dba4524053de4c7b7', + 'title': 'Votre JT du jeudi 3 février', + 'upload_date': '20220203', + 'thumbnail': r're:https?://.+\.jpg', + 'duration': 1073, + 'view_count': int, + }, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + return self._extract_video(video_id, url) + + +class Alsace20TVEmbedIE(Alsace20TVBaseIE): + _VALID_URL = r'https?://(?:www\.)?alsace20\.tv/emb/(?P<id>[\w]+)' + _TESTS = [{ + 'url': 'https://www.alsace20.tv/emb/lyNHCXpYJh', + # 'md5': 'd91851bf9af73c0ad9b2cdf76c127fbb', + 'info_dict': { + 'id': 'lyNHCXpYJh', + 'ext': 'mp4', + 'title': 'Votre JT du jeudi 3 février', + 'upload_date': '20220203', + 'thumbnail': r're:https?://.+\.jpg', + 'view_count': int, + }, + 'params': { + 'format': 'bestvideo', + }, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + return self._extract_video(video_id)
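Alsace20TVBaseIE above recovers the upload date from six YYMMDD digits embedded in the thumbnail path. A worked example with a fabricated URL of the same shape:

import re
from yt_dlp.utils import unified_strdate

thumbnail = 'https://www.alsace20.tv/images/220203_JT.jpg'  # fabricated path
digits = re.search(r'/(\d{6})_', thumbnail).group(1)
upload_date = unified_strdate('20%s-%s-%s' % (digits[:2], digits[2:4], digits[4:]))
assert upload_date == '20220203'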
diff --git a/plugins/youtube_download/yt_dlp/extractor/alura.py b/plugins/youtube_download/yt_dlp/extractor/alura.py index f5325de..bfe066b 100644 --- a/plugins/youtube_download/yt_dlp/extractor/alura.py +++ b/plugins/youtube_download/yt_dlp/extractor/alura.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - import re from .common import InfoExtractor @@ -66,22 +63,13 @@ class AluraIE(InfoExtractor): f['height'] = int('720' if m.group('res') == 'hd' else '480') formats.extend(video_format) - self._sort_formats(formats) - return { 'id': video_id, 'title': video_title, "formats": formats } - def _real_initialize(self): - self._login() - - def _login(self): - username, password = self._get_login_info() - if username is None: - return - pass + def _perform_login(self, username, password): login_page = self._download_webpage( self._LOGIN_URL, None, 'Downloading login popup') @@ -123,7 +111,7 @@ class AluraIE(InfoExtractor): raise ExtractorError('Unable to log in') -class AluraCourseIE(AluraIE): +class AluraCourseIE(AluraIE): # XXX: Do not subclass from concrete IE _VALID_URL = r'https?://(?:cursos\.)?alura\.com\.br/course/(?P<id>[^/]+)' _LOGIN_URL = 'https://cursos.alura.com.br/loginForm?urlAfterLogin=/loginForm' diff --git a/plugins/youtube_download/yt_dlp/extractor/amara.py b/plugins/youtube_download/yt_dlp/extractor/amara.py index 61d4695..5018710 100644 --- a/plugins/youtube_download/yt_dlp/extractor/amara.py +++ b/plugins/youtube_download/yt_dlp/extractor/amara.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - from .common import InfoExtractor from .youtube import YoutubeIE from .vimeo import VimeoIE diff --git a/plugins/youtube_download/yt_dlp/extractor/amazon.py b/plugins/youtube_download/yt_dlp/extractor/amazon.py index 07b1b18..a03f983 100644 --- a/plugins/youtube_download/yt_dlp/extractor/amazon.py +++ b/plugins/youtube_download/yt_dlp/extractor/amazon.py @@ -1,6 +1,17 @@ -# coding: utf-8 +import re + from .common import InfoExtractor -from ..utils import int_or_none +from ..utils import ( + ExtractorError, + clean_html, + float_or_none, + get_element_by_attribute, + get_element_by_class, + int_or_none, + js_to_json, + traverse_obj, + url_or_none, +) class AmazonStoreIE(InfoExtractor): @@ -10,7 +21,7 @@ class AmazonStoreIE(InfoExtractor): 'url': 'https://www.amazon.co.uk/dp/B098XNCHLD/', 'info_dict': { 'id': 'B098XNCHLD', - 'title': 'md5:5f3194dbf75a8dcfc83079bd63a2abed', + 'title': str, }, 'playlist_mincount': 1, 'playlist': [{ @@ -19,28 +30,48 @@ class AmazonStoreIE(InfoExtractor): 'ext': 'mp4', 'title': 'mcdodo usb c cable 100W 5a', 'thumbnail': r're:^https?://.*\.jpg$', + 'duration': 34, }, - }] + }], + 'expected_warnings': ['Unable to extract data'], }, { 'url': 'https://www.amazon.in/Sony-WH-1000XM4-Cancelling-Headphones-Bluetooth/dp/B0863TXGM3', 'info_dict': { 'id': 'B0863TXGM3', - 'title': 'md5:b0bde4881d3cfd40d63af19f7898b8ff', + 'title': str, }, 'playlist_mincount': 4, + 'expected_warnings': ['Unable to extract data'], }, { 'url': 'https://www.amazon.com/dp/B0845NXCXF/', 'info_dict': { 'id': 'B0845NXCXF', - 'title': 'md5:2145cd4e3c7782f1ee73649a3cff1171', + 'title': str, }, 'playlist_mincount': 1, + 'expected_warnings': ['Unable to extract data'], + }, { + 'url': 'https://www.amazon.es/Samsung-Smartphone-s-AMOLED-Quad-c%C3%A1mara-espa%C3%B1ola/dp/B08WX337PQ', + 'info_dict': { + 'id': 'B08WX337PQ', + 'title': str, + }, + 'playlist_mincount': 1, + 'expected_warnings': ['Unable to extract data'], + }] def _real_extract(self, url): id = self._match_id(url) - webpage = self._download_webpage(url, id) - data_json = self._parse_json(self._html_search_regex(r'var\s?obj\s?=\s?jQuery\.parseJSON\(\'(.*)\'\)', webpage, 'data'), id) + + for retry in self.RetryManager(): + webpage = self._download_webpage(url, id) + try: + data_json = self._search_json( + r'var\s?obj\s?=\s?jQuery\.parseJSON\(\'', webpage, 'data', id, + transform_source=js_to_json) + except ExtractorError as e: + retry.error = e + entries = [{ 'id': video['marketPlaceID'], 'url': video['url'], @@ -50,4 +81,90 @@ class AmazonStoreIE(InfoExtractor): 'height': int_or_none(video.get('videoHeight')), 'width': int_or_none(video.get('videoWidth')), } for video in (data_json.get('videos') or []) if video.get('isVideo') and video.get('url')] - return self.playlist_result(entries, playlist_id=id, playlist_title=data_json['title']) + return self.playlist_result(entries, playlist_id=id, playlist_title=data_json.get('title')) + +
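The rewritten AmazonStoreIE._real_extract above relies on the for retry in self.RetryManager() idiom. The following is a deliberately simplified re-implementation of just that control flow, not yt-dlp's actual class, to show how assigning retry.error re-runs the body until it succeeds or the attempt budget runs out:

class TinyRetryManager:
    def __init__(self, retries=3):
        self.retries, self.error = retries, None

    def __iter__(self):
        for _attempt in range(1, self.retries + 1):
            self.error = None
            yield self
            if self.error is None:
                return          # body succeeded, stop retrying
        raise self.error        # budget exhausted, surface the last failure

responses = iter([None, None, {'videos': []}])  # fails twice, then succeeds

for retry in TinyRetryManager():
    data = next(responses)
    if not data:
        retry.error = ValueError('empty response')

print(data)  # -> {'videos': []}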
+class AmazonReviewsIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?amazon\.(?:[a-z]{2,3})(?:\.[a-z]{2})?/gp/customer-reviews/(?P<id>[^/&#$?]+)' + _TESTS = [{ + 'url': 'https://www.amazon.com/gp/customer-reviews/R10VE9VUSY19L3/ref=cm_cr_arp_d_rvw_ttl', + 'info_dict': { + 'id': 'R10VE9VUSY19L3', + 'ext': 'mp4', + 'title': 'Get squad #Suspicious', + 'description': 'md5:7012695052f440a1e064e402d87e0afb', + 'uploader': 'Kimberly Cronkright', + 'average_rating': 1.0, + 'thumbnail': r're:^https?://.*\.jpg$', + }, + 'expected_warnings': ['Review body was not found in webpage'], + }, { + 'url': 'https://www.amazon.com/gp/customer-reviews/R10VE9VUSY19L3/ref=cm_cr_arp_d_rvw_ttl?language=es_US', + 'info_dict': { + 'id': 'R10VE9VUSY19L3', + 'ext': 'mp4', + 'title': 'Get squad #Suspicious', + 'description': 'md5:7012695052f440a1e064e402d87e0afb', + 'uploader': 'Kimberly Cronkright', + 'average_rating': 1.0, + 'thumbnail': r're:^https?://.*\.jpg$', + }, + 'expected_warnings': ['Review body was not found in webpage'], + }, { + 'url': 'https://www.amazon.in/gp/customer-reviews/RV1CO8JN5VGXV/', + 'info_dict': { + 'id': 'RV1CO8JN5VGXV', + 'ext': 'mp4', + 'title': 'Not sure about its durability', + 'description': 'md5:1a252c106357f0a3109ebf37d2e87494', + 'uploader': 'Shoaib Gulzar', + 'average_rating': 2.0, + 'thumbnail': r're:^https?://.*\.jpg$', + }, + 'expected_warnings': ['Review body was not found in webpage'], + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + for retry in self.RetryManager(): + webpage = self._download_webpage(url, video_id) + review_body = get_element_by_attribute('data-hook', 'review-body', webpage) + if not review_body: + retry.error = ExtractorError('Review body was not found in webpage', expected=True) + + formats, subtitles = [], {} + + manifest_url = self._search_regex( + r'data-video-url="([^"]+)"', review_body, 'm3u8 url', default=None) + if url_or_none(manifest_url): + fmts, subtitles = self._extract_m3u8_formats_and_subtitles( + manifest_url, video_id, 'mp4', fatal=False) + formats.extend(fmts) + + video_url = self._search_regex( + r'<input[^>]+\bvalue="([^"]+)"[^>]+\bclass="video-url"', review_body, 'mp4 url', default=None) + if url_or_none(video_url): + formats.append({ + 'url': video_url, + 'ext': 'mp4', + 'format_id': 'http-mp4', + }) + + if not formats: + self.raise_no_formats('No video found for this customer review', expected=True) + + return { + 'id': video_id, + 'title': (clean_html(get_element_by_attribute('data-hook', 'review-title', webpage)) + or self._html_extract_title(webpage)), + 'description': clean_html(traverse_obj(re.findall( + r'<span(?:\s+class="cr-original-review-content")?>(.+?)</span>', review_body), -1)), + 'uploader': clean_html(get_element_by_class('a-profile-name', webpage)), + 'average_rating': float_or_none(clean_html(get_element_by_attribute( + 'data-hook', 'review-star-rating', webpage) or '').partition(' ')[0]), + 'thumbnail': self._search_regex( + r'data-thumbnail-url="([^"]+)"', review_body, 'thumbnail', default=None), + 'formats': formats, + 'subtitles': subtitles, + }
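Everything in AmazonReviewsIE above hangs off the review-body element, which is why its retry loop keys on get_element_by_attribute. A minimal sketch of that lookup with a fabricated HTML snippet (assuming the helper returns the element's inner HTML, or None when no match exists):

from yt_dlp.utils import get_element_by_attribute

html = '<div data-hook="review-body"><span>Great cable!</span></div>'  # fabricated
assert get_element_by_attribute('data-hook', 'review-body', html) == '<span>Great cable!</span>'
assert get_element_by_attribute('data-hook', 'review-title', html) is None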
diff --git a/plugins/youtube_download/yt_dlp/extractor/amazonminitv.py b/plugins/youtube_download/yt_dlp/extractor/amazonminitv.py new file mode 100644 index 0000000..7309968 --- /dev/null +++ b/plugins/youtube_download/yt_dlp/extractor/amazonminitv.py @@ -0,0 +1,290 @@ +import json + +from .common import InfoExtractor +from ..utils import ExtractorError, int_or_none, traverse_obj, try_get + + +class AmazonMiniTVBaseIE(InfoExtractor): + def _real_initialize(self): + self._download_webpage( + 'https://www.amazon.in/minitv', None, + note='Fetching guest session cookies') + AmazonMiniTVBaseIE.session_id = self._get_cookies('https://www.amazon.in')['session-id'].value + + def _call_api(self, asin, data=None, note=None): + device = {'clientId': 'ATVIN', 'deviceLocale': 'en_GB'} + if data: + data['variables'].update({ + 'contentType': 'VOD', + 'sessionIdToken': self.session_id, + **device, + }) + + resp = self._download_json( + f'https://www.amazon.in/minitv/api/web/{"graphql" if data else "prs"}', + asin, note=note, headers={'Content-Type': 'application/json'}, + data=json.dumps(data).encode() if data else None, + query=None if data else { + 'deviceType': 'A1WMMUXPCUJL4N', + 'contentId': asin, + **device, + }) + + if resp.get('errors'): + raise ExtractorError(f'MiniTV said: {resp["errors"][0]["message"]}') + elif not data: + return resp + return resp['data'][data['operationName']] + + +class AmazonMiniTVIE(AmazonMiniTVBaseIE): + _VALID_URL = r'(?:https?://(?:www\.)?amazon\.in/minitv/tp/|amazonminitv:(?:amzn1\.dv\.gti\.)?)(?P<id>[a-f0-9-]+)' + _TESTS = [{ + 'url': 'https://www.amazon.in/minitv/tp/75fe3a75-b8fe-4499-8100-5c9424344840?referrer=https%3A%2F%2Fwww.amazon.in%2Fminitv', + 'info_dict': { + 'id': 'amzn1.dv.gti.75fe3a75-b8fe-4499-8100-5c9424344840', + 'ext': 'mp4', + 'title': 'May I Kiss You?', + 'language': 'Hindi', + 'thumbnail': r're:^https?://.*\.jpg$', + 'description': 'md5:a549bfc747973e04feb707833474e59d', + 'release_timestamp': 1644710400, + 'release_date': '20220213', + 'duration': 846, + 'chapters': 'count:2', + 'series': 'Couple Goals', + 'series_id': 'amzn1.dv.gti.56521d46-b040-4fd5-872e-3e70476a04b0', + 'season': 'Season 3', + 'season_number': 3, + 'season_id': 'amzn1.dv.gti.20331016-d9b9-4968-b991-c89fa4927a36', + 'episode': 'May I Kiss You?', + 'episode_number': 2, + 'episode_id': 'amzn1.dv.gti.75fe3a75-b8fe-4499-8100-5c9424344840', + }, + }, { + 'url': 'https://www.amazon.in/minitv/tp/280d2564-584f-452f-9c98-7baf906e01ab?referrer=https%3A%2F%2Fwww.amazon.in%2Fminitv', + 'info_dict': { + 'id': 'amzn1.dv.gti.280d2564-584f-452f-9c98-7baf906e01ab', + 'ext': 'mp4', + 'title': 'Jahaan', + 'language': 'Hindi', + 'thumbnail': r're:^https?://.*\.jpg', + 'description': 'md5:05eb765a77bf703f322f120ec6867339', + 'release_timestamp': 1647475200, + 'release_date': '20220317', + 'duration': 783, + 'chapters': [], + }, + }, { + 'url': 'https://www.amazon.in/minitv/tp/280d2564-584f-452f-9c98-7baf906e01ab', + 'only_matching': True, + }, { + 'url': 'amazonminitv:amzn1.dv.gti.280d2564-584f-452f-9c98-7baf906e01ab', + 'only_matching': True, + }, { + 'url': 'amazonminitv:280d2564-584f-452f-9c98-7baf906e01ab', + 'only_matching': True, + }] + + _GRAPHQL_QUERY_CONTENT = ''' +query content($sessionIdToken: String!, $deviceLocale: String, $contentId: ID!, $contentType: ContentType!, $clientId: String) { + content( + applicationContextInput: {deviceLocale: $deviceLocale, sessionIdToken: $sessionIdToken, clientId: $clientId} + contentId: $contentId + contentType: $contentType + ) { + contentId + name + ... on Episode { + contentId + vodType + name + images + description { + synopsis + contentLengthInSeconds + } + publicReleaseDateUTC + audioTracks + seasonId + seriesId + seriesName + seasonNumber + episodeNumber + timecode { + endCreditsTime + } + } + ... on MovieContent { + contentId + vodType + name + description { + synopsis + contentLengthInSeconds + } + images + publicReleaseDateUTC + audioTracks + } + } +}''' + + def _real_extract(self, url): + asin = f'amzn1.dv.gti.{self._match_id(url)}' + prs = self._call_api(asin, note='Downloading playback info') + + formats, subtitles = [], {} + for type_, asset in prs['playbackAssets'].items(): + if not traverse_obj(asset, 'manifestUrl'): + continue + if type_ == 'hls': + m3u8_fmts, m3u8_subs = self._extract_m3u8_formats_and_subtitles( + asset['manifestUrl'], asin, ext='mp4', entry_protocol='m3u8_native', + m3u8_id=type_, fatal=False) + formats.extend(m3u8_fmts) + subtitles = self._merge_subtitles(subtitles, m3u8_subs) + elif type_ == 'dash': + mpd_fmts, mpd_subs = self._extract_mpd_formats_and_subtitles( + asset['manifestUrl'], asin, mpd_id=type_, fatal=False) + formats.extend(mpd_fmts) + subtitles = self._merge_subtitles(subtitles, mpd_subs) + else: + self.report_warning(f'Unknown asset type: {type_}') + + title_info = self._call_api( + asin, note='Downloading title info', data={ + 'operationName': 'content', + 'variables': {'contentId': asin}, + 'query': self._GRAPHQL_QUERY_CONTENT, + }) + credits_time = try_get(title_info, lambda x: x['timecode']['endCreditsTime'] / 1000) + is_episode = title_info.get('vodType') == 'EPISODE' + + return { + 'id': asin, + 'title': title_info.get('name'), + 'formats': formats, + 'subtitles': subtitles, + 'language': traverse_obj(title_info, ('audioTracks', 0)), + 'thumbnails': [{ + 'id': type_, + 'url': url, + } for type_, url in (title_info.get('images') or {}).items()], + 'description': traverse_obj(title_info, ('description', 'synopsis')), + 'release_timestamp': int_or_none(try_get(title_info, lambda x: x['publicReleaseDateUTC'] / 1000)), + 'duration': traverse_obj(title_info, ('description', 'contentLengthInSeconds')), + 'chapters': [{ + 'start_time': credits_time, + 'title': 'End Credits', + }] if credits_time else [], + 'series': title_info.get('seriesName'), + 'series_id': title_info.get('seriesId'), + 'season_number': title_info.get('seasonNumber'), + 'season_id': title_info.get('seasonId'), + 'episode': title_info.get('name') if is_episode else None, + 'episode_number': title_info.get('episodeNumber'), + 'episode_id': asin if is_episode else None, + } + + +class AmazonMiniTVSeasonIE(AmazonMiniTVBaseIE): + IE_NAME = 'amazonminitv:season' + _VALID_URL = r'amazonminitv:season:(?:amzn1\.dv\.gti\.)?(?P<id>[a-f0-9-]+)' + IE_DESC = 'Amazon MiniTV Series, "minitv:season:" prefix' + _TESTS = [{ + 'url': 
'amazonminitv:season:amzn1.dv.gti.0aa996eb-6a1b-4886-a342-387fbd2f1db0', + 'playlist_mincount': 6, + 'info_dict': { + 'id': 'amzn1.dv.gti.0aa996eb-6a1b-4886-a342-387fbd2f1db0', + }, + }, { + 'url': 'amazonminitv:season:0aa996eb-6a1b-4886-a342-387fbd2f1db0', + 'only_matching': True, + }] + + _GRAPHQL_QUERY = ''' +query getEpisodes($sessionIdToken: String!, $clientId: String, $episodeOrSeasonId: ID!, $deviceLocale: String) { + getEpisodes( + applicationContextInput: {sessionIdToken: $sessionIdToken, deviceLocale: $deviceLocale, clientId: $clientId} + episodeOrSeasonId: $episodeOrSeasonId + ) { + episodes { + ... on Episode { + contentId + name + images + seriesName + seasonId + seriesId + seasonNumber + episodeNumber + description { + synopsis + contentLengthInSeconds + } + publicReleaseDateUTC + } + } + } +} +''' + + def _entries(self, asin): + season_info = self._call_api( + asin, note='Downloading season info', data={ + 'operationName': 'getEpisodes', + 'variables': {'episodeOrSeasonId': asin}, + 'query': self._GRAPHQL_QUERY, + }) + + for episode in season_info['episodes']: + yield self.url_result( + f'amazonminitv:{episode["contentId"]}', AmazonMiniTVIE, episode['contentId']) + + def _real_extract(self, url): + asin = f'amzn1.dv.gti.{self._match_id(url)}' + return self.playlist_result(self._entries(asin), asin) + + +class AmazonMiniTVSeriesIE(AmazonMiniTVBaseIE): + IE_NAME = 'amazonminitv:series' + _VALID_URL = r'amazonminitv:series:(?:amzn1\.dv\.gti\.)?(?P<id>[a-f0-9-]+)' + _TESTS = [{ + 'url': 'amazonminitv:series:amzn1.dv.gti.56521d46-b040-4fd5-872e-3e70476a04b0', + 'playlist_mincount': 3, + 'info_dict': { + 'id': 'amzn1.dv.gti.56521d46-b040-4fd5-872e-3e70476a04b0', + }, + }, { + 'url': 'amazonminitv:series:56521d46-b040-4fd5-872e-3e70476a04b0', + 'only_matching': True, + }] + + _GRAPHQL_QUERY = ''' +query getSeasons($sessionIdToken: String!, $deviceLocale: String, $episodeOrSeasonOrSeriesId: ID!, $clientId: String) { + getSeasons( + applicationContextInput: {deviceLocale: $deviceLocale, sessionIdToken: $sessionIdToken, clientId: $clientId} + episodeOrSeasonOrSeriesId: $episodeOrSeasonOrSeriesId + ) { + seasons { + seasonId + } + } +} +''' + + def _entries(self, asin): + season_info = self._call_api( + asin, note='Downloading series info', data={ + 'operationName': 'getSeasons', + 'variables': {'episodeOrSeasonOrSeriesId': asin}, + 'query': self._GRAPHQL_QUERY, + }) + + for season in season_info['seasons']: + yield self.url_result(f'amazonminitv:season:{season["seasonId"]}', AmazonMiniTVSeasonIE, season['seasonId']) + + def _real_extract(self, url): + asin = f'amzn1.dv.gti.{self._match_id(url)}' + return self.playlist_result(self._entries(asin), asin)
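The three AmazonMiniTV extractors above fan out series -> seasons -> episodes, one GraphQL call per hop, with each episode resolving to an amazonminitv: URL handled by AmazonMiniTVIE. A fabricated walk-through of that traversal, with fetch_graphql standing in for AmazonMiniTVBaseIE._call_api:

def fetch_graphql(operation, asin):
    # Stand-in with canned responses; the real method POSTs the GraphQL
    # queries shown in the patch to the /minitv/api/web/graphql endpoint.
    canned = {
        'getSeasons': {'seasons': [{'seasonId': 'amzn1.dv.gti.season-1'}]},
        'getEpisodes': {'episodes': [{'contentId': 'amzn1.dv.gti.ep-1'},
                                     {'contentId': 'amzn1.dv.gti.ep-2'}]},
    }
    return canned[operation]

episode_urls = [
    f'amazonminitv:{episode["contentId"]}'
    for season in fetch_graphql('getSeasons', 'amzn1.dv.gti.series-id')['seasons']
    for episode in fetch_graphql('getEpisodes', season['seasonId'])['episodes']
]
assert episode_urls == ['amazonminitv:amzn1.dv.gti.ep-1', 'amazonminitv:amzn1.dv.gti.ep-2']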
diff --git a/plugins/youtube_download/yt_dlp/extractor/amcnetworks.py b/plugins/youtube_download/yt_dlp/extractor/amcnetworks.py index e38e215..c58bc7b 100644 --- a/plugins/youtube_download/yt_dlp/extractor/amcnetworks.py +++ b/plugins/youtube_download/yt_dlp/extractor/amcnetworks.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - import re from .theplatform import ThePlatformIE @@ -12,7 +9,7 @@ from ..utils import ( ) -class AMCNetworksIE(ThePlatformIE): +class AMCNetworksIE(ThePlatformIE): # XXX: Do not subclass from concrete IE _VALID_URL = r'https?://(?:www\.)?(?P<site>amc|bbcamerica|ifc|(?:we|sundance)tv)\.com/(?P<id>(?:movies|shows(?:/[^/]+)+)/[^/?#&]+)' _TESTS = [{ 'url': 'https://www.bbcamerica.com/shows/the-graham-norton-show/videos/tina-feys-adorable-airline-themed-family-dinner--51631', @@ -109,7 +106,6 @@ class AMCNetworksIE(ThePlatformIE): media_url = update_url_query(media_url, query) formats, subtitles = self._extract_theplatform_smil( media_url, video_id) - self._sort_formats(formats) thumbnails = [] thumbnail_urls = [properties.get('imageDesktop')] diff --git a/plugins/youtube_download/yt_dlp/extractor/americastestkitchen.py b/plugins/youtube_download/yt_dlp/extractor/americastestkitchen.py index 6e6099a..abda55d 100644 --- a/plugins/youtube_download/yt_dlp/extractor/americastestkitchen.py +++ b/plugins/youtube_download/yt_dlp/extractor/americastestkitchen.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - import json from .common import InfoExtractor @@ -14,7 +11,7 @@ from ..utils import ( class AmericasTestKitchenIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?(?:americastestkitchen|cooks(?:country|illustrated))\.com/(?P<resource_type>episode|videos)/(?P<id>\d+)' + _VALID_URL = r'https?://(?:www\.)?americastestkitchen\.com/(?:cooks(?:country|illustrated)/)?(?P<resource_type>episode|videos)/(?P<id>\d+)' _TESTS = [{ 'url': 'https://www.americastestkitchen.com/episode/582-weeknight-japanese-suppers', 'md5': 'b861c3e365ac38ad319cfd509c30577f', @@ -22,15 +19,20 @@ class AmericasTestKitchenIE(InfoExtractor): 'id': '5b400b9ee338f922cb06450c', 'title': 'Japanese Suppers', 'ext': 'mp4', + 'display_id': 'weeknight-japanese-suppers', 'description': 'md5:64e606bfee910627efc4b5f050de92b3', - 'thumbnail': r're:^https?://', - 'timestamp': 1523318400, - 'upload_date': '20180410', - 'release_date': '20180410', - 'series': "America's Test Kitchen", - 'season_number': 18, + 'timestamp': 1523304000, + 'upload_date': '20180409', + 'release_date': '20180409', + 'series': 'America\'s Test Kitchen', + 'season': 'Season 18', 'episode': 'Japanese Suppers', + 'season_number': 18, 'episode_number': 15, + 'duration': 1376, + 'thumbnail': r're:^https?://', + 'average_rating': 0, + 'view_count': int, }, 'params': { 'skip_download': True, @@ -43,15 +45,20 @@ class AmericasTestKitchenIE(InfoExtractor): 'id': '5fbe8c61bda2010001c6763b', 'title': 'Simple Chicken Dinner', 'ext': 'mp4', + 'display_id': 'atktv_2103_simple-chicken-dinner_full-episode_web-mp4', 'description': 'md5:eb68737cc2fd4c26ca7db30139d109e7', - 'thumbnail': r're:^https?://', - 'timestamp': 1610755200, - 'upload_date': '20210116', - 'release_date': '20210116', - 'series': "America's Test Kitchen", - 'season_number': 21, + 'timestamp': 1610737200, + 'upload_date': '20210115', + 'release_date': '20210115', + 'series': 'America\'s Test Kitchen', + 'season': 'Season 21', 'episode': 'Simple Chicken Dinner', + 'season_number': 21, + 'episode_number': 3, + 'duration': 1397, + 'thumbnail': r're:^https?://', + 'view_count': int, + 'average_rating': 0, }, 'params': { 'skip_download': True, @@ -60,10 +67,10 @@ class AmericasTestKitchenIE(InfoExtractor): 'url': 'https://www.americastestkitchen.com/videos/3420-pan-seared-salmon', 'only_matching': True, }, { - 'url': 'https://www.cookscountry.com/episode/564-when-only-chocolate-will-do', + 'url': 'https://www.americastestkitchen.com/cookscountry/episode/564-when-only-chocolate-will-do', 'only_matching': True, }, { - 'url': 'https://www.cooksillustrated.com/videos/4478-beef-wellington', + 'url': 'https://www.americastestkitchen.com/cooksillustrated/videos/4478-beef-wellington', 'only_matching': True, }] @@ -93,7 +100,7 @@ class AmericasTestKitchenIE(InfoExtractor): class AmericasTestKitchenSeasonIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?(?P<show>americastestkitchen|cookscountry)\.com/episodes/browse/season_(?P<id>\d+)' + 
_VALID_URL = r'https?://(?:www\.)?americastestkitchen\.com(?P<show>/cookscountry)?/episodes/browse/season_(?P<id>\d+)' _TESTS = [{ # ATK Season 'url': 'https://www.americastestkitchen.com/episodes/browse/season_1', 'info_dict': { 'id': 'season_1', 'title': 'Season 1', }, 'playlist_count': 13, }, { # Cooks Country Season - 'url': 'https://www.cookscountry.com/episodes/browse/season_12', + 'url': 'https://www.americastestkitchen.com/cookscountry/episodes/browse/season_12', 'info_dict': { 'id': 'season_12', 'title': 'Season 12', }, 'playlist_count': 13, }] def _real_extract(self, url): - show_name, season_number = self._match_valid_url(url).groups() + show_path, season_number = self._match_valid_url(url).group('show', 'id') season_number = int(season_number) - slug = 'atk' if show_name == 'americastestkitchen' else 'cco' + slug = 'cco' if show_path == '/cookscountry' else 'atk' season = 'Season %d' % season_number season_search = self._download_json( 'https://y1fnzxui30-dsn.algolia.net/1/indexes/everest_search_%s_season_desc_production' % slug, season, headers={ - 'Origin': 'https://www.%s.com' % show_name, + 'Origin': 'https://www.americastestkitchen.com', 'X-Algolia-API-Key': '8d504d0099ed27c1b73708d22871d805', 'X-Algolia-Application-Id': 'Y1FNZXUI30', }, query={ @@ -139,12 +146,12 @@ class AmericasTestKitchenSeasonIE(InfoExtractor): def entries(): for episode in (season_search.get('hits') or []): - search_url = episode.get('search_url') + search_url = episode.get('search_url') # always formatted like '/episode/123-title-of-episode' if not search_url: continue yield { '_type': 'url', - 'url': 'https://www.%s.com%s' % (show_name, search_url), + 'url': f'https://www.americastestkitchen.com{show_path or ""}{search_url}', 'id': try_get(episode, lambda e: e['objectID'].split('_')[-1]), 'title': episode.get('title'), 'description': episode.get('description'), diff --git a/plugins/youtube_download/yt_dlp/extractor/amp.py b/plugins/youtube_download/yt_dlp/extractor/amp.py index 24c684c..b0cbd77 100644 --- a/plugins/youtube_download/yt_dlp/extractor/amp.py +++ b/plugins/youtube_download/yt_dlp/extractor/amp.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - from .common import InfoExtractor from ..utils import ( determine_ext, @@ -13,7 +10,7 @@ from ..utils import ( ) -class AMPIE(InfoExtractor): +class AMPIE(InfoExtractor): # XXX: Conventionally, base classes should end with BaseIE/InfoExtractor # parse Akamai Adaptive Media Player feed def _extract_feed_info(self, url): feed = self._download_json( @@ -87,8 +84,6 @@ class AMPIE(InfoExtractor): 'ext': ext, }) - self._sort_formats(formats) - timestamp = unified_timestamp(item.get('pubDate'), ' ') or parse_iso8601(item.get('dc-date')) return { diff --git a/plugins/youtube_download/yt_dlp/extractor/angel.py b/plugins/youtube_download/yt_dlp/extractor/angel.py new file mode 100644 index 0000000..306b365 --- /dev/null +++ b/plugins/youtube_download/yt_dlp/extractor/angel.py @@ -0,0 +1,56 @@ +import re + +from .common import InfoExtractor +from ..utils import url_or_none, merge_dicts + + +class AngelIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?angel\.com/watch/(?P<show>[^/?#]+)/episode/(?P<id>[\w-]+)/season-(?P<season>\d+)/episode-(?P<episode>\d+)/(?P<title>[^/?#]+)' + _TESTS = [{ + 'url': 'https://www.angel.com/watch/tuttle-twins/episode/2f3d0382-ea82-4cdc-958e-84fbadadc710/season-1/episode-1/when-laws-give-you-lemons', + 'md5': '4734e5cfdd64a568e837246aa3eaa524', + 'info_dict': { + 'id': 
'2f3d0382-ea82-4cdc-958e-84fbadadc710', + 'ext': 'mp4', + 'title': 'Tuttle Twins Season 1, Episode 1: When Laws Give You Lemons', + 'description': 'md5:73b704897c20ab59c433a9c0a8202d5e', + 'thumbnail': r're:^https?://images.angelstudios.com/image/upload/angel-app/.*$', + 'duration': 1359.0 + } + }, { + 'url': 'https://www.angel.com/watch/the-chosen/episode/8dfb714d-bca5-4812-8125-24fb9514cd10/season-1/episode-1/i-have-called-you-by-name', + 'md5': 'e4774bad0a5f0ad2e90d175cafdb797d', + 'info_dict': { + 'id': '8dfb714d-bca5-4812-8125-24fb9514cd10', + 'ext': 'mp4', + 'title': 'The Chosen Season 1, Episode 1: I Have Called You By Name', + 'description': 'md5:aadfb4827a94415de5ff6426e6dee3be', + 'thumbnail': r're:^https?://images.angelstudios.com/image/upload/angel-app/.*$', + 'duration': 3276.0 + } + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + json_ld = self._search_json_ld(webpage, video_id) + + formats, subtitles = self._extract_m3u8_formats_and_subtitles( + json_ld.pop('url'), video_id, note='Downloading HD m3u8 information') + + info_dict = { + 'id': video_id, + 'title': self._og_search_title(webpage), + 'description': self._og_search_description(webpage), + 'formats': formats, + 'subtitles': subtitles + } + + # Angel uses cloudinary in the background and supports image transformations. + # We remove these transformations and return the source file + base_thumbnail_url = url_or_none(self._og_search_thumbnail(webpage)) or json_ld.pop('thumbnails') + if base_thumbnail_url: + info_dict['thumbnail'] = re.sub(r'(/upload)/.+(/angel-app/.+)$', r'\1\2', base_thumbnail_url) + + return merge_dicts(info_dict, json_ld) diff --git a/plugins/youtube_download/yt_dlp/extractor/animelab.py b/plugins/youtube_download/yt_dlp/extractor/animelab.py deleted file mode 100644 index 4fb7ee4..0000000 --- a/plugins/youtube_download/yt_dlp/extractor/animelab.py +++ /dev/null @@ -1,285 +0,0 @@ -# coding: utf-8 -from __future__ import unicode_literals - -from .common import InfoExtractor - -from ..utils import ( - ExtractorError, - urlencode_postdata, - int_or_none, - str_or_none, - determine_ext, -) - -from ..compat import compat_HTTPError - - -class AnimeLabBaseIE(InfoExtractor): - _LOGIN_REQUIRED = True - _LOGIN_URL = 'https://www.animelab.com/login' - _NETRC_MACHINE = 'animelab' - - def _login(self): - def is_logged_in(login_webpage): - return 'Sign In' not in login_webpage - - login_page = self._download_webpage( - self._LOGIN_URL, None, 'Downloading login page') - - # Check if already logged in - if is_logged_in(login_page): - return - - (username, password) = self._get_login_info() - if username is None and self._LOGIN_REQUIRED: - self.raise_login_required('Login is required to access any AnimeLab content') - - login_form = { - 'email': username, - 'password': password, - } - - try: - response = self._download_webpage( - self._LOGIN_URL, None, 'Logging in', 'Wrong login info', - data=urlencode_postdata(login_form), - headers={'Content-Type': 'application/x-www-form-urlencoded'}) - except ExtractorError as e: - if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400: - raise ExtractorError('Unable to log in (wrong credentials?)', expected=True) - else: - raise - - # if login was successful - if is_logged_in(response): - return - - raise ExtractorError('Unable to login (cannot verify if logged in)') - - def _real_initialize(self): - self._login() - - -class AnimeLabIE(AnimeLabBaseIE): - _VALID_URL = 
r'https?://(?:www\.)?animelab\.com/player/(?P<id>[^/]+)' - - # the following tests require authentication, but a free account will suffice - # just set 'usenetrc' to true in test/local_parameters.json if you use a .netrc file - # or you can set 'username' and 'password' there - # the tests also select a specific format so that the same video is downloaded - # regardless of whether the user is premium or not (needs testing on a premium account) - _TEST = { - 'url': 'https://www.animelab.com/player/fullmetal-alchemist-brotherhood-episode-42', - 'md5': '05bde4b91a5d1ff46ef5b94df05b0f7f', - 'info_dict': { - 'id': '383', - 'ext': 'mp4', - 'display_id': 'fullmetal-alchemist-brotherhood-episode-42', - 'title': 'Fullmetal Alchemist: Brotherhood - Episode 42 - Signs of a Counteroffensive', - 'description': 'md5:103eb61dd0a56d3dfc5dbf748e5e83f4', - 'series': 'Fullmetal Alchemist: Brotherhood', - 'episode': 'Signs of a Counteroffensive', - 'episode_number': 42, - 'duration': 1469, - 'season': 'Season 1', - 'season_number': 1, - 'season_id': '38', - }, - 'params': { - 'format': '[format_id=21711_yeshardsubbed_ja-JP][height=480]', - }, - 'skip': 'All AnimeLab content requires authentication', - } - - def _real_extract(self, url): - display_id = self._match_id(url) - - # unfortunately we can get different URLs for the same formats - # e.g. if we are using a "free" account so no dubs available - # (so _remove_duplicate_formats is not effective) - # so we use a dictionary as a workaround - formats = {} - for language_option_url in ('https://www.animelab.com/player/%s/subtitles', - 'https://www.animelab.com/player/%s/dubbed'): - actual_url = language_option_url % display_id - webpage = self._download_webpage(actual_url, display_id, 'Downloading URL ' + actual_url) - - video_collection = self._parse_json(self._search_regex(r'new\s+?AnimeLabApp\.VideoCollection\s*?\((.*?)\);', webpage, 'AnimeLab VideoCollection'), display_id) - position = int_or_none(self._search_regex(r'playlistPosition\s*?=\s*?(\d+)', webpage, 'Playlist Position')) - - raw_data = video_collection[position]['videoEntry'] - - video_id = str_or_none(raw_data['id']) - - # create a title from many sources (while grabbing other info) - # TODO use more fallback sources to get some of these - series = raw_data.get('showTitle') - video_type = raw_data.get('videoEntryType', {}).get('name') - episode_number = raw_data.get('episodeNumber') - episode_name = raw_data.get('name') - - title_parts = (series, video_type, episode_number, episode_name) - if None not in title_parts: - title = '%s - %s %s - %s' % title_parts - else: - title = episode_name - - description = raw_data.get('synopsis') or self._og_search_description(webpage, default=None) - - duration = int_or_none(raw_data.get('duration')) - - thumbnail_data = raw_data.get('images', []) - thumbnails = [] - for thumbnail in thumbnail_data: - for instance in thumbnail['imageInstances']: - image_data = instance.get('imageInfo', {}) - thumbnails.append({ - 'id': str_or_none(image_data.get('id')), - 'url': image_data.get('fullPath'), - 'width': image_data.get('width'), - 'height': image_data.get('height'), - }) - - season_data = raw_data.get('season', {}) or {} - season = str_or_none(season_data.get('name')) - season_number = int_or_none(season_data.get('seasonNumber')) - season_id = str_or_none(season_data.get('id')) - - for video_data in raw_data['videoList']: - current_video_list = {} - current_video_list['language'] = video_data.get('language', {}).get('languageCode') - - is_hardsubbed = 
video_data.get('hardSubbed') - - for video_instance in video_data['videoInstances']: - httpurl = video_instance.get('httpUrl') - url = httpurl if httpurl else video_instance.get('rtmpUrl') - if url is None: - # this video format is unavailable to the user (not premium etc.) - continue - - current_format = current_video_list.copy() - - format_id_parts = [] - - format_id_parts.append(str_or_none(video_instance.get('id'))) - - if is_hardsubbed is not None: - if is_hardsubbed: - format_id_parts.append('yeshardsubbed') - else: - format_id_parts.append('nothardsubbed') - - format_id_parts.append(current_format['language']) - - format_id = '_'.join([x for x in format_id_parts if x is not None]) - - ext = determine_ext(url) - if ext == 'm3u8': - for format_ in self._extract_m3u8_formats( - url, video_id, m3u8_id=format_id, fatal=False): - formats[format_['format_id']] = format_ - continue - elif ext == 'mpd': - for format_ in self._extract_mpd_formats( - url, video_id, mpd_id=format_id, fatal=False): - formats[format_['format_id']] = format_ - continue - - current_format['url'] = url - quality_data = video_instance.get('videoQuality') - if quality_data: - quality = quality_data.get('name') or quality_data.get('description') - else: - quality = None - - height = None - if quality: - height = int_or_none(self._search_regex(r'(\d+)p?$', quality, 'Video format height', default=None)) - - if height is None: - self.report_warning('Could not get height of video') - else: - current_format['height'] = height - current_format['format_id'] = format_id - - formats[current_format['format_id']] = current_format - - formats = list(formats.values()) - self._sort_formats(formats) - - return { - 'id': video_id, - 'display_id': display_id, - 'title': title, - 'description': description, - 'series': series, - 'episode': episode_name, - 'episode_number': int_or_none(episode_number), - 'thumbnails': thumbnails, - 'duration': duration, - 'formats': formats, - 'season': season, - 'season_number': season_number, - 'season_id': season_id, - } - - -class AnimeLabShowsIE(AnimeLabBaseIE): - _VALID_URL = r'https?://(?:www\.)?animelab\.com/shows/(?P<id>[^/]+)' - - _TEST = { - 'url': 'https://www.animelab.com/shows/attack-on-titan', - 'info_dict': { - 'id': '45', - 'title': 'Attack on Titan', - 'description': 'md5:989d95a2677e9309368d5cf39ba91469', - }, - 'playlist_count': 59, - 'skip': 'All AnimeLab content requires authentication', - } - - def _real_extract(self, url): - _BASE_URL = 'http://www.animelab.com' - _SHOWS_API_URL = '/api/videoentries/show/videos/' - display_id = self._match_id(url) - - webpage = self._download_webpage(url, display_id, 'Downloading requested URL') - - show_data_str = self._search_regex(r'({"id":.*}),\svideoEntry', webpage, 'AnimeLab show data') - show_data = self._parse_json(show_data_str, display_id) - - show_id = str_or_none(show_data.get('id')) - title = show_data.get('name') - description = show_data.get('shortSynopsis') or show_data.get('longSynopsis') - - entries = [] - for season in show_data['seasons']: - season_id = season['id'] - get_data = urlencode_postdata({ - 'seasonId': season_id, - 'limit': 1000, - }) - # despite using urlencode_postdata, we are sending a GET request - target_url = _BASE_URL + _SHOWS_API_URL + show_id + "?" 
+ get_data.decode('utf-8') - response = self._download_webpage( - target_url, - None, 'Season id %s' % season_id) - - season_data = self._parse_json(response, display_id) - - for video_data in season_data['list']: - entries.append(self.url_result( - _BASE_URL + '/player/' + video_data['slug'], 'AnimeLab', - str_or_none(video_data.get('id')), video_data.get('name') - )) - - return { - '_type': 'playlist', - 'id': show_id, - 'title': title, - 'description': description, - 'entries': entries, - } - -# TODO implement myqueue diff --git a/plugins/youtube_download/yt_dlp/extractor/animeondemand.py b/plugins/youtube_download/yt_dlp/extractor/animeondemand.py deleted file mode 100644 index 5694f72..0000000 --- a/plugins/youtube_download/yt_dlp/extractor/animeondemand.py +++ /dev/null @@ -1,291 +0,0 @@ -from __future__ import unicode_literals - -import re - -from .common import InfoExtractor -from ..compat import compat_str -from ..utils import ( - determine_ext, - extract_attributes, - ExtractorError, - join_nonempty, - url_or_none, - urlencode_postdata, - urljoin, -) - - -class AnimeOnDemandIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?anime-on-demand\.de/anime/(?P<id>\d+)' - _LOGIN_URL = 'https://www.anime-on-demand.de/users/sign_in' - _APPLY_HTML5_URL = 'https://www.anime-on-demand.de/html5apply' - _NETRC_MACHINE = 'animeondemand' - # German-speaking countries of Europe - _GEO_COUNTRIES = ['AT', 'CH', 'DE', 'LI', 'LU'] - _TESTS = [{ - # jap, OmU - 'url': 'https://www.anime-on-demand.de/anime/161', - 'info_dict': { - 'id': '161', - 'title': 'Grimgar, Ashes and Illusions (OmU)', - 'description': 'md5:6681ce3c07c7189d255ac6ab23812d31', - }, - 'playlist_mincount': 4, - }, { - # Film wording is used instead of Episode, ger/jap, Dub/OmU - 'url': 'https://www.anime-on-demand.de/anime/39', - 'only_matching': True, - }, { - # Episodes without titles, jap, OmU - 'url': 'https://www.anime-on-demand.de/anime/162', - 'only_matching': True, - }, { - # ger/jap, Dub/OmU, account required - 'url': 'https://www.anime-on-demand.de/anime/169', - 'only_matching': True, - }, { - # Full length film, non-series, ger/jap, Dub/OmU, account required - 'url': 'https://www.anime-on-demand.de/anime/185', - 'only_matching': True, - }, { - # Flash videos - 'url': 'https://www.anime-on-demand.de/anime/12', - 'only_matching': True, - }] - - def _login(self): - username, password = self._get_login_info() - if username is None: - return - - login_page = self._download_webpage( - self._LOGIN_URL, None, 'Downloading login page') - - if '>Our licensing terms allow the distribution of animes only to German-speaking countries of Europe' in login_page: - self.raise_geo_restricted( - '%s is only available in German-speaking countries of Europe' % self.IE_NAME) - - login_form = self._form_hidden_inputs('new_user', login_page) - - login_form.update({ - 'user[login]': username, - 'user[password]': password, - }) - - post_url = self._search_regex( - r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page, - 'post url', default=self._LOGIN_URL, group='url') - - if not post_url.startswith('http'): - post_url = urljoin(self._LOGIN_URL, post_url) - - response = self._download_webpage( - post_url, None, 'Logging in', - data=urlencode_postdata(login_form), headers={ - 'Referer': self._LOGIN_URL, - }) - - if all(p not in response for p in ('>Logout<', 'href="/users/sign_out"')): - error = self._search_regex( - r'<p[^>]+\bclass=(["\'])(?:(?!\1).)*\balert\b(?:(?!\1).)*\1[^>]*>(?P<error>.+?)</p>', - response, 'error', default=None, 
group='error') - if error: - raise ExtractorError('Unable to login: %s' % error, expected=True) - raise ExtractorError('Unable to log in') - - def _real_initialize(self): - self._login() - - def _real_extract(self, url): - anime_id = self._match_id(url) - - webpage = self._download_webpage(url, anime_id) - - if 'data-playlist=' not in webpage: - self._download_webpage( - self._APPLY_HTML5_URL, anime_id, - 'Activating HTML5 beta', 'Unable to apply HTML5 beta') - webpage = self._download_webpage(url, anime_id) - - csrf_token = self._html_search_meta( - 'csrf-token', webpage, 'csrf token', fatal=True) - - anime_title = self._html_search_regex( - r'(?s)<h1[^>]+itemprop="name"[^>]*>(.+?)</h1>', - webpage, 'anime name') - anime_description = self._html_search_regex( - r'(?s)<div[^>]+itemprop="description"[^>]*>(.+?)</div>', - webpage, 'anime description', default=None) - - def extract_info(html, video_id, num=None): - title, description = [None] * 2 - formats = [] - - for input_ in re.findall( - r'<input[^>]+class=["\'].*?streamstarter[^>]+>', html): - attributes = extract_attributes(input_) - title = attributes.get('data-dialog-header') - playlist_urls = [] - for playlist_key in ('data-playlist', 'data-otherplaylist', 'data-stream'): - playlist_url = attributes.get(playlist_key) - if isinstance(playlist_url, compat_str) and re.match( - r'/?[\da-zA-Z]+', playlist_url): - playlist_urls.append(attributes[playlist_key]) - if not playlist_urls: - continue - - lang = attributes.get('data-lang') - lang_note = attributes.get('value') - - for playlist_url in playlist_urls: - kind = self._search_regex( - r'videomaterialurl/\d+/([^/]+)/', - playlist_url, 'media kind', default=None) - format_id = join_nonempty(lang, kind) if lang or kind else str(num) - format_note = join_nonempty(kind, lang_note, delim=', ') - item_id_list = [] - if format_id: - item_id_list.append(format_id) - item_id_list.append('videomaterial') - playlist = self._download_json( - urljoin(url, playlist_url), video_id, - 'Downloading %s JSON' % ' '.join(item_id_list), - headers={ - 'X-Requested-With': 'XMLHttpRequest', - 'X-CSRF-Token': csrf_token, - 'Referer': url, - 'Accept': 'application/json, text/javascript, */*; q=0.01', - }, fatal=False) - if not playlist: - continue - stream_url = url_or_none(playlist.get('streamurl')) - if stream_url: - rtmp = re.search( - r'^(?P<url>rtmpe?://(?P<host>[^/]+)/(?P<app>.+/))(?P<playpath>mp[34]:.+)', - stream_url) - if rtmp: - formats.append({ - 'url': rtmp.group('url'), - 'app': rtmp.group('app'), - 'play_path': rtmp.group('playpath'), - 'page_url': url, - 'player_url': 'https://www.anime-on-demand.de/assets/jwplayer.flash-55abfb34080700304d49125ce9ffb4a6.swf', - 'rtmp_real_time': True, - 'format_id': 'rtmp', - 'ext': 'flv', - }) - continue - start_video = playlist.get('startvideo', 0) - playlist = playlist.get('playlist') - if not playlist or not isinstance(playlist, list): - continue - playlist = playlist[start_video] - title = playlist.get('title') - if not title: - continue - description = playlist.get('description') - for source in playlist.get('sources', []): - file_ = source.get('file') - if not file_: - continue - ext = determine_ext(file_) - format_id = join_nonempty( - lang, kind, - 'hls' if ext == 'm3u8' else None, - 'dash' if source.get('type') == 'video/dash' or ext == 'mpd' else None) - if ext == 'm3u8': - file_formats = self._extract_m3u8_formats( - file_, video_id, 'mp4', - entry_protocol='m3u8_native', m3u8_id=format_id, fatal=False) - elif source.get('type') == 'video/dash' or ext 
== 'mpd': - continue - file_formats = self._extract_mpd_formats( - file_, video_id, mpd_id=format_id, fatal=False) - else: - continue - for f in file_formats: - f.update({ - 'language': lang, - 'format_note': format_note, - }) - formats.extend(file_formats) - - return { - 'title': title, - 'description': description, - 'formats': formats, - } - - def extract_entries(html, video_id, common_info, num=None): - info = extract_info(html, video_id, num) - - if info['formats']: - self._sort_formats(info['formats']) - f = common_info.copy() - f.update(info) - yield f - - # Extract teaser/trailer only when full episode is not available - if not info['formats']: - m = re.search( - r'data-dialog-header=(["\'])(?P<title>.+?)\1[^>]+href=(["\'])(?P<href>.+?)\3[^>]*>(?P<kind>Teaser|Trailer)<', - html) - if m: - f = common_info.copy() - f.update({ - 'id': '%s-%s' % (f['id'], m.group('kind').lower()), - 'title': m.group('title'), - 'url': urljoin(url, m.group('href')), - }) - yield f - - def extract_episodes(html): - for num, episode_html in enumerate(re.findall( - r'(?s)<h3[^>]+class="episodebox-title".+?>Episodeninhalt<', html), 1): - episodebox_title = self._search_regex( - (r'class="episodebox-title"[^>]+title=(["\'])(?P<title>.+?)\1', - r'class="episodebox-title"[^>]+>(?P<title>.+?)<'), - episode_html, 'episodebox title', default=None, group='title') - if not episodebox_title: - continue - - episode_number = int(self._search_regex( - r'(?:Episode|Film)\s*(\d+)', - episodebox_title, 'episode number', default=num)) - episode_title = self._search_regex( - r'(?:Episode|Film)\s*\d+\s*-\s*(.+)', - episodebox_title, 'episode title', default=None) - - video_id = 'episode-%d' % episode_number - - common_info = { - 'id': video_id, - 'series': anime_title, - 'episode': episode_title, - 'episode_number': episode_number, - } - - for e in extract_entries(episode_html, video_id, common_info): - yield e - - def extract_film(html, video_id): - common_info = { - 'id': anime_id, - 'title': anime_title, - 'description': anime_description, - } - for e in extract_entries(html, video_id, common_info): - yield e - - def entries(): - has_episodes = False - for e in extract_episodes(webpage): - has_episodes = True - yield e - - if not has_episodes: - for e in extract_film(webpage, anime_id): - yield e - - return self.playlist_result( - entries(), anime_id, anime_title, anime_description) diff --git a/plugins/youtube_download/yt_dlp/extractor/ant1newsgr.py b/plugins/youtube_download/yt_dlp/extractor/ant1newsgr.py new file mode 100644 index 0000000..7b384b2 --- /dev/null +++ b/plugins/youtube_download/yt_dlp/extractor/ant1newsgr.py @@ -0,0 +1,128 @@ +import urllib.parse + +from .common import InfoExtractor +from ..utils import ( + HEADRequest, + ExtractorError, + determine_ext, + scale_thumbnails_to_max_format_width, +) + + +class Ant1NewsGrBaseIE(InfoExtractor): + def _download_and_extract_api_data(self, video_id, netloc, cid=None): + url = f'{self.http_scheme()}//{netloc}{self._API_PATH}' + info = self._download_json(url, video_id, query={'cid': cid or video_id}) + try: + source = info['url'] + except KeyError: + raise ExtractorError('no source found for %s' % video_id) + formats, subs = (self._extract_m3u8_formats_and_subtitles(source, video_id, 'mp4') + if determine_ext(source) == 'm3u8' else ([{'url': source}], {})) + thumbnails = scale_thumbnails_to_max_format_width( + formats, [{'url': info['thumb']}], r'(?<=/imgHandler/)\d+') + return { + 'id': video_id, + 'title': info.get('title'), + 'thumbnails': thumbnails, + 
'formats': formats, + 'subtitles': subs, + } + + +class Ant1NewsGrWatchIE(Ant1NewsGrBaseIE): + IE_NAME = 'ant1newsgr:watch' + IE_DESC = 'ant1news.gr videos' + _VALID_URL = r'https?://(?P<netloc>(?:www\.)?ant1news\.gr)/watch/(?P<id>\d+)/' + _API_PATH = '/templates/data/player' + + _TESTS = [{ + 'url': 'https://www.ant1news.gr/watch/1506168/ant1-news-09112021-stis-18-45', + 'md5': '95925e6b32106754235f2417e0d2dfab', + 'info_dict': { + 'id': '1506168', + 'ext': 'mp4', + 'title': 'md5:0ad00fa66ecf8aa233d26ab0dba7514a', + 'description': 'md5:18665af715a6dcfeac1d6153a44f16b0', + 'thumbnail': 'https://ant1media.azureedge.net/imgHandler/640/26d46bf6-8158-4f02-b197-7096c714b2de.jpg', + }, + }] + + def _real_extract(self, url): + video_id, netloc = self._match_valid_url(url).group('id', 'netloc') + webpage = self._download_webpage(url, video_id) + info = self._download_and_extract_api_data(video_id, netloc) + info['description'] = self._og_search_description(webpage) + return info + + +class Ant1NewsGrArticleIE(Ant1NewsGrBaseIE): + IE_NAME = 'ant1newsgr:article' + IE_DESC = 'ant1news.gr articles' + _VALID_URL = r'https?://(?:www\.)?ant1news\.gr/[^/]+/article/(?P<id>\d+)/' + + _TESTS = [{ + 'url': 'https://www.ant1news.gr/afieromata/article/549468/o-tzeims-mpont-sta-meteora-oi-apeiles-kai-o-xesikomos-ton-kalogeron', + 'md5': '294f18331bb516539d72d85a82887dcc', + 'info_dict': { + 'id': '_xvg/m_cmbatw=', + 'ext': 'mp4', + 'title': 'md5:a93e8ecf2e4073bfdffcb38f59945411', + 'timestamp': 1603092840, + 'upload_date': '20201019', + 'thumbnail': 'https://ant1media.azureedge.net/imgHandler/640/756206d2-d640-40e2-b201-3555abdfc0db.jpg', + }, + }, { + 'url': 'https://ant1news.gr/Society/article/620286/symmoria-anilikon-dikigoros-thymaton-ithelan-na-toys-apoteleiosoyn', + 'info_dict': { + 'id': '620286', + 'title': 'md5:91fe569e952e4d146485740ae927662b', + }, + 'playlist_mincount': 2, + 'params': { + 'skip_download': True, + }, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + info = self._search_json_ld(webpage, video_id, expected_type='NewsArticle') + embed_urls = list(Ant1NewsGrEmbedIE._extract_embed_urls(url, webpage)) + if not embed_urls: + raise ExtractorError('no videos found for %s' % video_id, expected=True) + return self.playlist_from_matches( + embed_urls, video_id, info.get('title'), ie=Ant1NewsGrEmbedIE.ie_key(), + video_kwargs={'url_transparent': True, 'timestamp': info.get('timestamp')}) + + +class Ant1NewsGrEmbedIE(Ant1NewsGrBaseIE): + IE_NAME = 'ant1newsgr:embed' + IE_DESC = 'ant1news.gr embedded videos' + _BASE_PLAYER_URL_RE = r'(?:https?:)?//(?:[a-zA-Z0-9\-]+\.)?(?:antenna|ant1news)\.gr/templates/pages/player' + _VALID_URL = rf'{_BASE_PLAYER_URL_RE}\?([^#]+&)?cid=(?P<id>[^#&]+)' + _EMBED_REGEX = [rf'<iframe[^>]+?src=(?P<_q1>["\'])(?P<url>{_BASE_PLAYER_URL_RE}\?(?:(?!(?P=_q1)).)+)(?P=_q1)'] + _API_PATH = '/news/templates/data/jsonPlayer' + + _TESTS = [{ + 'url': 'https://www.antenna.gr/templates/pages/player?cid=3f_li_c_az_jw_y_u=&w=670&h=377', + 'md5': 'dfc58c3a11a5a9aad2ba316ed447def3', + 'info_dict': { + 'id': '3f_li_c_az_jw_y_u=', + 'ext': 'mp4', + 'title': 'md5:a30c93332455f53e1e84ae0724f0adf7', + 'thumbnail': 'https://ant1media.azureedge.net/imgHandler/640/bbe31201-3f09-4a4e-87f5-8ad2159fffe2.jpg', + }, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + canonical_url = self._request_webpage( + HEADRequest(url), video_id, + note='Resolve canonical player URL', + errnote='Could not resolve 
canonical player URL').geturl() + _, netloc, _, _, query, _ = urllib.parse.urlparse(canonical_url) + cid = urllib.parse.parse_qs(query)['cid'][0] + + return self._download_and_extract_api_data(video_id, netloc, cid=cid) diff --git a/plugins/youtube_download/yt_dlp/extractor/anvato.py b/plugins/youtube_download/yt_dlp/extractor/anvato.py index 0d444fc..79bfe41 100644 --- a/plugins/youtube_download/yt_dlp/extractor/anvato.py +++ b/plugins/youtube_download/yt_dlp/extractor/anvato.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - import base64 import hashlib import json @@ -10,38 +7,68 @@ import time from .common import InfoExtractor from ..aes import aes_encrypt -from ..compat import compat_str from ..utils import ( bytes_to_intlist, determine_ext, - intlist_to_bytes, int_or_none, + intlist_to_bytes, join_nonempty, + smuggle_url, strip_jsonp, + traverse_obj, unescapeHTML, unsmuggle_url, ) -# This import causes a ModuleNotFoundError on some systems for unknown reason. -# See issues: -# https://github.com/yt-dlp/yt-dlp/issues/35 -# https://github.com/ytdl-org/youtube-dl/issues/27449 -# https://github.com/animelover1984/youtube-dl/issues/17 -try: - from .anvato_token_generator import NFLTokenGenerator -except ImportError: - NFLTokenGenerator = None - def md5_text(s): - if not isinstance(s, compat_str): - s = compat_str(s) - return hashlib.md5(s.encode('utf-8')).hexdigest() + return hashlib.md5(str(s).encode()).hexdigest() class AnvatoIE(InfoExtractor): _VALID_URL = r'anvato:(?P<access_key_or_mcp>[^:]+):(?P<id>\d+)' + _API_BASE_URL = 'https://tkx.mp.lura.live/rest/v2' + _ANVP_RE = r'<script[^>]+\bdata-anvp\s*=\s*(["\'])(?P<anvp>(?:(?!\1).)+)\1' + _AUTH_KEY = b'\x31\xc2\x42\x84\x9e\x73\xa0\xce' # from anvplayer.min.js + + _TESTS = [{ + # from https://www.nfl.com/videos/baker-mayfield-s-game-changing-plays-from-3-td-game-week-14 + 'url': 'anvato:GXvEgwyJeWem8KCYXfeoHWknwP48Mboj:899441', + 'md5': '921919dab3cd0b849ff3d624831ae3e2', + 'info_dict': { + 'id': '899441', + 'ext': 'mp4', + 'title': 'Baker Mayfield\'s game-changing plays from 3-TD game Week 14', + 'description': 'md5:85e05a3cc163f8c344340f220521136d', + 'upload_date': '20201215', + 'timestamp': 1608009755, + 'thumbnail': r're:^https?://.*\.jpg', + 'uploader': 'NFL', + 'tags': ['Baltimore Ravens at Cleveland Browns (2020-REG-14)', 'Baker Mayfield', 'Game Highlights', + 'Player Highlights', 'Cleveland Browns', 'league'], + 'duration': 157, + 'categories': ['Entertainment', 'Game', 'Highlights'], + }, + }, { + # from https://ktla.com/news/99-year-old-woman-learns-to-fly-in-torrance-checks-off-bucket-list-dream/ + 'url': 'anvato:X8POa4zpGZMmeiq0wqiO8IP5rMqQM9VN:8032455', + 'md5': '837718bcfb3a7778d022f857f7a9b19e', + 'info_dict': { + 'id': '8032455', + 'ext': 'mp4', + 'title': '99-year-old woman learns to fly plane in Torrance, checks off bucket list dream', + 'description': 'md5:0a12bab8159445e78f52a297a35c6609', + 'upload_date': '20220928', + 'timestamp': 1664408881, + 'thumbnail': r're:^https?://.*\.jpg', + 'uploader': 'LIN', + 'tags': ['video', 'news', '5live'], + 'duration': 155, + 'categories': ['News'], + }, + }] + # Copied from anvplayer.min.js _ANVACK_TABLE = { 'nbcu_nbcd_desktop_web_prod_93d8ead38ce2024f8f544b78306fbd15895ae5e6': 'NNemUkySjxLyPTKvZRiGntBIjEyK8uqicjMakIaQ', @@ -214,86 +241,74 @@ class AnvatoIE(InfoExtractor): 'telemundo': 'anvato_mcp_telemundo_web_prod_c5278d51ad46fda4b6ca3d0ea44a7846a054f582' } + def _generate_nfl_token(self, anvack, mcp_id): + reroute = self._download_json( + 
'https://api.nfl.com/v1/reroute', mcp_id, data=b'grant_type=client_credentials', + headers={'X-Domain-Id': 100}, note='Fetching token info') + token_type = reroute.get('token_type') or 'Bearer' + auth_token = f'{token_type} {reroute["access_token"]}' + response = self._download_json( + 'https://api.nfl.com/v3/shield/', mcp_id, data=json.dumps({ + 'query': '''{ + viewer { + mediaToken(anvack: "%s", id: %s) { + token + } + } +}''' % (anvack, mcp_id), + }).encode(), headers={ + 'Authorization': auth_token, + 'Content-Type': 'application/json', + }, note='Fetching NFL API token') + return traverse_obj(response, ('data', 'viewer', 'mediaToken', 'token')) + _TOKEN_GENERATORS = { - 'GXvEgwyJeWem8KCYXfeoHWknwP48Mboj': NFLTokenGenerator, + 'GXvEgwyJeWem8KCYXfeoHWknwP48Mboj': _generate_nfl_token, } - _API_KEY = '3hwbSuqqT690uxjNYBktSQpa5ZrpYYR0Iofx7NcJHyA' - - _ANVP_RE = r'<script[^>]+\bdata-anvp\s*=\s*(["\'])(?P<anvp>(?:(?!\1).)+)\1' - _AUTH_KEY = b'\x31\xc2\x42\x84\x9e\x73\xa0\xce' - - _TESTS = [{ - # from https://www.boston25news.com/news/watch-humpback-whale-breaches-right-next-to-fishing-boat-near-nh/817484874 - 'url': 'anvato:8v9BEynrwx8EFLYpgfOWcG1qJqyXKlRM:4465496', - 'info_dict': { - 'id': '4465496', - 'ext': 'mp4', - 'title': 'VIDEO: Humpback whale breaches right next to NH boat', - 'description': 'VIDEO: Humpback whale breaches right next to NH boat. Footage courtesy: Zach Fahey.', - 'duration': 22, - 'timestamp': 1534855680, - 'upload_date': '20180821', - 'uploader': 'ANV', - }, - 'params': { - 'skip_download': True, - }, - }, { - # from https://sanfrancisco.cbslocal.com/2016/06/17/source-oakland-cop-on-leave-for-having-girlfriend-help-with-police-reports/ - 'url': 'anvato:DVzl9QRzox3ZZsP9bNu5Li3X7obQOnqP:3417601', - 'only_matching': True, - }] - - def __init__(self, *args, **kwargs): - super(AnvatoIE, self).__init__(*args, **kwargs) - self.__server_time = None - def _server_time(self, access_key, video_id): - if self.__server_time is not None: - return self.__server_time + return int_or_none(traverse_obj(self._download_json( + f'{self._API_BASE_URL}/server_time', video_id, query={'anvack': access_key}, + note='Fetching server time', fatal=False), 'server_time')) or int(time.time()) - self.__server_time = int(self._download_json( - self._api_prefix(access_key) + 'server_time?anvack=' + access_key, video_id, - note='Fetching server time')['server_time']) - - return self.__server_time - - def _api_prefix(self, access_key): - return 'https://tkx2-%s.anvato.net/rest/v2/' % ('prod' if 'prod' in access_key else 'stage') - - def _get_video_json(self, access_key, video_id): + def _get_video_json(self, access_key, video_id, extracted_token): # See et() in anvplayer.min.js, which is an alias of getVideoJSON() - video_data_url = self._api_prefix(access_key) + 'mcp/video/%s?anvack=%s' % (video_id, access_key) + video_data_url = f'{self._API_BASE_URL}/mcp/video/{video_id}?anvack={access_key}' server_time = self._server_time(access_key, video_id) - input_data = '%d~%s~%s' % (server_time, md5_text(video_data_url), md5_text(server_time)) + input_data = f'{server_time}~{md5_text(video_data_url)}~{md5_text(server_time)}' auth_secret = intlist_to_bytes(aes_encrypt( bytes_to_intlist(input_data[:64]), bytes_to_intlist(self._AUTH_KEY))) - - video_data_url += '&X-Anvato-Adst-Auth=' + base64.b64encode(auth_secret).decode('ascii') + query = { + 'X-Anvato-Adst-Auth': base64.b64encode(auth_secret).decode('ascii'), + 'rtyp': 'fp', + } anvrid = md5_text(time.time() * 1000 * random.random())[:30] api = { 'anvrid': 
anvrid, 'anvts': server_time, } - if self._TOKEN_GENERATORS.get(access_key) is not None: - api['anvstk2'] = self._TOKEN_GENERATORS[access_key].generate(self, access_key, video_id) + if extracted_token is not None: + api['anvstk2'] = extracted_token + elif self._TOKEN_GENERATORS.get(access_key) is not None: + api['anvstk2'] = self._TOKEN_GENERATORS[access_key](self, access_key, video_id) + elif self._ANVACK_TABLE.get(access_key) is not None: + api['anvstk'] = md5_text(f'{access_key}|{anvrid}|{server_time}|{self._ANVACK_TABLE[access_key]}') else: - api['anvstk'] = md5_text('%s|%s|%d|%s' % ( - access_key, anvrid, server_time, - self._ANVACK_TABLE.get(access_key, self._API_KEY))) + api['anvstk2'] = 'default' return self._download_json( - video_data_url, video_id, transform_source=strip_jsonp, - data=json.dumps({'api': api}).encode('utf-8')) + video_data_url, video_id, transform_source=strip_jsonp, query=query, + data=json.dumps({'api': api}, separators=(',', ':')).encode('utf-8')) - def _get_anvato_videos(self, access_key, video_id): - video_data = self._get_video_json(access_key, video_id) + def _get_anvato_videos(self, access_key, video_id, token): + video_data = self._get_video_json(access_key, video_id, token) formats = [] for published_url in video_data['published_urls']: - video_url = published_url['embed_url'] + video_url = published_url.get('embed_url') + if not video_url: + continue media_format = published_url.get('format') ext = determine_ext(video_url) @@ -308,15 +323,27 @@ class AnvatoIE(InfoExtractor): 'tbr': tbr or None, } - if media_format == 'm3u8' and tbr is not None: + vtt_subs, hls_subs = {}, {} + if media_format == 'vtt': + _, vtt_subs = self._extract_m3u8_formats_and_subtitles( + video_url, video_id, m3u8_id='vtt', fatal=False) + continue + elif media_format == 'm3u8' and tbr is not None: a_format.update({ 'format_id': join_nonempty('hls', tbr), 'ext': 'mp4', }) elif media_format == 'm3u8-variant' or ext == 'm3u8': - formats.extend(self._extract_m3u8_formats( - video_url, video_id, 'mp4', entry_protocol='m3u8_native', - m3u8_id='hls', fatal=False)) + # For some videos the initial m3u8 URL returns JSON instead + manifest_json = self._download_json( + video_url, video_id, note='Downloading manifest JSON', errnote=False) + if manifest_json: + video_url = manifest_json.get('master_m3u8') + if not video_url: + continue + hls_fmts, hls_subs = self._extract_m3u8_formats_and_subtitles( + video_url, video_id, ext='mp4', m3u8_id='hls', fatal=False) + formats.extend(hls_fmts) continue elif ext == 'mp3' or media_format == 'mp3': a_format['vcodec'] = 'none' @@ -327,8 +354,6 @@ class AnvatoIE(InfoExtractor): }) formats.append(a_format) - self._sort_formats(formats) - subtitles = {} for caption in video_data.get('captions', []): a_caption = { @@ -336,6 +361,7 @@ class AnvatoIE(InfoExtractor): 'ext': 'tt' if caption.get('format') == 'SMPTE-TT' else None } subtitles.setdefault(caption['language'], []).append(a_caption) + subtitles = self._merge_subtitles(subtitles, hls_subs, vtt_subs) return { 'id': video_id, @@ -352,30 +378,19 @@ class AnvatoIE(InfoExtractor): 'subtitles': subtitles, } - @staticmethod - def _extract_urls(ie, webpage, video_id): - entries = [] - for mobj in re.finditer(AnvatoIE._ANVP_RE, webpage): - anvplayer_data = ie._parse_json( - mobj.group('anvp'), video_id, transform_source=unescapeHTML, - fatal=False) - if not anvplayer_data: - continue - video = anvplayer_data.get('video') - if not isinstance(video, compat_str) or not video.isdigit(): - continue - access_key = 
anvplayer_data.get('accessKey') - if not access_key: - mcp = anvplayer_data.get('mcp') - if mcp: - access_key = AnvatoIE._MCP_TO_ACCESS_KEY_TABLE.get( - mcp.lower()) + @classmethod + def _extract_from_webpage(cls, url, webpage): + for mobj in re.finditer(cls._ANVP_RE, webpage): + anvplayer_data = unescapeHTML(json.loads(mobj.group('anvp'))) or {} + video_id, access_key = anvplayer_data.get('video'), anvplayer_data.get('accessKey') if not access_key: + access_key = cls._MCP_TO_ACCESS_KEY_TABLE.get((anvplayer_data.get('mcp') or '').lower()) + if not (video_id or '').isdigit() or not access_key: continue - entries.append(ie.url_result( - 'anvato:%s:%s' % (access_key, video), ie=AnvatoIE.ie_key(), - video_id=video)) - return entries + url = f'anvato:{access_key}:{video_id}' + if anvplayer_data.get('token'): + url = smuggle_url(url, {'token': anvplayer_data['token']}) + yield cls.url_result(url, AnvatoIE, video_id) def _extract_anvato_videos(self, webpage, video_id): anvplayer_data = self._parse_json( @@ -383,7 +398,7 @@ class AnvatoIE(InfoExtractor): self._ANVP_RE, webpage, 'Anvato player data', group='anvp'), video_id) return self._get_anvato_videos( - anvplayer_data['accessKey'], anvplayer_data['video']) + anvplayer_data['accessKey'], anvplayer_data['video'], 'default') # cbslocal token = 'default' def _real_extract(self, url): url, smuggled_data = unsmuggle_url(url, {}) @@ -391,9 +406,7 @@ class AnvatoIE(InfoExtractor): 'countries': smuggled_data.get('geo_countries'), }) - mobj = self._match_valid_url(url) - access_key, video_id = mobj.group('access_key_or_mcp', 'id') + access_key, video_id = self._match_valid_url(url).group('access_key_or_mcp', 'id') if access_key not in self._ANVACK_TABLE: - access_key = self._MCP_TO_ACCESS_KEY_TABLE.get( - access_key) or access_key - return self._get_anvato_videos(access_key, video_id) + access_key = self._MCP_TO_ACCESS_KEY_TABLE.get(access_key) or access_key + return self._get_anvato_videos(access_key, video_id, smuggled_data.get('token')) diff --git a/plugins/youtube_download/yt_dlp/extractor/anvato_token_generator/__init__.py b/plugins/youtube_download/yt_dlp/extractor/anvato_token_generator/__init__.py deleted file mode 100644 index 6e223db..0000000 --- a/plugins/youtube_download/yt_dlp/extractor/anvato_token_generator/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from __future__ import unicode_literals - -from .nfl import NFLTokenGenerator - -__all__ = [ - 'NFLTokenGenerator', -] diff --git a/plugins/youtube_download/yt_dlp/extractor/anvato_token_generator/common.py b/plugins/youtube_download/yt_dlp/extractor/anvato_token_generator/common.py deleted file mode 100644 index b959a90..0000000 --- a/plugins/youtube_download/yt_dlp/extractor/anvato_token_generator/common.py +++ /dev/null @@ -1,6 +0,0 @@ -from __future__ import unicode_literals - - -class TokenGenerator: - def generate(self, anvack, mcp_id): - raise NotImplementedError('This method must be implemented by subclasses') diff --git a/plugins/youtube_download/yt_dlp/extractor/anvato_token_generator/nfl.py b/plugins/youtube_download/yt_dlp/extractor/anvato_token_generator/nfl.py deleted file mode 100644 index 97a2b24..0000000 --- a/plugins/youtube_download/yt_dlp/extractor/anvato_token_generator/nfl.py +++ /dev/null @@ -1,30 +0,0 @@ -from __future__ import unicode_literals - -import json - -from .common import TokenGenerator - - -class NFLTokenGenerator(TokenGenerator): - _AUTHORIZATION = None - - def generate(ie, anvack, mcp_id): - if not NFLTokenGenerator._AUTHORIZATION: - reroute = 
ie._download_json( - 'https://api.nfl.com/v1/reroute', mcp_id, - data=b'grant_type=client_credentials', - headers={'X-Domain-Id': 100}) - NFLTokenGenerator._AUTHORIZATION = '%s %s' % (reroute.get('token_type') or 'Bearer', reroute['access_token']) - return ie._download_json( - 'https://api.nfl.com/v3/shield/', mcp_id, data=json.dumps({ - 'query': '''{ - viewer { - mediaToken(anvack: "%s", id: %s) { - token - } - } -}''' % (anvack, mcp_id), - }).encode(), headers={ - 'Authorization': NFLTokenGenerator._AUTHORIZATION, - 'Content-Type': 'application/json', - })['data']['viewer']['mediaToken']['token'] diff --git a/plugins/youtube_download/yt_dlp/extractor/aol.py b/plugins/youtube_download/yt_dlp/extractor/aol.py index 4766a2c..6949ca9 100644 --- a/plugins/youtube_download/yt_dlp/extractor/aol.py +++ b/plugins/youtube_download/yt_dlp/extractor/aol.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - import re from .yahoo import YahooIE @@ -12,7 +9,7 @@ from ..utils import ( ) -class AolIE(YahooIE): +class AolIE(YahooIE): # XXX: Do not subclass from concrete IE IE_NAME = 'aol.com' _VALID_URL = r'(?:aol-video:|https?://(?:www\.)?aol\.(?:com|ca|co\.uk|de|jp)/video/(?:[^/]+/)*)(?P<id>\d{9}|[0-9a-f]{24}|[0-9a-f]{8}-(?:[0-9a-f]{4}-){3}[0-9a-f]{12})' @@ -122,7 +119,6 @@ class AolIE(YahooIE): 'height': int_or_none(qs.get('h', [None])[0]), }) formats.append(f) - self._sort_formats(formats) return { 'id': video_id, diff --git a/plugins/youtube_download/yt_dlp/extractor/apa.py b/plugins/youtube_download/yt_dlp/extractor/apa.py index 1736cdf..1ea0b1d 100644 --- a/plugins/youtube_download/yt_dlp/extractor/apa.py +++ b/plugins/youtube_download/yt_dlp/extractor/apa.py @@ -1,8 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - -import re - from .common import InfoExtractor from ..utils import ( determine_ext, @@ -13,6 +8,7 @@ from ..utils import ( class APAIE(InfoExtractor): _VALID_URL = r'(?P<base_url>https?://[^/]+\.apa\.at)/embed/(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})' + _EMBED_REGEX = [r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//[^/]+\.apa\.at/embed/[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}.*?)\1'] _TESTS = [{ 'url': 'http://uvp.apa.at/embed/293f6d17-692a-44e3-9fd5-7b178f3a1029', 'md5': '2b12292faeb0a7d930c778c7a5b4759b', @@ -33,14 +29,6 @@ class APAIE(InfoExtractor): 'only_matching': True, }] - @staticmethod - def _extract_urls(webpage): - return [ - mobj.group('url') - for mobj in re.finditer( - r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//[^/]+\.apa\.at/embed/[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}.*?)\1', - webpage)] - def _real_extract(self, url): mobj = self._match_valid_url(url) video_id, base_url = mobj.group('id', 'base_url') @@ -84,7 +72,6 @@ class APAIE(InfoExtractor): 'format_id': format_id, 'height': height, }) - self._sort_formats(formats) return { 'id': video_id, diff --git a/plugins/youtube_download/yt_dlp/extractor/aparat.py b/plugins/youtube_download/yt_dlp/extractor/aparat.py index 1057233..4a989d8 100644 --- a/plugins/youtube_download/yt_dlp/extractor/aparat.py +++ b/plugins/youtube_download/yt_dlp/extractor/aparat.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - from .common import InfoExtractor from ..utils import ( get_element_by_id, @@ -13,6 +10,7 @@ from ..utils import ( class AparatIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?aparat\.com/(?:v/|video/video/embed/videohash/)(?P<id>[a-zA-Z0-9]+)' + _EMBED_REGEX = [r'<iframe 
.*?src="(?P<url>http://www\.aparat\.com/video/[^"]+)"'] _TESTS = [{ 'url': 'http://www.aparat.com/v/wP8On', @@ -75,7 +73,6 @@ class AparatIE(InfoExtractor): r'(\d+)[pP]', label or '', 'height', default=None)), }) - self._sort_formats(formats) info = self._search_json_ld(webpage, video_id, default={}) diff --git a/plugins/youtube_download/yt_dlp/extractor/appleconnect.py b/plugins/youtube_download/yt_dlp/extractor/appleconnect.py index 494f833..d00b0f9 100644 --- a/plugins/youtube_download/yt_dlp/extractor/appleconnect.py +++ b/plugins/youtube_download/yt_dlp/extractor/appleconnect.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - from .common import InfoExtractor from ..utils import ( str_to_int, diff --git a/plugins/youtube_download/yt_dlp/extractor/applepodcasts.py b/plugins/youtube_download/yt_dlp/extractor/applepodcasts.py index 6a74de7..49bbeab 100644 --- a/plugins/youtube_download/yt_dlp/extractor/applepodcasts.py +++ b/plugins/youtube_download/yt_dlp/extractor/applepodcasts.py @@ -1,9 +1,8 @@ -# coding: utf-8 -from __future__ import unicode_literals - from .common import InfoExtractor from ..utils import ( + clean_html, clean_podcast_url, + get_element_by_class, int_or_none, parse_iso8601, try_get, @@ -14,16 +13,17 @@ class ApplePodcastsIE(InfoExtractor): _VALID_URL = r'https?://podcasts\.apple\.com/(?:[^/]+/)?podcast(?:/[^/]+){1,2}.*?\bi=(?P<id>\d+)' _TESTS = [{ 'url': 'https://podcasts.apple.com/us/podcast/207-whitney-webb-returns/id1135137367?i=1000482637777', - 'md5': 'df02e6acb11c10e844946a39e7222b08', + 'md5': '41dc31cd650143e530d9423b6b5a344f', 'info_dict': { 'id': '1000482637777', 'ext': 'mp3', 'title': '207 - Whitney Webb Returns', - 'description': 'md5:13a73bade02d2e43737751e3987e1399', + 'description': 'md5:75ef4316031df7b41ced4e7b987f79c6', 'upload_date': '20200705', - 'timestamp': 1593921600, - 'duration': 6425, + 'timestamp': 1593932400, + 'duration': 6454, 'series': 'The Tim Dillon Show', + 'thumbnail': 're:.+[.](png|jpe?g|webp)', } }, { 'url': 'https://podcasts.apple.com/podcast/207-whitney-webb-returns/id1135137367?i=1000482637777', @@ -39,24 +39,47 @@ class ApplePodcastsIE(InfoExtractor): def _real_extract(self, url): episode_id = self._match_id(url) webpage = self._download_webpage(url, episode_id) - ember_data = self._parse_json(self._search_regex( - r'id="shoebox-ember-data-store"[^>]*>\s*({.+?})\s*<', - webpage, 'ember data'), episode_id) - ember_data = ember_data.get(episode_id) or ember_data - episode = ember_data['data']['attributes'] + episode_data = {} + ember_data = {} + # new page type 2021-11 + amp_data = self._parse_json(self._search_regex( + r'(?s)id="shoebox-media-api-cache-amp-podcasts"[^>]*>\s*({.+?})\s*<', + webpage, 'AMP data', default='{}'), episode_id, fatal=False) or {} + amp_data = try_get(amp_data, + lambda a: self._parse_json( + next(a[x] for x in iter(a) if episode_id in x), + episode_id), + dict) or {} + amp_data = amp_data.get('d') or [] + episode_data = try_get( + amp_data, + lambda a: next(x for x in a + if x['type'] == 'podcast-episodes' and x['id'] == episode_id), + dict) + if not episode_data: + # try pre 2021-11 page type: TODO: consider deleting if no longer used + ember_data = self._parse_json(self._search_regex( + r'(?s)id="shoebox-ember-data-store"[^>]*>\s*({.+?})\s*<', + webpage, 'ember data'), episode_id) or {} + ember_data = ember_data.get(episode_id) or ember_data + episode_data = try_get(ember_data, lambda x: x['data'], dict) + episode = episode_data['attributes'] description = 
episode.get('description') or {} series = None - for inc in (ember_data.get('included') or []): + for inc in (amp_data or ember_data.get('included') or []): if inc.get('type') == 'media/podcast': series = try_get(inc, lambda x: x['attributes']['name']) + series = series or clean_html(get_element_by_class('podcast-header__identity', webpage)) return { 'id': episode_id, - 'title': episode['name'], + 'title': episode.get('name'), 'url': clean_podcast_url(episode['assetUrl']), 'description': description.get('standard') or description.get('short'), 'timestamp': parse_iso8601(episode.get('releaseDateTime')), 'duration': int_or_none(episode.get('durationInMilliseconds'), 1000), 'series': series, + 'thumbnail': self._og_search_thumbnail(webpage), + 'vcodec': 'none', } diff --git a/plugins/youtube_download/yt_dlp/extractor/appletrailers.py b/plugins/youtube_download/yt_dlp/extractor/appletrailers.py index 8140e33..2e0b0a8 100644 --- a/plugins/youtube_download/yt_dlp/extractor/appletrailers.py +++ b/plugins/youtube_download/yt_dlp/extractor/appletrailers.py @@ -1,5 +1,3 @@ -from __future__ import unicode_literals - import re import json @@ -122,7 +120,6 @@ class AppleTrailersIE(InfoExtractor): 'height': int_or_none(size_data.get('height')), 'language': version[:2], }) - self._sort_formats(formats) entries.append({ 'id': movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', clip_title).lower(), @@ -187,8 +184,6 @@ class AppleTrailersIE(InfoExtractor): 'height': int_or_none(format['height']), }) - self._sort_formats(formats) - playlist.append({ '_type': 'video', 'id': video_id, diff --git a/plugins/youtube_download/yt_dlp/extractor/archiveorg.py b/plugins/youtube_download/yt_dlp/extractor/archiveorg.py index 2a25c07..4ccd398 100644 --- a/plugins/youtube_download/yt_dlp/extractor/archiveorg.py +++ b/plugins/youtube_download/yt_dlp/extractor/archiveorg.py @@ -1,39 +1,37 @@ -# coding: utf-8 -from __future__ import unicode_literals - -import re import json +import re +import urllib.error +import urllib.parse + from .common import InfoExtractor -from .youtube import YoutubeIE, YoutubeBaseInfoExtractor -from ..compat import ( - compat_urllib_parse_unquote, - compat_urllib_parse_unquote_plus, - compat_HTTPError -) +from .naver import NaverBaseIE +from .youtube import YoutubeBaseInfoExtractor, YoutubeIE +from ..compat import compat_HTTPError, compat_urllib_parse_unquote from ..utils import ( + KNOWN_EXTENSIONS, + ExtractorError, + HEADRequest, bug_reports_message, clean_html, dict_get, extract_attributes, - ExtractorError, get_element_by_id, - HEADRequest, int_or_none, join_nonempty, - KNOWN_EXTENSIONS, + js_to_json, merge_dicts, mimetype2ext, orderedSet, parse_duration, parse_qs, - str_to_int, str_or_none, + str_to_int, traverse_obj, try_get, unified_strdate, unified_timestamp, + url_or_none, urlhandle_detect_ext, - url_or_none ) @@ -54,6 +52,11 @@ class ArchiveOrgIE(InfoExtractor): 'upload_date': '20100315', 'creator': 'SRI International', 'uploader': 'laura@archive.org', + 'thumbnail': r're:https://archive\.org/download/.*\.jpg', + 'release_year': 1968, + 'display_id': 'XD300-23_68HighlightsAResearchCntAugHumanIntellect.cdr', + 'track': 'XD300-23 68HighlightsAResearchCntAugHumanIntellect', + }, }, { 'url': 'https://archive.org/details/Cops1922', @@ -62,33 +65,43 @@ class ArchiveOrgIE(InfoExtractor): 'id': 'Cops1922', 'ext': 'mp4', 'title': 'Buster Keaton\'s "Cops" (1922)', - 'description': 'md5:43a603fd6c5b4b90d12a96b921212b9c', + 'description': 'md5:cd6f9910c35aedd5fc237dbc3957e2ca', 'uploader': 
'yorkmba99@hotmail.com', 'timestamp': 1387699629, 'upload_date': '20131222', + 'display_id': 'Cops-v2.mp4', + 'thumbnail': r're:https://archive\.org/download/.*\.jpg', + 'duration': 1091.96, }, }, { 'url': 'http://archive.org/embed/XD300-23_68HighlightsAResearchCntAugHumanIntellect', 'only_matching': True, }, { 'url': 'https://archive.org/details/Election_Ads', - 'md5': '284180e857160cf866358700bab668a3', + 'md5': 'eec5cddebd4793c6a653b69c3b11f2e6', 'info_dict': { 'id': 'Election_Ads/Commercial-JFK1960ElectionAdCampaignJingle.mpg', 'title': 'Commercial-JFK1960ElectionAdCampaignJingle.mpg', - 'ext': 'mp4', + 'ext': 'mpg', + 'thumbnail': r're:https://archive\.org/download/.*\.jpg', + 'duration': 59.77, + 'display_id': 'Commercial-JFK1960ElectionAdCampaignJingle.mpg', }, }, { 'url': 'https://archive.org/details/Election_Ads/Commercial-Nixon1960ElectionAdToughonDefense.mpg', - 'md5': '7915213ef02559b5501fe630e1a53f59', + 'md5': 'ea1eed8234e7d4165f38c8c769edef38', 'info_dict': { 'id': 'Election_Ads/Commercial-Nixon1960ElectionAdToughonDefense.mpg', 'title': 'Commercial-Nixon1960ElectionAdToughonDefense.mpg', - 'ext': 'mp4', + 'ext': 'mpg', 'timestamp': 1205588045, 'uploader': 'mikedavisstripmaster@yahoo.com', 'description': '1960 Presidential Campaign Election Commercials John F Kennedy, Richard M Nixon', 'upload_date': '20080315', + 'display_id': 'Commercial-Nixon1960ElectionAdToughonDefense.mpg', + 'duration': 59.51, + 'license': 'http://creativecommons.org/licenses/publicdomain/', + 'thumbnail': r're:https://archive\.org/download/.*\.jpg', }, }, { 'url': 'https://archive.org/details/gd1977-05-08.shure57.stevenson.29303.flac16', @@ -97,6 +110,12 @@ class ArchiveOrgIE(InfoExtractor): 'id': 'gd1977-05-08.shure57.stevenson.29303.flac16/gd1977-05-08d01t01.flac', 'title': 'Turning', 'ext': 'flac', + 'track': 'Turning', + 'creator': 'Grateful Dead', + 'display_id': 'gd1977-05-08d01t01.flac', + 'track_number': 1, + 'album': '1977-05-08 - Barton Hall - Cornell University', + 'duration': 39.8, }, }, { 'url': 'https://archive.org/details/gd1977-05-08.shure57.stevenson.29303.flac16/gd1977-05-08d01t07.flac', @@ -107,11 +126,20 @@ class ArchiveOrgIE(InfoExtractor): 'ext': 'flac', 'timestamp': 1205895624, 'uploader': 'mvernon54@yahoo.com', - 'description': 'md5:6a31f1996db0aa0fc9da6d6e708a1bb0', + 'description': 'md5:6c921464414814720c6593810a5c7e3d', 'upload_date': '20080319', 'location': 'Barton Hall - Cornell University', + 'duration': 438.68, + 'track': 'Deal', + 'creator': 'Grateful Dead', + 'album': '1977-05-08 - Barton Hall - Cornell University', + 'release_date': '19770508', + 'display_id': 'gd1977-05-08d01t07.flac', + 'release_year': 1977, + 'track_number': 7, }, }, { + # FIXME: give a better error message than just IndexError when all available formats are restricted 'url': 'https://archive.org/details/lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik', 'md5': '7cb019baa9b332e82ea7c10403acd180', 'info_dict': { @@ -119,6 +147,7 @@ class ArchiveOrgIE(InfoExtractor): 'title': 'Bells Of Rostov', 'ext': 'mp3', }, + 'skip': 'restricted' }, { 'url': 'https://archive.org/details/lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik/disc1/02.02.+Song+And+Chorus+In+The+Polovetsian+Camp+From+%22Prince+Igor%22+(Act+2%2C+Scene+1).mp3', 'md5': '1d0aabe03edca83ca58d9ed3b493a3c3', @@ -131,6 +160,52 @@ class ArchiveOrgIE(InfoExtractor): 'description': 'md5:012b2d668ae753be36896f343d12a236', 'upload_date': '20190928', }, + 'skip': 'restricted' + }, { + # Original formats are private + 
'url': 'https://archive.org/details/irelandthemakingofarepublic', + 'info_dict': { + 'id': 'irelandthemakingofarepublic', + 'title': 'Ireland: The Making of a Republic', + 'upload_date': '20160610', + 'description': 'md5:f70956a156645a658a0dc9513d9e78b7', + 'uploader': 'dimitrios@archive.org', + 'creator': ['British Broadcasting Corporation', 'Time-Life Films'], + 'timestamp': 1465594947, + }, + 'playlist': [ + { + 'md5': '0b211261b26590d49df968f71b90690d', + 'info_dict': { + 'id': 'irelandthemakingofarepublic/irelandthemakingofarepublicreel1_01.mov', + 'ext': 'mp4', + 'title': 'irelandthemakingofarepublicreel1_01.mov', + 'duration': 130.46, + 'thumbnail': 'https://archive.org/download/irelandthemakingofarepublic/irelandthemakingofarepublic.thumbs/irelandthemakingofarepublicreel1_01_000117.jpg', + 'display_id': 'irelandthemakingofarepublicreel1_01.mov', + }, + }, { + 'md5': '67335ee3b23a0da930841981c1e79b02', + 'info_dict': { + 'id': 'irelandthemakingofarepublic/irelandthemakingofarepublicreel1_02.mov', + 'ext': 'mp4', + 'duration': 1395.13, + 'title': 'irelandthemakingofarepublicreel1_02.mov', + 'display_id': 'irelandthemakingofarepublicreel1_02.mov', + 'thumbnail': 'https://archive.org/download/irelandthemakingofarepublic/irelandthemakingofarepublic.thumbs/irelandthemakingofarepublicreel1_02_001374.jpg', + }, + }, { + 'md5': 'e470e86787893603f4a341a16c281eb5', + 'info_dict': { + 'id': 'irelandthemakingofarepublic/irelandthemakingofarepublicreel2.mov', + 'ext': 'mp4', + 'duration': 1602.67, + 'title': 'irelandthemakingofarepublicreel2.mov', + 'thumbnail': 'https://archive.org/download/irelandthemakingofarepublic/irelandthemakingofarepublic.thumbs/irelandthemakingofarepublicreel2_001554.jpg', + 'display_id': 'irelandthemakingofarepublicreel2.mov', + }, + } + ] }] @staticmethod @@ -146,7 +221,7 @@ class ArchiveOrgIE(InfoExtractor): return json.loads(extract_attributes(element)['value']) def _real_extract(self, url): - video_id = compat_urllib_parse_unquote_plus(self._match_id(url)) + video_id = urllib.parse.unquote_plus(self._match_id(url)) identifier, entry_id = (video_id.split('/', 1) + [None])[:2] # Archive.org metadata API doesn't clearly demarcate playlist entries @@ -221,17 +296,25 @@ class ArchiveOrgIE(InfoExtractor): 'filesize': int_or_none(f.get('size'))}) extension = (f['name'].rsplit('.', 1) + [None])[1] - if extension in KNOWN_EXTENSIONS: + + # We don't want to skip private formats if the user has access to them, + # however without access to an account with such privileges we can't implement/test this. + # For now to be safe, we will only skip them if there is no user logged in. 
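+            # [editor's sketch, not part of the upstream change] The gate below
+            # keys off the 'logged-in-sig' cookie that archive.org sets for
+            # authenticated sessions. Outside yt-dlp, the same probe against an
+            # exported cookies.txt would look roughly like this (cookie name
+            # taken from the line below; the file name is hypothetical):
+            #
+            #   import http.cookiejar
+            #   jar = http.cookiejar.MozillaCookieJar('cookies.txt')
+            #   jar.load(ignore_discard=True, ignore_expires=True)
+            #   is_logged_in = any(
+            #       c.name == 'logged-in-sig' and c.domain.endswith('archive.org')
+            #       for c in jar)
+            #
+            # Private files are then only offered when such a session exists.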
+ is_logged_in = bool(self._get_cookies('https://archive.org').get('logged-in-sig')) + if extension in KNOWN_EXTENSIONS and (not f.get('private') or is_logged_in): entry['formats'].append({ 'url': 'https://archive.org/download/' + identifier + '/' + f['name'], 'format': f.get('format'), 'width': int_or_none(f.get('width')), 'height': int_or_none(f.get('height')), 'filesize': int_or_none(f.get('size')), - 'protocol': 'https'}) + 'protocol': 'https', + 'source_preference': 0 if f.get('source') == 'original' else -1, + 'format_note': f.get('source') + }) for entry in entries.values(): - self._sort_formats(entry['formats']) + entry['_format_sort_fields'] = ('source', ) if len(entries) == 1: # If there's only one item, use it as the main info dict @@ -287,7 +370,9 @@ class YoutubeWebArchiveIE(InfoExtractor): 'channel_id': 'UCukCyHaD-bK3in_pKpfH9Eg', 'duration': 32, 'uploader_id': 'Zeurel', - 'uploader_url': 'http://www.youtube.com/user/Zeurel' + 'uploader_url': 'https://www.youtube.com/user/Zeurel', + 'thumbnail': r're:https?://.*\.(jpg|webp)', + 'channel_url': 'https://www.youtube.com/channel/UCukCyHaD-bK3in_pKpfH9Eg', } }, { # Internal link @@ -302,7 +387,9 @@ class YoutubeWebArchiveIE(InfoExtractor): 'channel_id': 'UCHnyfMqiRRG1u-2MsSQLbXA', 'duration': 771, 'uploader_id': '1veritasium', - 'uploader_url': 'http://www.youtube.com/user/1veritasium' + 'uploader_url': 'https://www.youtube.com/user/1veritasium', + 'thumbnail': r're:https?://.*\.(jpg|webp)', + 'channel_url': 'https://www.youtube.com/channel/UCHnyfMqiRRG1u-2MsSQLbXA', } }, { # Video from 2012, webm format itag 45. Newest capture is deleted video, with an invalid description. @@ -316,7 +403,9 @@ class YoutubeWebArchiveIE(InfoExtractor): 'duration': 398, 'description': 'md5:ff4de6a7980cb65d951c2f6966a4f2f3', 'uploader_id': 'machinima', - 'uploader_url': 'http://www.youtube.com/user/machinima' + 'uploader_url': 'https://www.youtube.com/user/machinima', + 'thumbnail': r're:https?://.*\.(jpg|webp)', + 'uploader': 'machinima' } }, { # FLV video. Video file URL does not provide itag information @@ -330,7 +419,10 @@ class YoutubeWebArchiveIE(InfoExtractor): 'duration': 19, 'description': 'md5:10436b12e07ac43ff8df65287a56efb4', 'uploader_id': 'jawed', - 'uploader_url': 'http://www.youtube.com/user/jawed' + 'uploader_url': 'https://www.youtube.com/user/jawed', + 'channel_url': 'https://www.youtube.com/channel/UC4QobU6STFB0P71PMvOGN5A', + 'thumbnail': r're:https?://.*\.(jpg|webp)', + 'uploader': 'jawed', } }, { 'url': 'https://web.archive.org/web/20110712231407/http://www.youtube.com/watch?v=lTx3G6h2xyA', @@ -344,7 +436,9 @@ class YoutubeWebArchiveIE(InfoExtractor): 'duration': 204, 'description': 'md5:f7535343b6eda34a314eff8b85444680', 'uploader_id': 'itsmadeon', - 'uploader_url': 'http://www.youtube.com/user/itsmadeon' + 'uploader_url': 'https://www.youtube.com/user/itsmadeon', + 'channel_url': 'https://www.youtube.com/channel/UCqMDNf3Pn5L7pcNkuSEeO3w', + 'thumbnail': r're:https?://.*\.(jpg|webp)', } }, { # First capture is of dead video, second is the oldest from CDX response. @@ -355,10 +449,13 @@ class YoutubeWebArchiveIE(InfoExtractor): 'title': 'Fake Teen Doctor Strikes AGAIN! 
- Weekly Weird News', 'upload_date': '20160218', 'channel_id': 'UCdIaNUarhzLSXGoItz7BHVA', - 'duration': 1236, + 'duration': 1235, 'description': 'md5:21032bae736421e89c2edf36d1936947', 'uploader_id': 'MachinimaETC', - 'uploader_url': 'http://www.youtube.com/user/MachinimaETC' + 'uploader_url': 'https://www.youtube.com/user/MachinimaETC', + 'channel_url': 'https://www.youtube.com/channel/UCdIaNUarhzLSXGoItz7BHVA', + 'thumbnail': r're:https?://.*\.(jpg|webp)', + 'uploader': 'ETC News', } }, { # First capture of dead video, capture date in link links to dead capture. @@ -369,10 +466,13 @@ class YoutubeWebArchiveIE(InfoExtractor): 'title': 'WTF: Video Games Still Launch BROKEN?! - T.U.G.S.', 'upload_date': '20160219', 'channel_id': 'UCdIaNUarhzLSXGoItz7BHVA', - 'duration': 798, + 'duration': 797, 'description': 'md5:a1dbf12d9a3bd7cb4c5e33b27d77ffe7', 'uploader_id': 'MachinimaETC', - 'uploader_url': 'http://www.youtube.com/user/MachinimaETC' + 'uploader_url': 'https://www.youtube.com/user/MachinimaETC', + 'channel_url': 'https://www.youtube.com/channel/UCdIaNUarhzLSXGoItz7BHVA', + 'thumbnail': r're:https?://.*\.(jpg|webp)', + 'uploader': 'ETC News', }, 'expected_warnings': [ r'unable to download capture webpage \(it may not be archived\)' @@ -392,12 +492,11 @@ class YoutubeWebArchiveIE(InfoExtractor): 'title': 'It\'s Bootleg AirPods Time.', 'upload_date': '20211021', 'channel_id': 'UC7Jwj9fkrf1adN4fMmTkpug', - 'channel_url': 'http://www.youtube.com/channel/UC7Jwj9fkrf1adN4fMmTkpug', + 'channel_url': 'https://www.youtube.com/channel/UC7Jwj9fkrf1adN4fMmTkpug', 'duration': 810, 'description': 'md5:7b567f898d8237b256f36c1a07d6d7bc', + 'thumbnail': r're:https?://.*\.(jpg|webp)', 'uploader': 'DankPods', - 'uploader_id': 'UC7Jwj9fkrf1adN4fMmTkpug', - 'uploader_url': 'http://www.youtube.com/channel/UC7Jwj9fkrf1adN4fMmTkpug' } }, { # player response contains '};' See: https://github.com/ytdl-org/youtube-dl/issues/27093 @@ -408,12 +507,135 @@ class YoutubeWebArchiveIE(InfoExtractor): 'title': 'bitch lasagna', 'upload_date': '20181005', 'channel_id': 'UC-lHJZR3Gqxm24_Vd_AJ5Yw', - 'channel_url': 'http://www.youtube.com/channel/UC-lHJZR3Gqxm24_Vd_AJ5Yw', + 'channel_url': 'https://www.youtube.com/channel/UC-lHJZR3Gqxm24_Vd_AJ5Yw', 'duration': 135, 'description': 'md5:2dbe4051feeff2dab5f41f82bb6d11d0', 'uploader': 'PewDiePie', 'uploader_id': 'PewDiePie', - 'uploader_url': 'http://www.youtube.com/user/PewDiePie' + 'uploader_url': 'https://www.youtube.com/user/PewDiePie', + 'thumbnail': r're:https?://.*\.(jpg|webp)', + } + }, { + # ~June 2010 Capture. 
swfconfig + 'url': 'https://web.archive.org/web/0/https://www.youtube.com/watch?v=8XeW5ilk-9Y', + 'info_dict': { + 'id': '8XeW5ilk-9Y', + 'ext': 'flv', + 'title': 'Story of Stuff, The Critique Part 4 of 4', + 'duration': 541, + 'description': 'md5:28157da06f2c5e94c97f7f3072509972', + 'uploader': 'HowTheWorldWorks', + 'uploader_id': 'HowTheWorldWorks', + 'thumbnail': r're:https?://.*\.(jpg|webp)', + 'uploader_url': 'https://www.youtube.com/user/HowTheWorldWorks', + 'upload_date': '20090520', + } + }, { + # Jan 2011: watch-video-date/eow-date surrounded by whitespace + 'url': 'https://web.archive.org/web/20110126141719/http://www.youtube.com/watch?v=Q_yjX80U7Yc', + 'info_dict': { + 'id': 'Q_yjX80U7Yc', + 'ext': 'flv', + 'title': 'Spray Paint Art by Clay Butler: Purple Fantasy Forest', + 'uploader_id': 'claybutlermusic', + 'description': 'md5:4595264559e3d0a0ceb3f011f6334543', + 'upload_date': '20090803', + 'uploader': 'claybutlermusic', + 'thumbnail': r're:https?://.*\.(jpg|webp)', + 'duration': 132, + 'uploader_url': 'https://www.youtube.com/user/claybutlermusic', + } + }, { + # ~May 2009 swfArgs. ytcfg is spread out over various vars + 'url': 'https://web.archive.org/web/0/https://www.youtube.com/watch?v=c5uJgG05xUY', + 'info_dict': { + 'id': 'c5uJgG05xUY', + 'ext': 'webm', + 'title': 'Story of Stuff, The Critique Part 1 of 4', + 'uploader_id': 'HowTheWorldWorks', + 'uploader': 'HowTheWorldWorks', + 'uploader_url': 'https://www.youtube.com/user/HowTheWorldWorks', + 'upload_date': '20090513', + 'description': 'md5:4ca77d79538064e41e4cc464e93f44f0', + 'thumbnail': r're:https?://.*\.(jpg|webp)', + 'duration': 754, + } + }, { + # ~June 2012. Upload date is in another lang so cannot extract. + 'url': 'https://web.archive.org/web/20120607174520/http://www.youtube.com/watch?v=xWTLLl-dQaA', + 'info_dict': { + 'id': 'xWTLLl-dQaA', + 'ext': 'mp4', + 'title': 'Black Nerd eHarmony Video Bio Parody (SPOOF)', + 'uploader_url': 'https://www.youtube.com/user/BlackNerdComedy', + 'description': 'md5:e25f0133aaf9e6793fb81c18021d193e', + 'uploader_id': 'BlackNerdComedy', + 'uploader': 'BlackNerdComedy', + 'duration': 182, + 'thumbnail': r're:https?://.*\.(jpg|webp)', + } + }, { + # ~July 2013 + 'url': 'https://web.archive.org/web/*/https://www.youtube.com/watch?v=9eO1aasHyTM', + 'info_dict': { + 'id': '9eO1aasHyTM', + 'ext': 'mp4', + 'title': 'Polar-oid', + 'description': 'Cameras and bears are dangerous!', + 'uploader_url': 'https://www.youtube.com/user/punkybird', + 'uploader_id': 'punkybird', + 'duration': 202, + 'channel_id': 'UC62R2cBezNBOqxSerfb1nMQ', + 'channel_url': 'https://www.youtube.com/channel/UC62R2cBezNBOqxSerfb1nMQ', + 'upload_date': '20060428', + 'uploader': 'punkybird', + } + }, { + # April 2020: Player response in player config + 'url': 'https://web.archive.org/web/20200416034815/https://www.youtube.com/watch?v=Cf7vS8jc7dY&gl=US&hl=en', + 'info_dict': { + 'id': 'Cf7vS8jc7dY', + 'ext': 'mp4', + 'title': 'A Dramatic Pool Story (by Jamie Spicer-Lewis) - Game Grumps Animated', + 'duration': 64, + 'upload_date': '20200408', + 'uploader_id': 'GameGrumps', + 'uploader': 'GameGrumps', + 'channel_url': 'https://www.youtube.com/channel/UC9CuvdOVfMPvKCiwdGKL3cQ', + 'channel_id': 'UC9CuvdOVfMPvKCiwdGKL3cQ', + 'thumbnail': r're:https?://.*\.(jpg|webp)', + 'description': 'md5:c625bb3c02c4f5fb4205971e468fa341', + 'uploader_url': 'https://www.youtube.com/user/GameGrumps', + } + }, { + # watch7-user-header with yt-user-info + 'url': 'ytarchive:kbh4T_b4Ixw:20160307085057', + 'info_dict': { + 'id': 
'kbh4T_b4Ixw', + 'ext': 'mp4', + 'title': 'Shovel Knight OST - Strike the Earth! Plains of Passage 16 bit SNES style remake / remix', + 'channel_url': 'https://www.youtube.com/channel/UCnTaGvsHmMy792DWeT6HbGA', + 'uploader': 'Nelward music', + 'duration': 213, + 'description': 'md5:804b4a9ce37b050a5fefdbb23aeba54d', + 'thumbnail': r're:https?://.*\.(jpg|webp)', + 'upload_date': '20150503', + 'channel_id': 'UCnTaGvsHmMy792DWeT6HbGA', + } + }, { + # April 2012 + 'url': 'https://web.archive.org/web/0/https://www.youtube.com/watch?v=SOm7mPoPskU', + 'info_dict': { + 'id': 'SOm7mPoPskU', + 'ext': 'mp4', + 'title': 'Boyfriend - Justin Bieber Parody', + 'uploader_url': 'https://www.youtube.com/user/thecomputernerd01', + 'uploader': 'thecomputernerd01', + 'thumbnail': r're:https?://.*\.(jpg|webp)', + 'description': 'md5:dd7fa635519c2a5b4d566beaecad7491', + 'duration': 200, + 'upload_date': '20120407', + 'uploader_id': 'thecomputernerd01', } }, { 'url': 'https://web.archive.org/web/http://www.youtube.com/watch?v=kH-G_aIBlFw', @@ -445,9 +667,11 @@ class YoutubeWebArchiveIE(InfoExtractor): 'only_matching': True }, ] - _YT_INITIAL_DATA_RE = r'(?:(?:(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;)|%s)' % YoutubeBaseInfoExtractor._YT_INITIAL_DATA_RE - _YT_INITIAL_PLAYER_RESPONSE_RE = r'(?:(?:(?:window\s*\[\s*["\']ytInitialPlayerResponse["\']\s*\]|ytInitialPlayerResponse)\s*=[(\s]*({.+?})[)\s]*;)|%s)' % YoutubeBaseInfoExtractor._YT_INITIAL_PLAYER_RESPONSE_RE - _YT_INITIAL_BOUNDARY_RE = r'(?:(?:var\s+meta|</script|\n)|%s)' % YoutubeBaseInfoExtractor._YT_INITIAL_BOUNDARY_RE + _YT_INITIAL_DATA_RE = YoutubeBaseInfoExtractor._YT_INITIAL_DATA_RE + _YT_INITIAL_PLAYER_RESPONSE_RE = fr'''(?x: + (?:window\s*\[\s*["\']ytInitialPlayerResponse["\']\s*\]|ytInitialPlayerResponse)\s*=[(\s]*| + {YoutubeBaseInfoExtractor._YT_INITIAL_PLAYER_RESPONSE_RE} + )''' _YT_DEFAULT_THUMB_SERVERS = ['i.ytimg.com'] # thumbnails most likely archived on these servers _YT_ALL_THUMB_SERVERS = orderedSet( @@ -457,7 +681,7 @@ class YoutubeWebArchiveIE(InfoExtractor): _OLDEST_CAPTURE_DATE = 20050214000000 _NEWEST_CAPTURE_DATE = 20500101000000 - def _call_cdx_api(self, item_id, url, filters: list = None, collapse: list = None, query: dict = None, note='Downloading CDX API JSON'): + def _call_cdx_api(self, item_id, url, filters: list = None, collapse: list = None, query: dict = None, note=None, fatal=False): # CDX docs: https://github.com/internetarchive/wayback/blob/master/wayback-cdx-server/README.md query = { 'url': url, @@ -468,21 +692,17 @@ class YoutubeWebArchiveIE(InfoExtractor): 'collapse': collapse or [], **(query or {}) } - res = self._download_json('https://web.archive.org/cdx/search/cdx', item_id, note, query=query) + res = self._download_json( + 'https://web.archive.org/cdx/search/cdx', item_id, + note or 'Downloading CDX API JSON', query=query, fatal=fatal) if isinstance(res, list) and len(res) >= 2: # format response to make it easier to use return list(dict(zip(res[0], v)) for v in res[1:]) elif not isinstance(res, list) or len(res) != 0: self.report_warning('Error while parsing CDX API response' + bug_reports_message()) - def _extract_yt_initial_variable(self, webpage, regex, video_id, name): - return self._parse_json(self._search_regex( - (r'%s\s*%s' % (regex, self._YT_INITIAL_BOUNDARY_RE), - regex), webpage, name, default='{}'), video_id, fatal=False) - def _extract_webpage_title(self, webpage): - page_title = self._html_search_regex( - r'<title>([^<]*)', webpage, 'title', default='') + 
page_title = self._html_extract_title(webpage, default='') # YouTube video pages appear to always have either 'YouTube -' as prefix or '- YouTube' as suffix. return self._html_search_regex( r'(?:YouTube\s*-\s*(.*)$)|(?:(.*)\s*-\s*YouTube$)', @@ -490,10 +710,32 @@ class YoutubeWebArchiveIE(InfoExtractor): def _extract_metadata(self, video_id, webpage): search_meta = ((lambda x: self._html_search_meta(x, webpage, default=None)) if webpage else (lambda x: None)) - player_response = self._extract_yt_initial_variable( - webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE, video_id, 'initial player response') or {} - initial_data = self._extract_yt_initial_variable( - webpage, self._YT_INITIAL_DATA_RE, video_id, 'initial player response') or {} + player_response = self._search_json( + self._YT_INITIAL_PLAYER_RESPONSE_RE, webpage, 'initial player response', + video_id, default={}) + initial_data = self._search_json( + self._YT_INITIAL_DATA_RE, webpage, 'initial data', video_id, default={}) + + ytcfg = {} + for j in re.findall(r'yt\.setConfig\(\s*(?P{\s*(?s:.+?)\s*})\s*\);', webpage): # ~June 2010 + ytcfg.update(self._parse_json(j, video_id, fatal=False, ignore_extra=True, transform_source=js_to_json, errnote='') or {}) + + # XXX: this also may contain a 'ptchn' key + player_config = ( + self._search_json( + r'(?:yt\.playerConfig|ytplayer\.config|swfConfig)\s*=', + webpage, 'player config', video_id, default=None) + or ytcfg.get('PLAYER_CONFIG') or {}) + + # XXX: this may also contain a 'creator' key. + swf_args = self._search_json(r'swfArgs\s*=', webpage, 'swf config', video_id, default={}) + if swf_args and not traverse_obj(player_config, ('args',)): + player_config['args'] = swf_args + + if not player_response: + # April 2020 + player_response = self._parse_json( + traverse_obj(player_config, ('args', 'player_response')) or '{}', video_id, fatal=False) initial_data_video = traverse_obj( initial_data, ('contents', 'twoColumnWatchNextResults', 'results', 'results', 'contents', ..., 'videoPrimaryInfoRenderer'), @@ -509,21 +751,64 @@ class YoutubeWebArchiveIE(InfoExtractor): video_details.get('title') or YoutubeBaseInfoExtractor._get_text(microformats, 'title') or YoutubeBaseInfoExtractor._get_text(initial_data_video, 'title') + or traverse_obj(player_config, ('args', 'title')) or self._extract_webpage_title(webpage) or search_meta(['og:title', 'twitter:title', 'title'])) + def id_from_url(url, type_): + return self._search_regex( + rf'(?:{type_})/([^/#&?]+)', url or '', f'{type_} id', default=None) + + # XXX: would the get_elements_by_... functions be better suited here? + _CHANNEL_URL_HREF_RE = r'href="[^"]*(?Phttps?://www\.youtube\.com/(?:user|channel)/[^"]+)"' + uploader_or_channel_url = self._search_regex( + [fr'<(?:link\s*itemprop=\"url\"|a\s*id=\"watch-username\").*?\b{_CHANNEL_URL_HREF_RE}>', # @fd05024 + fr']*>\s*]*\b{_CHANNEL_URL_HREF_RE}'], # ~ May 2009, ~June 2012 + webpage, 'uploader or channel url', default=None) + + owner_profile_url = url_or_none(microformats.get('ownerProfileUrl')) # @a6211d2 + + # Uploader refers to the /user/ id ONLY + uploader_id = ( + id_from_url(owner_profile_url, 'user') + or id_from_url(uploader_or_channel_url, 'user') + or ytcfg.get('VIDEO_USERNAME')) + uploader_url = f'https://www.youtube.com/user/{uploader_id}' if uploader_id else None + + # XXX: do we want to differentiate uploader and channel? 
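+        # [editor's sketch, not part of the upstream change] This method probes
+        # progressively older archived page layouts and keeps the first hit.
+        # Reduced to plain re, the fallback chain is simply:
+        #
+        #   import re
+        #   def first_match(patterns, html, default=None):
+        #       for pattern in patterns:
+        #           mobj = re.search(pattern, html)
+        #           if mobj:
+        #               return mobj.group(1)
+        #       return default  # no archived layout matched
+        #
+        # _search_regex accepts a list of patterns and behaves the same way,
+        # which is why each era of YouTube markup gets its own entry below.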
+ uploader = ( + self._search_regex( + [r']*>\s*([^<]+)', # June 2010 + r'var\s*watchUsername\s*=\s*\'(.+?)\';', # ~May 2009 + r']*>\s*]*>\s*(.+?)\s*]*title="\s*(.+?)\s*"'], # ~June 2012 + webpage, 'uploader', default=None) + or self._html_search_regex( + [r'(?s)]*[^>]*>\s*(.*?)\s*]*yt-user-name[^>]*>\s*(.*?)\s*(?:(?!\1).)+)\1', # @b45a9e6 - webpage, 'channel id', default=None, group='id')) - channel_url = f'http://www.youtube.com/channel/{channel_id}' if channel_id else None + webpage, 'channel id', default=None, group='id') + or id_from_url(owner_profile_url, 'channel') + or id_from_url(uploader_or_channel_url, 'channel') + or traverse_obj(player_config, ('args', 'ucid'))) + channel_url = f'https://www.youtube.com/channel/{channel_id}' if channel_id else None duration = int_or_none( video_details.get('lengthSeconds') or microformats.get('lengthSeconds') + or traverse_obj(player_config, ('args', ('length_seconds', 'l')), get_all=False) or parse_duration(search_meta('duration'))) description = ( video_details.get('shortDescription') @@ -531,26 +816,13 @@ class YoutubeWebArchiveIE(InfoExtractor): or clean_html(get_element_by_id('eow-description', webpage)) # @9e6dd23 or search_meta(['description', 'og:description', 'twitter:description'])) - uploader = video_details.get('author') - - # Uploader ID and URL - uploader_mobj = re.search( - r'', # @fd05024 - webpage) - if uploader_mobj is not None: - uploader_id, uploader_url = uploader_mobj.group('uploader_id'), uploader_mobj.group('uploader_url') - else: - # @a6211d2 - uploader_url = url_or_none(microformats.get('ownerProfileUrl')) - uploader_id = self._search_regex( - r'(?:user|channel)/([^/]+)', uploader_url or '', 'uploader id', default=None) - upload_date = unified_strdate( dict_get(microformats, ('uploadDate', 'publishDate')) or search_meta(['uploadDate', 'datePublished']) or self._search_regex( - [r'(?s)id="eow-date.*?>(.*?)', - r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']'], # @7998520 + [r'(?s)id="eow-date.*?>\s*(.*?)\s*', + r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']', # @7998520 + r'class\s*=\s*"(?:watch-video-date|watch-video-added post-date)"[^>]*>\s*([^<]+?)\s*<'], # ~June 2010, ~Jan 2009 (respectively) webpage, 'upload date', default=None)) return { @@ -596,7 +868,7 @@ class YoutubeWebArchiveIE(InfoExtractor): response = self._call_cdx_api( video_id, f'https://www.youtube.com/watch?v={video_id}', filters=['mimetype:text/html'], collapse=['timestamp:6', 'digest'], query={'matchType': 'prefix'}) or [] - all_captures = sorted([int_or_none(r['timestamp']) for r in response if int_or_none(r['timestamp']) is not None]) + all_captures = sorted(int_or_none(r['timestamp']) for r in response if int_or_none(r['timestamp']) is not None) # Prefer the new polymer UI captures as we support extracting more metadata from them # WBM captures seem to all switch to this layout ~July 2020 @@ -619,18 +891,22 @@ class YoutubeWebArchiveIE(InfoExtractor): url_date = url_date or url_date_2 urlh = None - try: - urlh = self._request_webpage( - HEADRequest('https://web.archive.org/web/2oe_/http://wayback-fakeurl.archive.org/yt/%s' % video_id), - video_id, note='Fetching archived video file url', expected_status=True) - except ExtractorError as e: - # HTTP Error 404 is expected if the video is not saved. 
- if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404: - self.raise_no_formats( - 'The requested video is not archived, indexed, or there is an issue with web.archive.org', - expected=True) - else: - raise + retry_manager = self.RetryManager(fatal=False) + for retry in retry_manager: + try: + urlh = self._request_webpage( + HEADRequest('https://web.archive.org/web/2oe_/http://wayback-fakeurl.archive.org/yt/%s' % video_id), + video_id, note='Fetching archived video file url', expected_status=True) + except ExtractorError as e: + # HTTP Error 404 is expected if the video is not saved. + if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404: + self.raise_no_formats( + 'The requested video is not archived, indexed, or there is an issue with web.archive.org (try again later)', expected=True) + else: + retry.error = e + + if retry_manager.error: + self.raise_no_formats(retry_manager.error, expected=True, video_id=video_id) capture_dates = self._get_capture_dates(video_id, int_or_none(url_date)) self.write_debug('Captures to try: ' + join_nonempty(*capture_dates, delim=', ')) @@ -671,3 +947,237 @@ class YoutubeWebArchiveIE(InfoExtractor): if not info.get('title'): info['title'] = video_id return info + + +class VLiveWebArchiveIE(InfoExtractor): + IE_NAME = 'web.archive:vlive' + IE_DESC = 'web.archive.org saved vlive videos' + _VALID_URL = r'''(?x) + (?:https?://)?web\.archive\.org/ + (?:web/)?(?:(?P[0-9]{14})?[0-9A-Za-z_*]*/)? # /web and the version index is optional + (?:https?(?::|%3[Aa])//)?(?: + (?:(?:www|m)\.)?vlive\.tv(?::(?:80|443))?/(?:video|embed)/(?P[0-9]+) # VLive URL + ) + ''' + _TESTS = [{ + 'url': 'https://web.archive.org/web/20221221144331/http://www.vlive.tv/video/1326', + 'md5': 'cc7314812855ce56de70a06a27314983', + 'info_dict': { + 'id': '1326', + 'ext': 'mp4', + 'title': "Girl's Day's Broadcast", + 'creator': "Girl's Day", + 'view_count': int, + 'uploader_id': 'muploader_a', + 'uploader_url': None, + 'uploader': None, + 'upload_date': '20150817', + 'thumbnail': r're:^https?://.*\.(?:jpg|png)$', + 'timestamp': 1439816449, + 'like_count': int, + 'channel': 'Girl\'s Day', + 'channel_id': 'FDF27', + 'comment_count': int, + 'release_timestamp': 1439818140, + 'release_date': '20150817', + 'duration': 1014, + }, + 'params': { + 'skip_download': True, + }, + }, { + 'url': 'https://web.archive.org/web/20221221182103/http://www.vlive.tv/video/16937', + 'info_dict': { + 'id': '16937', + 'ext': 'mp4', + 'title': '첸백시 걍방', + 'creator': 'EXO', + 'view_count': int, + 'subtitles': 'mincount:12', + 'uploader_id': 'muploader_j', + 'uploader_url': 'http://vlive.tv', + 'uploader': None, + 'upload_date': '20161112', + 'thumbnail': r're:^https?://.*\.(?:jpg|png)$', + 'timestamp': 1478923074, + 'like_count': int, + 'channel': 'EXO', + 'channel_id': 'F94BD', + 'comment_count': int, + 'release_timestamp': 1478924280, + 'release_date': '20161112', + 'duration': 906, + }, + 'params': { + 'skip_download': True, + }, + }, { + 'url': 'https://web.archive.org/web/20221127190050/http://www.vlive.tv/video/101870', + 'info_dict': { + 'id': '101870', + 'ext': 'mp4', + 'title': '[ⓓ xV] “레벨이들 매력에 반해? 
안 반해?” 움직이는 HD 포토 (레드벨벳:Red Velvet)', + 'creator': 'Dispatch', + 'view_count': int, + 'subtitles': 'mincount:6', + 'uploader_id': 'V__FRA08071', + 'uploader_url': 'http://vlive.tv', + 'uploader': None, + 'upload_date': '20181130', + 'thumbnail': r're:^https?://.*\.(?:jpg|png)$', + 'timestamp': 1543601327, + 'like_count': int, + 'channel': 'Dispatch', + 'channel_id': 'C796F3', + 'comment_count': int, + 'release_timestamp': 1543601040, + 'release_date': '20181130', + 'duration': 279, + }, + 'params': { + 'skip_download': True, + }, + }] + + # The wayback machine has special timestamp and "mode" values: + # timestamp: + # 1 = the first capture + # 2 = the last capture + # mode: + # id_ = Identity - perform no alterations of the original resource, return it as it was archived. + _WAYBACK_BASE_URL = 'https://web.archive.org/web/2id_/' + + def _download_archived_page(self, url, video_id, *, timestamp='2', **kwargs): + for retry in self.RetryManager(): + try: + return self._download_webpage(f'https://web.archive.org/web/{timestamp}id_/{url}', video_id, **kwargs) + except ExtractorError as e: + if isinstance(e.cause, urllib.error.HTTPError) and e.cause.code == 404: + raise ExtractorError('Page was not archived', expected=True) + retry.error = e + continue + + def _download_archived_json(self, url, video_id, **kwargs): + page = self._download_archived_page(url, video_id, **kwargs) + if not page: + raise ExtractorError('Page was not archived', expected=True) + else: + return self._parse_json(page, video_id) + + def _extract_formats_from_m3u8(self, m3u8_url, params, video_id): + m3u8_doc = self._download_archived_page(m3u8_url, video_id, note='Downloading m3u8', query=params, fatal=False) + if not m3u8_doc: + return + + # M3U8 document should be changed to archive domain + m3u8_doc = m3u8_doc.splitlines() + url_base = m3u8_url.rsplit('/', 1)[0] + first_segment = None + for i, line in enumerate(m3u8_doc): + if not line.startswith('#'): + m3u8_doc[i] = f'{self._WAYBACK_BASE_URL}{url_base}/{line}?{urllib.parse.urlencode(params)}' + first_segment = first_segment or m3u8_doc[i] + + # Segments may not have been archived. See https://web.archive.org/web/20221127190050/http://www.vlive.tv/video/101870 + urlh = self._request_webpage(HEADRequest(first_segment), video_id, errnote=False, + fatal=False, note='Check first segment availablity') + if urlh: + formats, subtitles = self._parse_m3u8_formats_and_subtitles('\n'.join(m3u8_doc), ext='mp4', video_id=video_id) + if subtitles: + self._report_ignoring_subs('m3u8') + return formats + + # Closely follows the logic of the ArchiveTeam grab script + # See: https://github.com/ArchiveTeam/vlive-grab/blob/master/vlive.lua + def _real_extract(self, url): + video_id, url_date = self._match_valid_url(url).group('id', 'date') + + webpage = self._download_archived_page(f'https://www.vlive.tv/video/{video_id}', video_id, timestamp=url_date) + + player_info = self._search_json(r'__PRELOADED_STATE__\s*=', webpage, 'player info', video_id) + user_country = traverse_obj(player_info, ('common', 'userCountry')) + + main_script_url = self._search_regex(r' 1: + self.report_warning('Multiple streams found. 
Only the first stream will be downloaded.') + stream = streams[0] + + max_stream = max( + stream.get('videos') or [], + key=lambda v: traverse_obj(v, ('bitrate', 'video'), default=0), default=None) + if max_stream is not None: + params = {arg.get('name'): arg.get('value') for arg in stream.get('keys', []) if arg.get('type') == 'param'} + formats = self._extract_formats_from_m3u8(max_stream.get('source'), params, video_id) or [] + + # For parts of the project MP4 files were archived + max_video = max( + traverse_obj(vod_data, ('videos', 'list', ...)), + key=lambda v: traverse_obj(v, ('bitrate', 'video'), default=0), default=None) + if max_video is not None: + video_url = self._WAYBACK_BASE_URL + max_video.get('source') + urlh = self._request_webpage(HEADRequest(video_url), video_id, errnote=False, + fatal=False, note='Check video availablity') + if urlh: + formats.append({'url': video_url}) + + return { + 'id': video_id, + 'formats': formats, + **traverse_obj(player_info, ('postDetail', 'post', { + 'title': ('officialVideo', 'title', {str}), + 'creator': ('author', 'nickname', {str}), + 'channel': ('channel', 'channelName', {str}), + 'channel_id': ('channel', 'channelCode', {str}), + 'duration': ('officialVideo', 'playTime', {int_or_none}), + 'view_count': ('officialVideo', 'playCount', {int_or_none}), + 'like_count': ('officialVideo', 'likeCount', {int_or_none}), + 'comment_count': ('officialVideo', 'commentCount', {int_or_none}), + 'timestamp': ('officialVideo', 'createdAt', {lambda x: int_or_none(x, scale=1000)}), + 'release_timestamp': ('officialVideo', 'willStartAt', {lambda x: int_or_none(x, scale=1000)}), + })), + **traverse_obj(vod_data, ('meta', { + 'uploader_id': ('user', 'id', {str}), + 'uploader': ('user', 'name', {str}), + 'uploader_url': ('user', 'url', {url_or_none}), + 'thumbnail': ('cover', 'source', {url_or_none}), + }), expected_type=lambda x: x or None), + **NaverBaseIE.process_subtitles(vod_data, lambda x: [self._WAYBACK_BASE_URL + x]), + } diff --git a/plugins/youtube_download/yt_dlp/extractor/arcpublishing.py b/plugins/youtube_download/yt_dlp/extractor/arcpublishing.py index 1943fd5..febd3d2 100644 --- a/plugins/youtube_download/yt_dlp/extractor/arcpublishing.py +++ b/plugins/youtube_download/yt_dlp/extractor/arcpublishing.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - import re from .common import InfoExtractor @@ -73,8 +70,8 @@ class ArcPublishingIE(InfoExtractor): ], 'video-api-cdn.%s.arcpublishing.com/api'), ] - @staticmethod - def _extract_urls(webpage): + @classmethod + def _extract_embed_urls(cls, url, webpage): entries = [] # https://arcpublishing.atlassian.net/wiki/spaces/POWA/overview for powa_el in re.findall(r'(]+class="[^"]*\bpowa\b[^"]*"[^>]+data-uuid="%s"[^>]*>)' % ArcPublishingIE._UUID_REGEX, webpage): @@ -124,8 +121,7 @@ class ArcPublishingIE(InfoExtractor): formats.extend(smil_formats) elif stream_type in ('ts', 'hls'): m3u8_formats = self._extract_m3u8_formats( - s_url, uuid, 'mp4', 'm3u8' if is_live else 'm3u8_native', - m3u8_id='hls', fatal=False) + s_url, uuid, 'mp4', live=is_live, m3u8_id='hls', fatal=False) if all([f.get('acodec') == 'none' for f in m3u8_formats]): continue for f in m3u8_formats: @@ -148,7 +144,6 @@ class ArcPublishingIE(InfoExtractor): 'url': s_url, 'quality': -10, }) - self._sort_formats(formats) subtitles = {} for subtitle in (try_get(video, lambda x: x['subtitles']['urls'], list) or []): diff --git a/plugins/youtube_download/yt_dlp/extractor/ard.py 
b/plugins/youtube_download/yt_dlp/extractor/ard.py index 4ad5d6d..8660741 100644 --- a/plugins/youtube_download/yt_dlp/extractor/ard.py +++ b/plugins/youtube_download/yt_dlp/extractor/ard.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - import json import re @@ -43,14 +40,15 @@ class ARDMediathekBaseIE(InfoExtractor): 'This video is not available due to geoblocking', countries=self._GEO_COUNTRIES, metadata_available=True) - self._sort_formats(formats) - subtitles = {} subtitle_url = media_info.get('_subtitleUrl') if subtitle_url: subtitles['de'] = [{ 'ext': 'ttml', 'url': subtitle_url, + }, { + 'ext': 'vtt', + 'url': subtitle_url.replace('/ebutt/', '/webvtt/') + '.vtt', }] return { @@ -265,7 +263,6 @@ class ARDMediathekIE(ARDMediathekBaseIE): 'format_id': fid, 'url': furl, }) - self._sort_formats(formats) info = { 'formats': formats, } @@ -292,16 +289,16 @@ class ARDIE(InfoExtractor): _VALID_URL = r'(?P<mainurl>https?://(?:www\.)?daserste\.de/(?:[^/?#&]+/)+(?P<id>[^/?#&]+))\.html' _TESTS = [{ - # available till 7.01.2022 - 'url': 'https://www.daserste.de/information/talk/maischberger/videos/maischberger-die-woche-video100.html', - 'md5': '867d8aa39eeaf6d76407c5ad1bb0d4c1', + # available till 7.12.2023 + 'url': 'https://www.daserste.de/information/talk/maischberger/videos/maischberger-video-424.html', + 'md5': 'a438f671e87a7eba04000336a119ccc4', 'info_dict': { - 'id': 'maischberger-die-woche-video100', - 'display_id': 'maischberger-die-woche-video100', + 'id': 'maischberger-video-424', + 'display_id': 'maischberger-video-424', 'ext': 'mp4', - 'duration': 3687.0, - 'title': 'maischberger. die woche vom 7. Januar 2021', - 'upload_date': '20210107', + 'duration': 4452.0, + 'title': 'maischberger am 07.12.2022', + 'upload_date': '20221207', 'thumbnail': r're:^https?://.*\.jpg$', }, }, { @@ -374,7 +371,6 @@ class ARDIE(InfoExtractor): continue f['url'] = format_url formats.append(f) - self._sort_formats(formats) _SUB_FORMATS = ( ('./dataTimedText', 'ttml'), @@ -407,8 +403,9 @@ class ARDBetaMediathekIE(ARDMediathekBaseIE): (?:(?:beta|www)\.)?ardmediathek\.de/ (?:(?P<client>[^/]+)/)? (?:player|live|video|(?P<playlist>sendung|sammlung))/ - (?:(?P<display_id>[^?#]+)/)? - (?P<id>(?(playlist)|Y3JpZDovL)[a-zA-Z0-9]+)''' + (?:(?P<display_id>(?(playlist)[^?#]+?|[^?#]+))/)?
+ (?P<id>(?(playlist)|Y3JpZDovL)[a-zA-Z0-9]+) + (?(playlist)/(?P<season>\d+)?/?(?:[?#]|$))''' _TESTS = [{ 'url': 'https://www.ardmediathek.de/mdr/video/die-robuste-roswita/Y3JpZDovL21kci5kZS9iZWl0cmFnL2Ntcy84MWMxN2MzZC0wMjkxLTRmMzUtODk4ZS0wYzhlOWQxODE2NGI/', @@ -436,6 +433,13 @@ class ARDBetaMediathekIE(ARDMediathekBaseIE): 'description': 'md5:39578c7b96c9fe50afdf5674ad985e6b', 'upload_date': '20211108', }, + }, { + 'url': 'https://www.ardmediathek.de/sendung/beforeigners/beforeigners/staffel-1/Y3JpZDovL2Rhc2Vyc3RlLmRlL2JlZm9yZWlnbmVycw/1', + 'playlist_count': 6, + 'info_dict': { + 'id': 'Y3JpZDovL2Rhc2Vyc3RlLmRlL2JlZm9yZWlnbmVycw', + 'title': 'beforeigners/beforeigners/staffel-1', + }, }, { 'url': 'https://beta.ardmediathek.de/ard/video/Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhdG9ydC9mYmM4NGM1NC0xNzU4LTRmZGYtYWFhZS0wYzcyZTIxNGEyMDE', 'only_matching': True, @@ -561,14 +565,15 @@ class ARDBetaMediathekIE(ARDMediathekBaseIE): break pageNumber = pageNumber + 1 - return self.playlist_result(entries, playlist_title=display_id) + return self.playlist_result(entries, playlist_id, playlist_title=display_id) def _real_extract(self, url): - video_id, display_id, playlist_type, client = self._match_valid_url(url).group( - 'id', 'display_id', 'playlist', 'client') + video_id, display_id, playlist_type, client, season_number = self._match_valid_url(url).group( + 'id', 'display_id', 'playlist', 'client', 'season') display_id, client = display_id or video_id, client or 'ard' if playlist_type: + # TODO: Extract only specified season return self._ARD_extract_playlist(url, video_id, display_id, client, playlist_type) player_page = self._download_json( diff --git a/plugins/youtube_download/yt_dlp/extractor/arkena.py b/plugins/youtube_download/yt_dlp/extractor/arkena.py index 4f4f457..de36ec8 100644 --- a/plugins/youtube_download/yt_dlp/extractor/arkena.py +++ b/plugins/youtube_download/yt_dlp/extractor/arkena.py @@ -1,8 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - -import re - from .common import InfoExtractor from ..utils import ( ExtractorError, @@ -22,6 +17,8 @@ class ArkenaIE(InfoExtractor): play\.arkena\.com/(?:config|embed)/avp/v\d/player/media/(?P<id>[^/]+)/[^/]+/(?P<account_id>\d+) ) ''' + # See https://support.arkena.com/display/PLAY/Ways+to+embed+your+video + _EMBED_REGEX = [r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//play\.arkena\.com/embed/avp/.+?)\1'] _TESTS = [{ 'url': 'https://video.qbrick.com/play2/embed/player?accountId=1034090&mediaId=d8ab4607-00090107-aab86310', 'md5': '97f117754e5f3c020f5f26da4a44ebaf', @@ -53,15 +50,6 @@ class ArkenaIE(InfoExtractor): 'only_matching': True, }] - @staticmethod - def _extract_url(webpage): - # See https://support.arkena.com/display/PLAY/Ways+to+embed+your+video - mobj = re.search( - r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//play\.arkena\.com/embed/avp/.+?)\1', - webpage) - if mobj: - return mobj.group('url') - def _real_extract(self, url): mobj = self._match_valid_url(url) video_id = mobj.group('id') @@ -148,7 +136,6 @@ class ArkenaIE(InfoExtractor): elif mime_type == 'application/vnd.ms-sstr+xml': formats.extend(self._extract_ism_formats( href, video_id, ism_id='mss', fatal=False)) - self._sort_formats(formats) return { 'id': video_id, diff --git a/plugins/youtube_download/yt_dlp/extractor/arnes.py b/plugins/youtube_download/yt_dlp/extractor/arnes.py index 050c252..a493714 100644 --- a/plugins/youtube_download/yt_dlp/extractor/arnes.py +++ b/plugins/youtube_download/yt_dlp/extractor/arnes.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - from .common import
InfoExtractor from ..compat import ( compat_parse_qs, @@ -76,7 +73,6 @@ class ArnesIE(InfoExtractor): 'width': int_or_none(media.get('width')), 'height': int_or_none(media.get('height')), }) - self._sort_formats(formats) channel = video.get('channel') or {} channel_id = channel.get('url') @@ -93,7 +89,7 @@ class ArnesIE(InfoExtractor): 'timestamp': parse_iso8601(video.get('creationTime')), 'channel': channel.get('name'), 'channel_id': channel_id, - 'channel_url': format_field(channel_id, template=f'{self._BASE_URL}/?channel=%s'), + 'channel_url': format_field(channel_id, None, f'{self._BASE_URL}/?channel=%s'), 'duration': float_or_none(video.get('duration'), 1000), 'view_count': int_or_none(video.get('views')), 'tags': video.get('hashtags'), diff --git a/plugins/youtube_download/yt_dlp/extractor/arte.py b/plugins/youtube_download/yt_dlp/extractor/arte.py index 296b169..e3cc5af 100644 --- a/plugins/youtube_download/yt_dlp/extractor/arte.py +++ b/plugins/youtube_download/yt_dlp/extractor/arte.py @@ -1,188 +1,232 @@ -# coding: utf-8 -from __future__ import unicode_literals - import re from .common import InfoExtractor -from ..compat import ( - compat_str, -) from ..utils import ( ExtractorError, + GeoRestrictedError, int_or_none, + parse_iso8601, parse_qs, - qualities, - try_get, - unified_strdate, + strip_or_none, + traverse_obj, url_or_none, ) class ArteTVBaseIE(InfoExtractor): _ARTE_LANGUAGES = 'fr|de|en|es|it|pl' - _API_BASE = 'https://api.arte.tv/api/player/v1' + _API_BASE = 'https://api.arte.tv/api/player/v2' class ArteTVIE(ArteTVBaseIE): _VALID_URL = r'''(?x) - https?:// + (?:https?:// (?: (?:www\.)?arte\.tv/(?P%(langs)s)/videos| api\.arte\.tv/api/player/v\d+/config/(?P%(langs)s) ) - /(?P\d{6}-\d{3}-[AF]) + |arte://program) + /(?P\d{6}-\d{3}-[AF]|LIVE) ''' % {'langs': ArteTVBaseIE._ARTE_LANGUAGES} _TESTS = [{ 'url': 'https://www.arte.tv/en/videos/088501-000-A/mexico-stealing-petrol-to-survive/', - 'info_dict': { - 'id': '088501-000-A', - 'ext': 'mp4', - 'title': 'Mexico: Stealing Petrol to Survive', - 'upload_date': '20190628', - }, + 'only_matching': True, }, { 'url': 'https://www.arte.tv/pl/videos/100103-000-A/usa-dyskryminacja-na-porodowce/', - 'only_matching': True, + 'info_dict': { + 'id': '100103-000-A', + 'title': 'USA: Dyskryminacja na porodówce', + 'description': 'md5:242017b7cce59ffae340a54baefcafb1', + 'alt_title': 'ARTE Reportage', + 'upload_date': '20201103', + 'duration': 554, + 'thumbnail': r're:https://api-cdn\.arte\.tv/.+940x530', + 'timestamp': 1604417980, + 'ext': 'mp4', + }, + 'params': {'skip_download': 'm3u8'} + }, { + 'note': 'No alt_title', + 'url': 'https://www.arte.tv/fr/videos/110371-000-A/la-chaleur-supplice-des-arbres-de-rue/', + 'info_dict': { + 'id': '110371-000-A', + 'ext': 'mp4', + 'upload_date': '20220718', + 'duration': 154, + 'timestamp': 1658162460, + 'description': 'md5:5890f36fe7dccfadb8b7c0891de54786', + 'title': 'La chaleur, supplice des arbres de rue', + 'thumbnail': 'https://api-cdn.arte.tv/img/v2/image/CPE2sQDtD8GLQgt8DuYHLf/940x530', + }, + 'params': {'skip_download': 'm3u8'} }, { 'url': 'https://api.arte.tv/api/player/v2/config/de/100605-013-A', 'only_matching': True, + }, { + 'url': 'https://api.arte.tv/api/player/v2/config/de/LIVE', + 'only_matching': True, + }, { + 'url': 'https://www.arte.tv/de/videos/110203-006-A/zaz/', + 'info_dict': { + 'id': '110203-006-A', + 'chapters': 'count:16', + 'description': 'md5:cf592f1df52fe52007e3f8eac813c084', + 'alt_title': 'Zaz', + 'title': 'Baloise Session 2022', + 'timestamp': 1668445200, + 
'duration': 4054, + 'thumbnail': 'https://api-cdn.arte.tv/img/v2/image/ubQjmVCGyRx3hmBuZEK9QZ/940x530', + 'upload_date': '20221114', + 'ext': 'mp4', + }, + 'expected_warnings': ['geo restricted'] }] + + _GEO_BYPASS = True + + _LANG_MAP = { # ISO639 -> French abbreviations + 'fr': 'F', + 'de': 'A', + 'en': 'E[ANG]', + 'es': 'E[ESP]', + 'it': 'E[ITA]', + 'pl': 'E[POL]', + # XXX: probably means mixed; + # uses this code for audio that happens to be in Ukrainian, but the manifest uses the ISO code 'mul' (mixed) + 'mul': 'EU', + } + + _VERSION_CODE_RE = re.compile(r'''(?x) + V + (?P<original_voice>O?) + (?P<vlang>[FA]|E\[[A-Z]+\]|EU)? + (?P<audio_desc>AUD|) + (?: + (?P<has_sub>-ST) + (?P<sdh_sub>M?) + (?P<sub_lang>[FA]|E\[[A-Z]+\]|EU) + )? + ''') + + # all obtained by exhaustive testing + _COUNTRIES_MAP = { + 'DE_FR': ( + 'BL', 'DE', 'FR', 'GF', 'GP', 'MF', 'MQ', 'NC', + 'PF', 'PM', 'RE', 'WF', 'YT', + ), + # with both of the below 'BE' sometimes works, sometimes doesn't + 'EUR_DE_FR': ( + 'AT', 'BL', 'CH', 'DE', 'FR', 'GF', 'GP', 'LI', + 'MC', 'MF', 'MQ', 'NC', 'PF', 'PM', 'RE', 'WF', + 'YT', + ), + 'SAT': ( + 'AD', 'AT', 'AX', 'BG', 'BL', 'CH', 'CY', 'CZ', + 'DE', 'DK', 'EE', 'ES', 'FI', 'FR', 'GB', 'GF', + 'GR', 'HR', 'HU', 'IE', 'IS', 'IT', 'KN', 'LI', + 'LT', 'LU', 'LV', 'MC', 'MF', 'MQ', 'MT', 'NC', + 'NL', 'NO', 'PF', 'PL', 'PM', 'PT', 'RE', 'RO', + 'SE', 'SI', 'SK', 'SM', 'VA', 'WF', 'YT', + ), + } + def _real_extract(self, url): mobj = self._match_valid_url(url) video_id = mobj.group('id') lang = mobj.group('lang') or mobj.group('lang_2') + language_code = self._LANG_MAP.get(lang) - info = self._download_json( - '%s/config/%s/%s' % (self._API_BASE, lang, video_id), video_id) - player_info = info['videoJsonPlayer'] + config = self._download_json(f'{self._API_BASE}/config/{lang}/{video_id}', video_id) - vsr = try_get(player_info, lambda x: x['VSR'], dict) - if not vsr: - error = None - if try_get(player_info, lambda x: x['custom_msg']['type']) == 'error': - error = try_get( - player_info, lambda x: x['custom_msg']['msg'], compat_str) - if not error: - error = 'Video %s is not available' % player_info.get('VID') or video_id - raise ExtractorError(error, expected=True) + geoblocking = traverse_obj(config, ('data', 'attributes', 'restriction', 'geoblocking')) or {} + if geoblocking.get('restrictedArea'): + raise GeoRestrictedError(f'Video restricted to {geoblocking["code"]!r}', + countries=self._COUNTRIES_MAP.get(geoblocking['code'], ('DE', 'FR'))) - upload_date_str = player_info.get('shootingDate') - if not upload_date_str: - upload_date_str = (player_info.get('VRA') or player_info.get('VDA') or '').split(' ')[0] + if not traverse_obj(config, ('data', 'attributes', 'rights')): + # Eg: https://www.arte.tv/de/videos/097407-215-A/28-minuten + # Eg: https://www.arte.tv/es/videos/104351-002-A/serviteur-du-peuple-1-23 + raise ExtractorError( + 'Video is not available in this language edition of Arte or broadcast rights expired', expected=True) - title = (player_info.get('VTI') or player_info['VID']).strip() - subtitle = player_info.get('VSU', '').strip() - if subtitle: - title += ' - %s' % subtitle + formats, subtitles = [], {} + secondary_formats = [] + for stream in config['data']['attributes']['streams']: + # official player contains code like `e.get("versions")[0].eStat.ml5` + stream_version = stream['versions'][0] + stream_version_code = stream_version['eStat']['ml5'] - qfunc = qualities(['MQ', 'HQ', 'EQ', 'SQ']) + lang_pref = -1 + m = self._VERSION_CODE_RE.match(stream_version_code) + if m: + lang_pref = int(''.join('01'[x] for x in ( + m.group('vlang')
== language_code, # we prefer voice in the requested language + not m.group('audio_desc'), # and not the audio description version + bool(m.group('original_voice')), # but if voice is not in the requested language, at least choose the original voice + m.group('sub_lang') == language_code, # if subtitles are present, we prefer them in the requested language + not m.group('has_sub'), # but we prefer no subtitles otherwise + not m.group('sdh_sub'), # and we prefer not the hard-of-hearing subtitles if there are subtitles + ))) - LANGS = { - 'fr': 'F', - 'de': 'A', - 'en': 'E[ANG]', - 'es': 'E[ESP]', - 'it': 'E[ITA]', - 'pl': 'E[POL]', - } + short_label = traverse_obj(stream_version, 'shortLabel', expected_type=str, default='?') + if stream['protocol'].startswith('HLS'): + fmts, subs = self._extract_m3u8_formats_and_subtitles( + stream['url'], video_id=video_id, ext='mp4', m3u8_id=stream_version_code, fatal=False) + for fmt in fmts: + fmt.update({ + 'format_note': f'{stream_version.get("label", "unknown")} [{short_label}]', + 'language_preference': lang_pref, + }) + if any(map(short_label.startswith, ('cc', 'OGsub'))): + secondary_formats.extend(fmts) + else: + formats.extend(fmts) + self._merge_subtitles(subs, target=subtitles) - langcode = LANGS.get(lang, lang) + elif stream['protocol'] in ('HTTPS', 'RTMP'): + formats.append({ + 'format_id': f'{stream["protocol"]}-{stream_version_code}', + 'url': stream['url'], + 'format_note': f'{stream_version.get("label", "unknown")} [{short_label}]', + 'language_preference': lang_pref, + # 'ext': 'mp4', # XXX: may or may not be necessary, at least for HTTPS + }) - formats = [] - for format_id, format_dict in vsr.items(): - f = dict(format_dict) - format_url = url_or_none(f.get('url')) - streamer = f.get('streamer') - if not format_url and not streamer: - continue - versionCode = f.get('versionCode') - l = re.escape(langcode) - - # Language preference from most to least priority - # Reference: section 6.8 of - # https://www.arte.tv/sites/en/corporate/files/complete-technical-guidelines-arte-geie-v1-07-1.pdf - PREFERENCES = ( - # original version in requested language, without subtitles - r'VO{0}$'.format(l), - # original version in requested language, with partial subtitles in requested language - r'VO{0}-ST{0}$'.format(l), - # original version in requested language, with subtitles for the deaf and hard-of-hearing in requested language - r'VO{0}-STM{0}$'.format(l), - # non-original (dubbed) version in requested language, without subtitles - r'V{0}$'.format(l), - # non-original (dubbed) version in requested language, with subtitles partial subtitles in requested language - r'V{0}-ST{0}$'.format(l), - # non-original (dubbed) version in requested language, with subtitles for the deaf and hard-of-hearing in requested language - r'V{0}-STM{0}$'.format(l), - # original version in requested language, with partial subtitles in different language - r'VO{0}-ST(?!{0}).+?$'.format(l), - # original version in requested language, with subtitles for the deaf and hard-of-hearing in different language - r'VO{0}-STM(?!{0}).+?$'.format(l), - # original version in different language, with partial subtitles in requested language - r'VO(?:(?!{0}).+?)?-ST{0}$'.format(l), - # original version in different language, with subtitles for the deaf and hard-of-hearing in requested language - r'VO(?:(?!{0}).+?)?-STM{0}$'.format(l), - # original version in different language, without subtitles - r'VO(?:(?!{0}))?$'.format(l), - # original version in different language, with partial
subtitles in different language - r'VO(?:(?!{0}).+?)?-ST(?!{0}).+?$'.format(l), - # original version in different language, with subtitles for the deaf and hard-of-hearing in different language - r'VO(?:(?!{0}).+?)?-STM(?!{0}).+?$'.format(l), - ) - - for pref, p in enumerate(PREFERENCES): - if re.match(p, versionCode): - lang_pref = len(PREFERENCES) - pref - break else: - lang_pref = -1 + self.report_warning(f'Skipping stream with unknown protocol {stream["protocol"]}') - media_type = f.get('mediaType') - if media_type == 'hls': - m3u8_formats = self._extract_m3u8_formats( - format_url, video_id, 'mp4', entry_protocol='m3u8_native', - m3u8_id=format_id, fatal=False) - for m3u8_format in m3u8_formats: - m3u8_format['language_preference'] = lang_pref - formats.extend(m3u8_formats) - continue + formats.extend(secondary_formats) + self._remove_duplicate_formats(formats) - format = { - 'format_id': format_id, - 'language_preference': lang_pref, - 'format_note': '%s, %s' % (f.get('versionCode'), f.get('versionLibelle')), - 'width': int_or_none(f.get('width')), - 'height': int_or_none(f.get('height')), - 'tbr': int_or_none(f.get('bitrate')), - 'quality': qfunc(f.get('quality')), - } - - if media_type == 'rtmp': - format['url'] = f['streamer'] - format['play_path'] = 'mp4:' + f['url'] - format['ext'] = 'flv' - else: - format['url'] = f['url'] - - formats.append(format) - - # For this extractor, quality only represents the relative quality - # with respect to other formats with the same resolution - self._sort_formats(formats, ('res', 'quality')) + metadata = config['data']['attributes']['metadata'] return { - 'id': player_info.get('VID') or video_id, - 'title': title, - 'description': player_info.get('VDE') or player_info.get('V7T'), - 'upload_date': unified_strdate(upload_date_str), - 'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'), + 'id': metadata['providerId'], + 'webpage_url': traverse_obj(metadata, ('link', 'url')), + 'title': traverse_obj(metadata, 'subtitle', 'title'), + 'alt_title': metadata.get('subtitle') and metadata.get('title'), + 'description': metadata.get('description'), + 'duration': traverse_obj(metadata, ('duration', 'seconds')), + 'language': metadata.get('language'), + 'timestamp': traverse_obj(config, ('data', 'attributes', 'rights', 'begin'), expected_type=parse_iso8601), + 'is_live': config['data']['attributes'].get('live', False), 'formats': formats, + 'subtitles': subtitles, + 'thumbnails': [ + {'url': image['url'], 'id': image.get('caption')} + for image in metadata.get('images') or [] if url_or_none(image.get('url')) + ], + # TODO: chapters may also be in stream['segments']? 
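+ # (note: a dict inside a traverse_obj path acts as a per-element template, so each
+ # chapter element is mapped to {'start_time': element['startTime'], 'title': element['title']};
+ # the trailing `or None` below drops the key when no chapter elements are present)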
+ 'chapters': traverse_obj(config, ('data', 'attributes', 'chapters', 'elements', ..., { + 'start_time': 'startTime', + 'title': 'title', + })) or None, } class ArteTVEmbedIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?arte\.tv/player/v\d+/index\.php\?.*?\bjson_url=.+' + _EMBED_REGEX = [r'<(?:iframe|script)[^>]+src=(["\'])(?P(?:https?:)?//(?:www\.)?arte\.tv/player/v\d+/index\.php\?.*?\bjson_url=.+?)\1'] _TESTS = [{ 'url': 'https://www.arte.tv/player/v5/index.php?json_url=https%3A%2F%2Fapi.arte.tv%2Fapi%2Fplayer%2Fv2%2Fconfig%2Fde%2F100605-013-A&lang=de&autoplay=true&mute=0100605-013-A', 'info_dict': { @@ -192,17 +236,12 @@ class ArteTVEmbedIE(InfoExtractor): 'description': 'md5:be40b667f45189632b78c1425c7c2ce1', 'upload_date': '20201116', }, + 'skip': 'No video available' }, { 'url': 'https://www.arte.tv/player/v3/index.php?json_url=https://api.arte.tv/api/player/v2/config/de/100605-013-A', 'only_matching': True, }] - @staticmethod - def _extract_urls(webpage): - return [url for _, url in re.findall( - r'<(?:iframe|script)[^>]+src=(["\'])(?P(?:https?:)?//(?:www\.)?arte\.tv/player/v\d+/index\.php\?.*?\bjson_url=.+?)\1', - webpage)] - def _real_extract(self, url): qs = parse_qs(url) json_url = qs['json_url'][0] @@ -215,41 +254,71 @@ class ArteTVPlaylistIE(ArteTVBaseIE): _VALID_URL = r'https?://(?:www\.)?arte\.tv/(?P%s)/videos/(?PRC-\d{6})' % ArteTVBaseIE._ARTE_LANGUAGES _TESTS = [{ 'url': 'https://www.arte.tv/en/videos/RC-016954/earn-a-living/', - 'info_dict': { - 'id': 'RC-016954', - 'title': 'Earn a Living', - 'description': 'md5:d322c55011514b3a7241f7fb80d494c2', - }, - 'playlist_mincount': 6, + 'only_matching': True, }, { 'url': 'https://www.arte.tv/pl/videos/RC-014123/arte-reportage/', - 'only_matching': True, + 'playlist_mincount': 100, + 'info_dict': { + 'description': 'md5:84e7bf1feda248bc325ebfac818c476e', + 'id': 'RC-014123', + 'title': 'ARTE Reportage - najlepsze reportaże', + }, }] + def _real_extract(self, url): + lang, playlist_id = self._match_valid_url(url).group('lang', 'id') + playlist = self._download_json( + f'{self._API_BASE}/playlist/{lang}/{playlist_id}', playlist_id)['data']['attributes'] + + entries = [{ + '_type': 'url_transparent', + 'url': video['config']['url'], + 'ie_key': ArteTVIE.ie_key(), + 'id': video.get('providerId'), + 'title': video.get('title'), + 'alt_title': video.get('subtitle'), + 'thumbnail': url_or_none(traverse_obj(video, ('mainImage', 'url'))), + 'duration': int_or_none(traverse_obj(video, ('duration', 'seconds'))), + } for video in traverse_obj(playlist, ('items', lambda _, v: v['config']['url']))] + + return self.playlist_result(entries, playlist_id, + traverse_obj(playlist, ('metadata', 'title')), + traverse_obj(playlist, ('metadata', 'description'))) + + +class ArteTVCategoryIE(ArteTVBaseIE): + _VALID_URL = r'https?://(?:www\.)?arte\.tv/(?P%s)/videos/(?P[\w-]+(?:/[\w-]+)*)/?\s*$' % ArteTVBaseIE._ARTE_LANGUAGES + _TESTS = [{ + 'url': 'https://www.arte.tv/en/videos/politics-and-society/', + 'info_dict': { + 'id': 'politics-and-society', + 'title': 'Politics and society', + 'description': 'Investigative documentary series, geopolitical analysis, and international commentary', + }, + 'playlist_mincount': 13, + }] + + @classmethod + def suitable(cls, url): + return ( + not any(ie.suitable(url) for ie in (ArteTVIE, ArteTVPlaylistIE, )) + and super().suitable(url)) + def _real_extract(self, url): lang, playlist_id = self._match_valid_url(url).groups() - collection = self._download_json( - '%s/collectionData/%s/%s?source=videos' - % 
(self._API_BASE, lang, playlist_id), playlist_id) - entries = [] - for video in collection['videos']: - if not isinstance(video, dict): + webpage = self._download_webpage(url, playlist_id) + + items = [] + for video in re.finditer( + r']*?href\s*=\s*(?P"|\'|\b)(?Phttps?://www\.arte\.tv/%s/videos/[\w/-]+)(?P=q)' % lang, + webpage): + video = video.group('url') + if video == url: continue - video_url = url_or_none(video.get('url')) or url_or_none(video.get('jsonUrl')) - if not video_url: - continue - video_id = video.get('programId') - entries.append({ - '_type': 'url_transparent', - 'url': video_url, - 'id': video_id, - 'title': video.get('title'), - 'alt_title': video.get('subtitle'), - 'thumbnail': url_or_none(try_get(video, lambda x: x['mainImage']['url'], compat_str)), - 'duration': int_or_none(video.get('durationSeconds')), - 'view_count': int_or_none(video.get('views')), - 'ie_key': ArteTVIE.ie_key(), - }) - title = collection.get('title') - description = collection.get('shortDescription') or collection.get('teaserText') - return self.playlist_result(entries, playlist_id, title, description) + if any(ie.suitable(video) for ie in (ArteTVIE, ArteTVPlaylistIE, )): + items.append(video) + + title = strip_or_none(self._generic_title('', webpage, default='').rsplit('|', 1)[0]) or None + + return self.playlist_from_matches(items, playlist_id=playlist_id, playlist_title=title, + description=self._og_search_description(webpage, default=None)) diff --git a/plugins/youtube_download/yt_dlp/extractor/asiancrush.py b/plugins/youtube_download/yt_dlp/extractor/asiancrush.py index 75a6329..23f310e 100644 --- a/plugins/youtube_download/yt_dlp/extractor/asiancrush.py +++ b/plugins/youtube_download/yt_dlp/extractor/asiancrush.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - import functools import re @@ -181,8 +178,7 @@ class AsianCrushPlaylistIE(AsianCrushBaseIE): 'title', default=None) or self._og_search_title( webpage, default=None) or self._html_search_meta( 'twitter:title', webpage, 'title', - default=None) or self._search_regex( - r'([^<]+)', webpage, 'title', fatal=False) + default=None) or self._html_extract_title(webpage) if title: title = re.sub(r'\s*\|\s*.+?$', '', title) diff --git a/plugins/youtube_download/yt_dlp/extractor/atresplayer.py b/plugins/youtube_download/yt_dlp/extractor/atresplayer.py index 6d84396..a20e7f9 100644 --- a/plugins/youtube_download/yt_dlp/extractor/atresplayer.py +++ b/plugins/youtube_download/yt_dlp/extractor/atresplayer.py @@ -1,7 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - - from .common import InfoExtractor from ..compat import compat_HTTPError from ..utils import ( @@ -37,9 +33,6 @@ class AtresPlayerIE(InfoExtractor): ] _API_BASE = 'https://api.atresplayer.com/' - def _real_initialize(self): - self._login() - def _handle_error(self, e, code): if isinstance(e.cause, compat_HTTPError) and e.cause.code == code: error = self._parse_json(e.cause.read(), None) @@ -48,11 +41,7 @@ class AtresPlayerIE(InfoExtractor): raise ExtractorError(error['error_description'], expected=True) raise - def _login(self): - username, password = self._get_login_info() - if username is None: - return - + def _perform_login(self, username, password): self._request_webpage( self._API_BASE + 'login', None, 'Downloading login page') @@ -95,7 +84,6 @@ class AtresPlayerIE(InfoExtractor): elif src_type == 'application/dash+xml': formats, subtitles = self._extract_mpd_formats( src, video_id, mpd_id='dash', fatal=False) - 
self._sort_formats(formats) heartbeat = episode.get('heartbeat') or {} omniture = episode.get('omniture') or {} diff --git a/plugins/youtube_download/yt_dlp/extractor/atscaleconf.py b/plugins/youtube_download/yt_dlp/extractor/atscaleconf.py new file mode 100644 index 0000000..3f7b1e9 --- /dev/null +++ b/plugins/youtube_download/yt_dlp/extractor/atscaleconf.py @@ -0,0 +1,34 @@ +import re + +from .common import InfoExtractor + + +class AtScaleConfEventIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?atscaleconference\.com/events/(?P[^/&$?]+)' + + _TESTS = [{ + 'url': 'https://atscaleconference.com/events/data-scale-spring-2022/', + 'playlist_mincount': 13, + 'info_dict': { + 'id': 'data-scale-spring-2022', + 'title': 'Data @Scale Spring 2022', + 'description': 'md5:7d7ca1c42ac9c6d8a785092a1aea4b55' + }, + }, { + 'url': 'https://atscaleconference.com/events/video-scale-2021/', + 'playlist_mincount': 14, + 'info_dict': { + 'id': 'video-scale-2021', + 'title': 'Video @Scale 2021', + 'description': 'md5:7d7ca1c42ac9c6d8a785092a1aea4b55' + }, + }] + + def _real_extract(self, url): + id = self._match_id(url) + webpage = self._download_webpage(url, id) + + return self.playlist_from_matches( + re.findall(r'data-url\s*=\s*"(https?://(?:www\.)?atscaleconference\.com/videos/[^"]+)"', webpage), + ie='Generic', playlist_id=id, + title=self._og_search_title(webpage), description=self._og_search_description(webpage)) diff --git a/plugins/youtube_download/yt_dlp/extractor/atttechchannel.py b/plugins/youtube_download/yt_dlp/extractor/atttechchannel.py index 8f93fb3..6ff4ec0 100644 --- a/plugins/youtube_download/yt_dlp/extractor/atttechchannel.py +++ b/plugins/youtube_download/yt_dlp/extractor/atttechchannel.py @@ -1,5 +1,3 @@ -from __future__ import unicode_literals - from .common import InfoExtractor from ..utils import unified_strdate diff --git a/plugins/youtube_download/yt_dlp/extractor/atvat.py b/plugins/youtube_download/yt_dlp/extractor/atvat.py index 7c30cfc..d6ed9e4 100644 --- a/plugins/youtube_download/yt_dlp/extractor/atvat.py +++ b/plugins/youtube_download/yt_dlp/extractor/atvat.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - import datetime from .common import InfoExtractor @@ -8,6 +5,7 @@ from ..utils import ( float_or_none, jwt_encode_hs256, try_get, + ExtractorError, ) @@ -51,7 +49,6 @@ class ATVAtIE(InfoExtractor): 'url': source_url, 'format_id': protocol, }) - self._sort_formats(formats) return { 'id': clip_id, @@ -94,6 +91,11 @@ class ATVAtIE(InfoExtractor): }) video_id, videos_data = list(videos['data'].items())[0] + error_msg = try_get(videos_data, lambda x: x['error']['title']) + if error_msg == 'Geo check failed': + self.raise_geo_restricted(error_msg) + elif error_msg: + raise ExtractorError(error_msg) entries = [ self._extract_video_info(url, contentResource[video['id']], video) for video in videos_data] diff --git a/plugins/youtube_download/yt_dlp/extractor/audimedia.py b/plugins/youtube_download/yt_dlp/extractor/audimedia.py index 6bd48ef..35114e5 100644 --- a/plugins/youtube_download/yt_dlp/extractor/audimedia.py +++ b/plugins/youtube_download/yt_dlp/extractor/audimedia.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - from .common import InfoExtractor from ..utils import ( int_or_none, @@ -79,7 +76,6 @@ class AudiMediaIE(InfoExtractor): 'format_id': 'http-%s' % bitrate, }) formats.append(f) - self._sort_formats(formats) return { 'id': video_id, diff --git a/plugins/youtube_download/yt_dlp/extractor/audioboom.py 
b/plugins/youtube_download/yt_dlp/extractor/audioboom.py index c51837b..a23fcd2 100644 --- a/plugins/youtube_download/yt_dlp/extractor/audioboom.py +++ b/plugins/youtube_download/yt_dlp/extractor/audioboom.py @@ -1,27 +1,33 @@ -# coding: utf-8 -from __future__ import unicode_literals - from .common import InfoExtractor -from ..utils import ( - clean_html, - float_or_none, -) +from ..utils import clean_html, float_or_none, traverse_obj, unescapeHTML class AudioBoomIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?audioboom\.com/(?:boos|posts)/(?P[0-9]+)' _TESTS = [{ 'url': 'https://audioboom.com/posts/7398103-asim-chaudhry', - 'md5': '7b00192e593ff227e6a315486979a42d', + 'md5': '4d68be11c9f9daf3dab0778ad1e010c3', 'info_dict': { 'id': '7398103', 'ext': 'mp3', 'title': 'Asim Chaudhry', - 'description': 'md5:2f3fef17dacc2595b5362e1d7d3602fc', + 'description': 'md5:0ed714ae0e81e5d9119cac2f618ad679', 'duration': 4000.99, 'uploader': 'Sue Perkins: An hour or so with...', 'uploader_url': r're:https?://(?:www\.)?audioboom\.com/channel/perkins', } + }, { # Direct mp3-file link + 'url': 'https://audioboom.com/posts/8128496.mp3', + 'md5': 'e329edf304d450def95c7f86a9165ee1', + 'info_dict': { + 'id': '8128496', + 'ext': 'mp3', + 'title': 'TCRNo8 / DAILY 03 - In Control', + 'description': 'md5:44665f142db74858dfa21c5b34787948', + 'duration': 1689.7, + 'uploader': 'Lost Dot Podcast: The Trans Pyrenees and Transcontinental Race', + 'uploader_url': r're:https?://(?:www\.)?audioboom\.com/channels/5003904', + } }, { 'url': 'https://audioboom.com/posts/4279833-3-09-2016-czaban-hour-3?t=0', 'only_matching': True, @@ -29,45 +35,23 @@ class AudioBoomIE(InfoExtractor): def _real_extract(self, url): video_id = self._match_id(url) + webpage = self._download_webpage(f'https://audioboom.com/posts/{video_id}', video_id) - webpage = self._download_webpage(url, video_id) - - clip = None - - clip_store = self._parse_json( - self._html_search_regex( - r'data-new-clip-store=(["\'])(?P{.+?})\1', - webpage, 'clip store', default='{}', group='json'), - video_id, fatal=False) - if clip_store: - clips = clip_store.get('clips') - if clips and isinstance(clips, list) and isinstance(clips[0], dict): - clip = clips[0] - - def from_clip(field): - if clip: - return clip.get(field) - - audio_url = from_clip('clipURLPriorToLoading') or self._og_search_property( - 'audio', webpage, 'audio url') - title = from_clip('title') or self._html_search_meta( - ['og:title', 'og:audio:title', 'audio_title'], webpage) - description = from_clip('description') or clean_html(from_clip('formattedDescription')) or self._og_search_description(webpage) - - duration = float_or_none(from_clip('duration') or self._html_search_meta( - 'weibo:audio:duration', webpage)) - - uploader = from_clip('author') or self._html_search_meta( - ['og:audio:artist', 'twitter:audio:artist_name', 'audio_artist'], webpage, 'uploader') - uploader_url = from_clip('author_url') or self._html_search_meta( - 'audioboo:channel', webpage, 'uploader url') + clip_store = self._search_json( + r'data-react-class="V5DetailPagePlayer"\s*data-react-props=["\']', + webpage, 'clip store', video_id, fatal=False, transform_source=unescapeHTML) + clip = traverse_obj(clip_store, ('clips', 0), expected_type=dict) or {} return { 'id': video_id, - 'url': audio_url, - 'title': title, - 'description': description, - 'duration': duration, - 'uploader': uploader, - 'uploader_url': uploader_url, + 'url': clip.get('clipURLPriorToLoading') or self._og_search_property('audio', webpage, 'audio url'), + 
'title': clip.get('title') or self._html_search_meta(['og:title', 'og:audio:title', 'audio_title'], webpage), + 'description': (clip.get('description') or clean_html(clip.get('formattedDescription')) + or self._og_search_description(webpage)), + 'duration': float_or_none(clip.get('duration') or self._html_search_meta('weibo:audio:duration', webpage)), + 'uploader': clip.get('author') or self._html_search_meta( + ['og:audio:artist', 'twitter:audio:artist_name', 'audio_artist'], webpage, 'uploader'), + 'uploader_url': clip.get('author_url') or self._html_search_regex( + r'
\s*\d+)' + + _TESTS = [{ + 'url': 'http://nokiatune.audiodraft.com/entry/5874', + 'info_dict': { + 'id': '9485', + 'ext': 'mp3', + 'title': 'Hula Hula Calls', + 'uploader': 'unclemaki', + 'uploader_id': '13512', + 'average_rating': 5, + 'like_count': int, + }, + }, { + 'url': 'http://vikinggrace.audiodraft.com/entry/501', + 'info_dict': { + 'id': '22241', + 'ext': 'mp3', + 'title': 'MVG Happy', + 'uploader': 'frog', + 'uploader_id': '19142', + 'average_rating': 5, + 'like_count': int, + }, + }, { + 'url': 'http://timferriss.audiodraft.com/entry/765', + 'info_dict': { + 'id': '19710', + 'ext': 'mp3', + 'title': 'ferris03', + 'uploader': 'malex', + 'uploader_id': '17335', + 'average_rating': 5, + 'like_count': int, + }, + }] + + def _real_extract(self, url): + id = self._match_id(url) + webpage = self._download_webpage(url, id) + player_entry_id = self._search_regex(r'playAudio\(\'(player_entry_\d+)\'\);', webpage, id, 'play entry id') + return self._audiodraft_extract_from_id(player_entry_id) + + +class AudiodraftGenericIE(AudiodraftBaseIE): + IE_NAME = 'Audiodraft:generic' + _VALID_URL = r'https?://www\.audiodraft\.com/contests/[^/#]+#entries&eid=(?P\d+)' + + _TESTS = [{ + 'url': 'https://www.audiodraft.com/contests/570-Score-A-Video-Surprise-Us#entries&eid=30138', + 'info_dict': { + 'id': '30138', + 'ext': 'mp3', + 'title': 'DROP in sound_V2', + 'uploader': 'TiagoSilva', + 'uploader_id': '19452', + 'average_rating': 4, + 'like_count': int, + }, + }] + + def _real_extract(self, url): + id = self._match_id(url) + return self._audiodraft_extract_from_id(f'player_entry_{id}') diff --git a/plugins/youtube_download/yt_dlp/extractor/audiomack.py b/plugins/youtube_download/yt_dlp/extractor/audiomack.py index 31fb859..5c4160f 100644 --- a/plugins/youtube_download/yt_dlp/extractor/audiomack.py +++ b/plugins/youtube_download/yt_dlp/extractor/audiomack.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - import itertools import time @@ -29,6 +26,7 @@ class AudiomackIE(InfoExtractor): } }, # audiomack wrapper around soundcloud song + # Needs new test URL. { 'add_ie': ['Soundcloud'], 'url': 'http://www.audiomack.com/song/hip-hop-daily/black-mamba-freestyle', diff --git a/plugins/youtube_download/yt_dlp/extractor/audius.py b/plugins/youtube_download/yt_dlp/extractor/audius.py index fa64995..6448b44 100644 --- a/plugins/youtube_download/yt_dlp/extractor/audius.py +++ b/plugins/youtube_download/yt_dlp/extractor/audius.py @@ -1,11 +1,8 @@ -# coding: utf-8 -from __future__ import unicode_literals - import random from .common import InfoExtractor -from ..utils import ExtractorError, try_get, compat_str, str_or_none -from ..compat import compat_urllib_parse_unquote +from ..compat import compat_str, compat_urllib_parse_unquote +from ..utils import ExtractorError, str_or_none, try_get class AudiusBaseIE(InfoExtractor): @@ -171,7 +168,7 @@ class AudiusIE(AudiusBaseIE): } -class AudiusTrackIE(AudiusIE): +class AudiusTrackIE(AudiusIE): # XXX: Do not subclass from concrete IE _VALID_URL = r'''(?x)(?:audius:)(?:https?://(?:www\.)?.+/v1/tracks/)?(?P\w+)''' IE_NAME = 'audius:track' IE_DESC = 'Audius track ID or API link. 
Prepend with "audius:"' @@ -246,7 +243,7 @@ class AudiusPlaylistIE(AudiusBaseIE): playlist_data.get('description')) -class AudiusProfileIE(AudiusPlaylistIE): +class AudiusProfileIE(AudiusPlaylistIE): # XXX: Do not subclass from concrete IE IE_NAME = 'audius:artist' IE_DESC = 'Audius.co profile/artist pages' _VALID_URL = r'https?://(?:www)?audius\.co/(?P[^\/]+)/?(?:[?#]|$)' diff --git a/plugins/youtube_download/yt_dlp/extractor/awaan.py b/plugins/youtube_download/yt_dlp/extractor/awaan.py index f5e559c..6fc938d 100644 --- a/plugins/youtube_download/yt_dlp/extractor/awaan.py +++ b/plugins/youtube_download/yt_dlp/extractor/awaan.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - import base64 from .common import InfoExtractor @@ -44,7 +41,7 @@ class AWAANBaseIE(InfoExtractor): 'id': video_id, 'title': title, 'description': video_data.get('description_en') or video_data.get('description_ar'), - 'thumbnail': format_field(img, template='http://admin.mangomolo.com/analytics/%s'), + 'thumbnail': format_field(img, None, 'http://admin.mangomolo.com/analytics/%s'), 'duration': int_or_none(video_data.get('duration')), 'timestamp': parse_iso8601(video_data.get('create_time'), ' '), 'is_live': is_live, diff --git a/plugins/youtube_download/yt_dlp/extractor/aws.py b/plugins/youtube_download/yt_dlp/extractor/aws.py index dccfeaf..eb831a1 100644 --- a/plugins/youtube_download/yt_dlp/extractor/aws.py +++ b/plugins/youtube_download/yt_dlp/extractor/aws.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - import datetime import hashlib import hmac @@ -9,7 +6,7 @@ from .common import InfoExtractor from ..compat import compat_urllib_parse_urlencode -class AWSIE(InfoExtractor): +class AWSIE(InfoExtractor): # XXX: Conventionally, base classes should end with BaseIE/InfoExtractor _AWS_ALGORITHM = 'AWS4-HMAC-SHA256' _AWS_REGION = 'us-east-1' diff --git a/plugins/youtube_download/yt_dlp/extractor/azmedien.py b/plugins/youtube_download/yt_dlp/extractor/azmedien.py index fee640e..d1686ee 100644 --- a/plugins/youtube_download/yt_dlp/extractor/azmedien.py +++ b/plugins/youtube_download/yt_dlp/extractor/azmedien.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - import json from .common import InfoExtractor @@ -11,11 +8,12 @@ class AZMedienIE(InfoExtractor): IE_DESC = 'AZ Medien videos' _VALID_URL = r'''(?x) https?:// - (?:www\.)? + (?:www\.|tv\.)? 
(?P telezueri\.ch| telebaern\.tv| - telem1\.ch + telem1\.ch| + tvo-online\.ch )/ [^/]+/ (?P @@ -30,7 +28,7 @@ class AZMedienIE(InfoExtractor): ''' _TESTS = [{ - 'url': 'https://www.telezueri.ch/sonntalk/bundesrats-vakanzen-eu-rahmenabkommen-133214569', + 'url': 'https://tv.telezueri.ch/sonntalk/bundesrats-vakanzen-eu-rahmenabkommen-133214569', 'info_dict': { 'id': '1_anruz3wy', 'ext': 'mp4', @@ -38,6 +36,9 @@ class AZMedienIE(InfoExtractor): 'uploader_id': 'TVOnline', 'upload_date': '20180930', 'timestamp': 1538328802, + 'view_count': int, + 'thumbnail': 'http://cfvod.kaltura.com/p/1719221/sp/171922100/thumbnail/entry_id/1_anruz3wy/version/100031', + 'duration': 1930 }, 'params': { 'skip_download': True, diff --git a/plugins/youtube_download/yt_dlp/extractor/baidu.py b/plugins/youtube_download/yt_dlp/extractor/baidu.py index 364fd94..8786d67 100644 --- a/plugins/youtube_download/yt_dlp/extractor/baidu.py +++ b/plugins/youtube_download/yt_dlp/extractor/baidu.py @@ -1,7 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - - from .common import InfoExtractor from ..utils import unescapeHTML diff --git a/plugins/youtube_download/yt_dlp/extractor/banbye.py b/plugins/youtube_download/yt_dlp/extractor/banbye.py new file mode 100644 index 0000000..c873425 --- /dev/null +++ b/plugins/youtube_download/yt_dlp/extractor/banbye.py @@ -0,0 +1,148 @@ +import math + +from .common import InfoExtractor +from ..compat import ( + compat_urllib_parse_urlparse, + compat_parse_qs, +) +from ..utils import ( + format_field, + InAdvancePagedList, + traverse_obj, + unified_timestamp, +) + + +class BanByeBaseIE(InfoExtractor): + _API_BASE = 'https://api.banbye.com' + _CDN_BASE = 'https://cdn.banbye.com' + _VIDEO_BASE = 'https://banbye.com/watch' + + @staticmethod + def _extract_playlist_id(url, param='playlist'): + return compat_parse_qs( + compat_urllib_parse_urlparse(url).query).get(param, [None])[0] + + def _extract_playlist(self, playlist_id): + data = self._download_json(f'{self._API_BASE}/playlists/{playlist_id}', playlist_id) + return self.playlist_result([ + self.url_result(f'{self._VIDEO_BASE}/{video_id}', BanByeIE) + for video_id in data['videoIds']], playlist_id, data.get('name')) + + +class BanByeIE(BanByeBaseIE): + _VALID_URL = r'https?://(?:www\.)?banbye.com/(?:en/)?watch/(?P\w+)' + _TESTS = [{ + 'url': 'https://banbye.com/watch/v_ytfmvkVYLE8T', + 'md5': '2f4ea15c5ca259a73d909b2cfd558eb5', + 'info_dict': { + 'id': 'v_ytfmvkVYLE8T', + 'ext': 'mp4', + 'title': 'md5:5ec098f88a0d796f987648de6322ba0f', + 'description': 'md5:4d94836e73396bc18ef1fa0f43e5a63a', + 'uploader': 'wRealu24', + 'channel_id': 'ch_wrealu24', + 'channel_url': 'https://banbye.com/channel/ch_wrealu24', + 'timestamp': 1647604800, + 'upload_date': '20220318', + 'duration': 1931, + 'thumbnail': r're:https?://.*\.webp', + 'tags': 'count:5', + 'like_count': int, + 'dislike_count': int, + 'view_count': int, + 'comment_count': int, + }, + }, { + 'url': 'https://banbye.com/watch/v_2JjQtqjKUE_F?playlistId=p_Ld82N6gBw_OJ', + 'info_dict': { + 'title': 'Krzysztof Karoń', + 'id': 'p_Ld82N6gBw_OJ', + }, + 'playlist_count': 9, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + playlist_id = self._extract_playlist_id(url, 'playlistId') + + if self._yes_playlist(playlist_id, video_id): + return self._extract_playlist(playlist_id) + + data = self._download_json(f'{self._API_BASE}/videos/{video_id}', video_id) + thumbnails = [{ + 'id': f'{quality}p', + 'url': f'{self._CDN_BASE}/video/{video_id}/{quality}.webp', + } for 
quality in [48, 96, 144, 240, 512, 1080]] + formats = [{ + 'format_id': f'http-{quality}p', + 'quality': quality, + 'url': f'{self._CDN_BASE}/video/{video_id}/{quality}.mp4', + } for quality in data['quality']] + + return { + 'id': video_id, + 'title': data.get('title'), + 'description': data.get('desc'), + 'uploader': traverse_obj(data, ('channel', 'name')), + 'channel_id': data.get('channelId'), + 'channel_url': format_field(data, 'channelId', 'https://banbye.com/channel/%s'), + 'timestamp': unified_timestamp(data.get('publishedAt')), + 'duration': data.get('duration'), + 'tags': data.get('tags'), + 'formats': formats, + 'thumbnails': thumbnails, + 'like_count': data.get('likes'), + 'dislike_count': data.get('dislikes'), + 'view_count': data.get('views'), + 'comment_count': data.get('commentCount'), + } + + +class BanByeChannelIE(BanByeBaseIE): + _VALID_URL = r'https?://(?:www\.)?banbye.com/(?:en/)?channel/(?P\w+)' + _TESTS = [{ + 'url': 'https://banbye.com/channel/ch_wrealu24', + 'info_dict': { + 'title': 'wRealu24', + 'id': 'ch_wrealu24', + 'description': 'md5:da54e48416b74dfdde20a04867c0c2f6', + }, + 'playlist_mincount': 791, + }, { + 'url': 'https://banbye.com/channel/ch_wrealu24?playlist=p_Ld82N6gBw_OJ', + 'info_dict': { + 'title': 'Krzysztof Karoń', + 'id': 'p_Ld82N6gBw_OJ', + }, + 'playlist_count': 9, + }] + _PAGE_SIZE = 100 + + def _real_extract(self, url): + channel_id = self._match_id(url) + playlist_id = self._extract_playlist_id(url) + + if playlist_id: + return self._extract_playlist(playlist_id) + + def page_func(page_num): + data = self._download_json(f'{self._API_BASE}/videos', channel_id, query={ + 'channelId': channel_id, + 'sort': 'new', + 'limit': self._PAGE_SIZE, + 'offset': page_num * self._PAGE_SIZE, + }, note=f'Downloading page {page_num+1}') + return [ + self.url_result(f"{self._VIDEO_BASE}/{video['_id']}", BanByeIE) + for video in data['items'] + ] + + channel_data = self._download_json(f'{self._API_BASE}/channels/{channel_id}', channel_id) + entries = InAdvancePagedList( + page_func, + math.ceil(channel_data['videoCount'] / self._PAGE_SIZE), + self._PAGE_SIZE) + + return self.playlist_result( + entries, channel_id, channel_data.get('name'), channel_data.get('description')) diff --git a/plugins/youtube_download/yt_dlp/extractor/bandaichannel.py b/plugins/youtube_download/yt_dlp/extractor/bandaichannel.py index f1bcdef..d7fcf44 100644 --- a/plugins/youtube_download/yt_dlp/extractor/bandaichannel.py +++ b/plugins/youtube_download/yt_dlp/extractor/bandaichannel.py @@ -1,11 +1,8 @@ -# coding: utf-8 -from __future__ import unicode_literals - -from .brightcove import BrightcoveNewIE +from .brightcove import BrightcoveNewBaseIE from ..utils import extract_attributes -class BandaiChannelIE(BrightcoveNewIE): +class BandaiChannelIE(BrightcoveNewBaseIE): IE_NAME = 'bandaichannel' _VALID_URL = r'https?://(?:www\.)?b-ch\.com/titles/(?P\d+/\d+)' _TESTS = [{ diff --git a/plugins/youtube_download/yt_dlp/extractor/bandcamp.py b/plugins/youtube_download/yt_dlp/extractor/bandcamp.py index b664145..e89b3a6 100644 --- a/plugins/youtube_download/yt_dlp/extractor/bandcamp.py +++ b/plugins/youtube_download/yt_dlp/extractor/bandcamp.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - import random import re import time @@ -8,34 +5,42 @@ import time from .common import InfoExtractor from ..compat import compat_str from ..utils import ( + KNOWN_EXTENSIONS, ExtractorError, float_or_none, int_or_none, - KNOWN_EXTENSIONS, parse_filesize, str_or_none, try_get, - 
update_url_query, unified_strdate, unified_timestamp, + update_url_query, url_or_none, urljoin, ) class BandcampIE(InfoExtractor): - _VALID_URL = r'https?://[^/]+\.bandcamp\.com/track/(?P<id>[^/?#&]+)' + _VALID_URL = r'https?://(?P<uploader>[^/]+)\.bandcamp\.com/track/(?P<id>[^/?#&]+)' + _EMBED_REGEX = [r'<meta property="og:url"[^>]*?content="(?P<url>.*?bandcamp\.com.*?)"'] _TESTS = [{ 'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song', 'md5': 'c557841d5e50261777a6585648adf439', 'info_dict': { 'id': '1812978515', 'ext': 'mp3', - 'title': "youtube-dl \"'/\\ä↭ - youtube-dl \"'/\\ä↭ - youtube-dl test song \"'/\\ä↭", + 'title': 'youtube-dl "\'/\\ä↭ - youtube-dl "\'/\\ä↭ - youtube-dl test song "\'/\\ä↭', 'duration': 9.8485, - 'uploader': 'youtube-dl "\'/\\ä↭', + 'uploader': 'youtube-dl "\'/\\ä↭', 'upload_date': '20121129', 'timestamp': 1354224127, + 'track': 'youtube-dl "\'/\\ä↭ - youtube-dl test song "\'/\\ä↭', + 'album_artist': 'youtube-dl "\'/\\ä↭', + 'track_id': '1812978515', + 'artist': 'youtube-dl "\'/\\ä↭', + 'uploader_url': 'https://youtube-dl.bandcamp.com', + 'uploader_id': 'youtube-dl', + 'thumbnail': 'https://f4.bcbits.com/img/a3216802731_5.jpg', }, '_skip': 'There is a limit of 200 free downloads / month for the test song' }, { @@ -43,7 +48,8 @@ class BandcampIE(InfoExtractor): 'url': 'http://benprunty.bandcamp.com/track/lanius-battle', 'info_dict': { 'id': '2650410135', - 'ext': 'aiff', + 'ext': 'm4a', + 'acodec': r're:[fa]lac', 'title': 'Ben Prunty - Lanius (Battle)', 'thumbnail': r're:^https?://.*\.jpg$', 'uploader': 'Ben Prunty', @@ -56,7 +62,10 @@ class BandcampIE(InfoExtractor): 'track_number': 1, 'track_id': '2650410135', 'artist': 'Ben Prunty', + 'album_artist': 'Ben Prunty', 'album': 'FTL: Advanced Edition Soundtrack', + 'uploader_url': 'https://benprunty.bandcamp.com', + 'uploader_id': 'benprunty', }, }, { # no free download, mp3 128 @@ -77,7 +86,34 @@ class BandcampIE(InfoExtractor): 'track_number': 5, 'track_id': '2584466013', 'artist': 'Mastodon', + 'album_artist': 'Mastodon', 'album': 'Call of the Mastodon', + 'uploader_url': 'https://relapsealumni.bandcamp.com', + 'uploader_id': 'relapsealumni', + }, + }, { + # track from compilation album (artist/album_artist difference) + 'url': 'https://diskotopia.bandcamp.com/track/safehouse', + 'md5': '19c5337bca1428afa54129f86a2f6a69', + 'info_dict': { + 'id': '1978174799', + 'ext': 'mp3', + 'title': 'submerse - submerse - Safehouse', + 'thumbnail': r're:^https?://.*\.jpg$', + 'uploader': 'submerse', + 'timestamp': 1480779297, + 'upload_date': '20161203', + 'release_timestamp': 1481068800, + 'release_date': '20161207', + 'duration': 154.066, + 'track': 'submerse - Safehouse', + 'track_number': 3, + 'track_id': '1978174799', + 'artist': 'submerse', + 'album_artist': 'Diskotopia', + 'album': 'DSK F/W 2016-2017 Free Compilation', + 'uploader_url': 'https://diskotopia.bandcamp.com', + 'uploader_id': 'diskotopia', }, }] @@ -87,7 +123,7 @@ class BandcampIE(InfoExtractor): attr + ' data', group=2), video_id, fatal=fatal) def _real_extract(self, url): - title = self._match_id(url) + title, uploader = self._match_valid_url(url).group('id', 'uploader') webpage = self._download_webpage(url, title) tralbum = self._extract_data_attr(webpage, title) thumbnail = self._og_search_thumbnail(webpage) @@ -123,6 +159,9 @@ class BandcampIE(InfoExtractor): embed = self._extract_data_attr(webpage, title, 'embed', False) current = tralbum.get('current') or {} artist = embed.get('artist') or current.get('artist') or tralbum.get('artist') + album_artist = self._html_search_regex( + r'<h3 class="albumTitle">[\S\s]*?by\s*<span>\s*<a href="[^>]+">\s*([^>]+?)\s*</a>', + webpage, 'album artist', fatal=False) timestamp = unified_timestamp( current.get('publish_date') or tralbum.get('album_publish_date')) @@ -183,10 +222,9 @@ 'format_note': f.get('description'), 'filesize': parse_filesize(f.get('size_mb')), 'vcodec': 'none', + 'acodec': format_id.split('-')[0], }) - self._sort_formats(formats) - title = '%s - %s' % (artist, track) if artist else track if not duration: @@ -198,6 +236,8 @@ 'title': title, 'thumbnail': thumbnail, 'uploader': artist, + 'uploader_id': uploader, + 'uploader_url': f'https://{uploader}.bandcamp.com', 'timestamp': timestamp, 'release_timestamp': unified_timestamp(tralbum.get('album_release_date')), 'duration': duration, @@ -206,13 +246,14 @@ 'track_id': track_id, 'artist': artist, 'album': embed.get('album_title'), + 'album_artist': album_artist, 'formats': formats, } -class BandcampAlbumIE(BandcampIE): +class BandcampAlbumIE(BandcampIE): # XXX: Do not subclass from concrete IE IE_NAME = 'Bandcamp:album' - _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?!/music)(?:/album/(?P<album_id>[^/?#&]+))?' + _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com/album/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1', @@ -257,14 +298,6 @@ class BandcampAlbumIE(BandcampIE): 'id': 'hierophany-of-the-open-grave', }, 'playlist_mincount': 9, - }, { - 'url': 'http://dotscale.bandcamp.com', - 'info_dict': { - 'title': 'Loom', - 'id': 'dotscale', - 'uploader_id': 'dotscale', - }, - 'playlist_mincount': 7, }, { # with escaped quote in title 'url': 'https://jstrecords.bandcamp.com/album/entropy-ep', @@ -321,7 +354,7 @@ class BandcampAlbumIE(BandcampIE): } -class BandcampWeeklyIE(BandcampIE): +class BandcampWeeklyIE(BandcampIE): # XXX: Do not subclass from concrete IE IE_NAME = 'Bandcamp:weekly' _VALID_URL = r'https?://(?:www\.)?bandcamp\.com/?\?(?:.*?&)?show=(?P<id>\d+)' _TESTS = [{ @@ -370,7 +403,6 @@ class BandcampWeeklyIE(BandcampIE): 'ext': ext, 'vcodec': 'none', }) - self._sort_formats(formats) title = show.get('audio_title') or 'Bandcamp Weekly' subtitle = show.get('subtitle') @@ -391,41 +423,63 @@ class BandcampWeeklyIE(BandcampIE): } -class BandcampMusicIE(InfoExtractor): - _VALID_URL = r'https?://(?P<id>[^/]+)\.bandcamp\.com/music' +class BandcampUserIE(InfoExtractor): + IE_NAME = 'Bandcamp:user' + _VALID_URL = r'https?://(?!www\.)(?P<id>[^.]+)\.bandcamp\.com(?:/music)?/?(?:[#?]|$)' + _TESTS = [{ + # Type 1 Bandcamp user page. + 'url': 'https://adrianvonziegler.bandcamp.com', + 'info_dict': { + 'id': 'adrianvonziegler', + 'title': 'Discography of adrianvonziegler', + }, + 'playlist_mincount': 23, + }, { + # Bandcamp user page with only one album + 'url': 'http://dotscale.bandcamp.com', + 'info_dict': { + 'id': 'dotscale', + 'title': 'Discography of dotscale' + }, + 'playlist_count': 1, + }, { + # Type 2 Bandcamp user page.
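+ # (type 2 pages appear to list releases via trackTitle markup rather than the
+ # data-item-id grid, hence the fallback regex in _real_extract below)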
+ 'url': 'https://nightcallofficial.bandcamp.com', + 'info_dict': { + 'id': 'nightcallofficial', + 'title': 'Discography of nightcallofficial', + }, + 'playlist_count': 4, + }, { 'url': 'https://steviasphere.bandcamp.com/music', 'playlist_mincount': 47, 'info_dict': { 'id': 'steviasphere', + 'title': 'Discography of steviasphere', }, }, { 'url': 'https://coldworldofficial.bandcamp.com/music', 'playlist_mincount': 10, 'info_dict': { 'id': 'coldworldofficial', + 'title': 'Discography of coldworldofficial', }, }, { 'url': 'https://nuclearwarnowproductions.bandcamp.com/music', 'playlist_mincount': 399, 'info_dict': { 'id': 'nuclearwarnowproductions', + 'title': 'Discography of nuclearwarnowproductions', }, - } - ] - - _TYPE_IE_DICT = { - 'album': BandcampAlbumIE.ie_key(), - 'track': BandcampIE.ie_key() - } + }] def _real_extract(self, url): - id = self._match_id(url) - webpage = self._download_webpage(url, id) - items = re.findall(r'href\=\"\/(?P(?Palbum|track)+/[^\"]+)', webpage) - entries = [ - self.url_result( - f'https://{id}.bandcamp.com/{item[0]}', - ie=self._TYPE_IE_DICT[item[1]]) - for item in items] - return self.playlist_result(entries, id) + uploader = self._match_id(url) + webpage = self._download_webpage(url, uploader) + + discography_data = (re.findall(r'
<li data-item-id=["\'][^>]+>\s*<a href=["\'](?![^"\'/]*?/merch)([^"\']+)', webpage) + or re.findall(r'<div[^>]+trackTitle["\'][^"\']+["\']([^"\']+)', webpage)) + + return self.playlist_from_matches( + discography_data, uploader, f'Discography of {uploader}', getter=lambda x: urljoin(url, x)) diff --git a/plugins/youtube_download/yt_dlp/extractor/bannedvideo.py b/plugins/youtube_download/yt_dlp/extractor/bannedvideo.py index 3db1151..51e7220 100644 --- a/plugins/youtube_download/yt_dlp/extractor/bannedvideo.py +++ b/plugins/youtube_download/yt_dlp/extractor/bannedvideo.py @@ -1,5 +1,3 @@ -from __future__ import unicode_literals - import json from .common import InfoExtractor @@ -137,7 +135,6 @@ query GetCommentReplies($id: String!) { formats.extend(self._extract_m3u8_formats( video_info.get('streamUrl'), video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', live=True)) - self._sort_formats(formats) return { 'id': video_id, diff --git a/plugins/youtube_download/yt_dlp/extractor/bbc.py b/plugins/youtube_download/yt_dlp/extractor/bbc.py index 85ab478..9d28e70 100644 --- a/plugins/youtube_download/yt_dlp/extractor/bbc.py +++ b/plugins/youtube_download/yt_dlp/extractor/bbc.py @@ -1,18 +1,12 @@ -# coding: utf-8 -from __future__ import unicode_literals - import functools import itertools import json import re +import urllib.error +import xml.etree.ElementTree from .common import InfoExtractor -from ..compat import ( - compat_etree_Element, - compat_HTTPError, - compat_str, - compat_urlparse, -) +from ..compat import compat_HTTPError, compat_str, compat_urlparse from ..utils import ( ExtractorError, OnDemandPagedList, @@ -38,7 +32,7 @@ class BBCCoUkIE(InfoExtractor): IE_NAME = 'bbc.co.uk' IE_DESC = 'BBC iPlayer' - _ID_REGEX = r'(?:[pbm][\da-z]{7}|w[\da-z]{7,14})' + _ID_REGEX = r'(?:[pbml][\da-z]{7}|w[\da-z]{7,14})' _VALID_URL = r'''(?x) https?:// (?:www\.)?bbc\.co\.uk/ @@ -52,6 +46,7 @@ ) (?P<id>%s)(?!/(?:episodes|broadcasts|clips)) ''' % _ID_REGEX + _EMBED_REGEX = [r'setPlaylist\("(?P<url>https?://www\.bbc\.co\.uk/iplayer/[^/]+/[\da-z]{8})"\)'] _LOGIN_URL = 'https://account.bbc.com/signin' _NETRC_MACHINE = 'bbc' @@ -263,11 +258,7 @@ 'only_matching': True, }] - def _login(self): - username, password = self._get_login_info() - if username is None: - return - + def _perform_login(self, username, password): login_page = self._download_webpage( self._LOGIN_URL, None, 'Downloading signin page') @@ -293,9 +284,6 @@ 'Unable to login: %s' % error, expected=True) raise ExtractorError('Unable to log in') - def _real_initialize(self): - self._login() - class MediaSelectionError(Exception): def __init__(self, id): self.id = id @@ -324,7 +312,7 @@ continue captions = self._download_xml( cc_url, programme_id, 'Downloading captions', fatal=False) - if not isinstance(captions, compat_etree_Element): + if not isinstance(captions, xml.etree.ElementTree.Element): continue subtitles['en'] = [ { @@ -394,9 +382,17 @@ formats.extend(self._extract_mpd_formats( href, programme_id, mpd_id=format_id, fatal=False)) elif transfer_format == 'hls': - formats.extend(self._extract_m3u8_formats( - href, programme_id, ext='mp4', entry_protocol='m3u8_native', - m3u8_id=format_id, fatal=False)) + # TODO: let expected_status be passed into _extract_xxx_formats() instead + try: + fmts = self._extract_m3u8_formats( + href, programme_id, ext='mp4', entry_protocol='m3u8_native', + m3u8_id=format_id, fatal=False) + except ExtractorError as e: + if not
(isinstance(e.exc_info[1], urllib.error.HTTPError) + and e.exc_info[1].code in (403, 404)): + raise + fmts = [] + formats.extend(fmts) elif transfer_format == 'hds': formats.extend(self._extract_f4m_formats( href, programme_id, f4m_id=format_id, fatal=False)) @@ -579,8 +575,6 @@ class BBCCoUkIE(InfoExtractor): else: programme_id, title, description, duration, formats, subtitles = self._download_playlist(group_id) - self._sort_formats(formats) - return { 'id': programme_id, 'title': title, @@ -592,10 +586,15 @@ class BBCCoUkIE(InfoExtractor): } -class BBCIE(BBCCoUkIE): +class BBCIE(BBCCoUkIE): # XXX: Do not subclass from concrete IE IE_NAME = 'bbc' IE_DESC = 'BBC' - _VALID_URL = r'https?://(?:www\.)?bbc\.(?:com|co\.uk)/(?:[^/]+/)+(?P[^/#?]+)' + _VALID_URL = r'''(?x) + https?://(?:www\.)?(?: + bbc\.(?:com|co\.uk)| + bbcnewsd73hkzno2ini43t4gblxvycyac5aw4gnv7t2rccijh7745uqd\.onion| + bbcweb3hytmzhn5d532owbu6oqadra5z3ar726vq5kgwwn6aucdccrad\.onion + )/(?:[^/]+/)+(?P[^/#?]+)''' _MEDIA_SETS = [ 'pc', @@ -784,21 +783,33 @@ class BBCIE(BBCCoUkIE): 'timestamp': 1437785037, 'upload_date': '20150725', }, + }, { + # video with window.__INITIAL_DATA__ and value as JSON string + 'url': 'https://www.bbc.com/news/av/world-europe-59468682', + 'info_dict': { + 'id': 'p0b71qth', + 'ext': 'mp4', + 'title': 'Why France is making this woman a national hero', + 'description': 'md5:7affdfab80e9c3a1f976230a1ff4d5e4', + 'thumbnail': r're:https?://.+/.+\.jpg', + 'timestamp': 1638230731, + 'upload_date': '20211130', + }, }, { # single video article embedded with data-media-vpid 'url': 'http://www.bbc.co.uk/sport/rowing/35908187', 'only_matching': True, }, { + # bbcthreeConfig 'url': 'https://www.bbc.co.uk/bbcthree/clip/73d0bbd0-abc3-4cea-b3c0-cdae21905eb1', 'info_dict': { 'id': 'p06556y7', 'ext': 'mp4', - 'title': 'Transfers: Cristiano Ronaldo to Man Utd, Arsenal to spend?', - 'description': 'md5:4b7dfd063d5a789a1512e99662be3ddd', + 'title': 'Things Not To Say to people that live on council estates', + 'description': "From being labelled a 'chav', to the presumption that they're 'scroungers', people who live on council estates encounter all kinds of prejudices and false assumptions about themselves, their families, and their lifestyles. 
Here, eight people discuss the common statements, misconceptions, and clichés that they're tired of hearing.", + 'duration': 360, + 'thumbnail': r're:https?://.+/.+\.jpg', }, - 'params': { - 'skip_download': True, - } }, { # window.__PRELOADED_STATE__ 'url': 'https://www.bbc.co.uk/radio/play/b0b9z4yl', @@ -833,6 +844,12 @@ class BBCIE(BBCCoUkIE): 'upload_date': '20190604', 'categories': ['Psychology'], }, + }, { # onion routes + 'url': 'https://www.bbcnewsd73hkzno2ini43t4gblxvycyac5aw4gnv7t2rccijh7745uqd.onion/news/av/world-europe-63208576', + 'only_matching': True, + }, { + 'url': 'https://www.bbcweb3hytmzhn5d532owbu6oqadra5z3ar726vq5kgwwn6aucdccrad.onion/sport/av/football/63195681', + 'only_matching': True, }] @classmethod @@ -871,7 +888,6 @@ class BBCIE(BBCCoUkIE): def _extract_from_playlist_sxml(self, url, playlist_id, timestamp): programme_id, title, description, duration, formats, subtitles = \ self._process_legacy_playlist_url(url, playlist_id) - self._sort_formats(formats) return { 'id': programme_id, 'title': title, @@ -890,13 +906,8 @@ class BBCIE(BBCCoUkIE): json_ld_info = self._search_json_ld(webpage, playlist_id, default={}) timestamp = json_ld_info.get('timestamp') - playlist_title = json_ld_info.get('title') - if not playlist_title: - playlist_title = self._og_search_title( - webpage, default=None) or self._html_search_regex( - r'(.+?)', webpage, 'playlist title', default=None) - if playlist_title: - playlist_title = re.sub(r'(.+)\s*-\s*BBC.*?$', r'\1', playlist_title).strip() + playlist_title = json_ld_info.get('title') or re.sub( + r'(.+)\s*-\s*BBC.*?$', r'\1', self._generic_title('', webpage, default='')).strip() or None playlist_description = json_ld_info.get( 'description') or self._og_search_description(webpage, default=None) @@ -940,7 +951,6 @@ class BBCIE(BBCCoUkIE): duration = int_or_none(items[0].get('duration')) programme_id = items[0].get('vpid') formats, subtitles = self._download_media_selector(programme_id) - self._sort_formats(formats) entries.append({ 'id': programme_id, 'title': title, @@ -977,7 +987,6 @@ class BBCIE(BBCCoUkIE): continue raise if entry: - self._sort_formats(entry['formats']) entries.append(entry) if entries: @@ -1001,7 +1010,6 @@ class BBCIE(BBCCoUkIE): if programme_id: formats, subtitles = self._download_media_selector(programme_id) - self._sort_formats(formats) # digitalData may be missing (e.g. 
http://www.bbc.com/autos/story/20130513-hyundais-rock-star) digital_data = self._parse_json( self._search_regex( @@ -1033,7 +1041,6 @@ class BBCIE(BBCCoUkIE): if version_id: title = smp_data['title'] formats, subtitles = self._download_media_selector(version_id) - self._sort_formats(formats) image_url = smp_data.get('holdingImageURL') display_date = init_data.get('displayDate') topic_title = init_data.get('topicTitle') @@ -1075,7 +1082,6 @@ class BBCIE(BBCCoUkIE): continue title = lead_media.get('title') or self._og_search_title(webpage) formats, subtitles = self._download_media_selector(programme_id) - self._sort_formats(formats) description = lead_media.get('summary') uploader = lead_media.get('masterBrand') uploader_id = lead_media.get('mid') @@ -1104,7 +1110,6 @@ class BBCIE(BBCCoUkIE): if current_programme and programme_id and current_programme.get('type') == 'playable_item': title = current_programme.get('titles', {}).get('tertiary') or playlist_title formats, subtitles = self._download_media_selector(programme_id) - self._sort_formats(formats) synopses = current_programme.get('synopses') or {} network = current_programme.get('network') or {} duration = int_or_none( @@ -1137,7 +1142,6 @@ class BBCIE(BBCCoUkIE): clip_title = clip.get('title') if clip_vpid and clip_title: formats, subtitles = self._download_media_selector(clip_vpid) - self._sort_formats(formats) return { 'id': clip_vpid, 'title': clip_title, @@ -1159,7 +1163,6 @@ class BBCIE(BBCCoUkIE): if not programme_id: continue formats, subtitles = self._download_media_selector(programme_id) - self._sort_formats(formats) entries.append({ 'id': programme_id, 'title': playlist_title, @@ -1171,9 +1174,16 @@ class BBCIE(BBCCoUkIE): return self.playlist_result( entries, playlist_id, playlist_title, playlist_description) - initial_data = self._parse_json(self._search_regex( - r'window\.__INITIAL_DATA__\s*=\s*({.+?});', webpage, - 'preload state', default='{}'), playlist_id, fatal=False) + initial_data = self._search_regex( + r'window\.__INITIAL_DATA__\s*=\s*("{.+?}")\s*;', webpage, + 'quoted preload state', default=None) + if initial_data is None: + initial_data = self._search_regex( + r'window\.__INITIAL_DATA__\s*=\s*({.+?})\s*;', webpage, + 'preload state', default={}) + else: + initial_data = self._parse_json(initial_data or '"{}"', playlist_id, fatal=False) + initial_data = self._parse_json(initial_data, playlist_id, fatal=False) if initial_data: def parse_media(media): if not media: @@ -1184,7 +1194,6 @@ class BBCIE(BBCCoUkIE): if not (item_id and item_title): continue formats, subtitles = self._download_media_selector(item_id) - self._sort_formats(formats) item_desc = None blocks = try_get(media, lambda x: x['summary']['blocks'], list) if blocks: @@ -1214,8 +1223,11 @@ class BBCIE(BBCCoUkIE): if name == 'media-experience': parse_media(try_get(resp, lambda x: x['data']['initialItem']['mediaItem'], dict)) elif name == 'article': - for block in (try_get(resp, lambda x: x['data']['blocks'], list) or []): - if block.get('type') != 'media': + for block in (try_get(resp, + (lambda x: x['data']['blocks'], + lambda x: x['data']['content']['model']['blocks'],), + list) or []): + if block.get('type') not in ['media', 'video']: continue parse_media(block.get('model')) return self.playlist_result( @@ -1282,7 +1294,6 @@ class BBCIE(BBCCoUkIE): formats, subtitles = self._extract_from_media_meta(media_meta, playlist_id) if not formats and not self.get_param('ignore_no_formats'): continue - self._sort_formats(formats) video_id = 
media_meta.get('externalId')
             if not video_id:
diff --git a/plugins/youtube_download/yt_dlp/extractor/beatbump.py b/plugins/youtube_download/yt_dlp/extractor/beatbump.py
new file mode 100644
index 0000000..0f40ebe
--- /dev/null
+++ b/plugins/youtube_download/yt_dlp/extractor/beatbump.py
@@ -0,0 +1,101 @@
+from .common import InfoExtractor
+from .youtube import YoutubeIE, YoutubeTabIE
+
+
+class BeatBumpVideoIE(InfoExtractor):
+    _VALID_URL = r'https://beatbump\.ml/listen\?id=(?P<id>[\w-]+)'
+    _TESTS = [{
+        'url': 'https://beatbump.ml/listen?id=MgNrAu2pzNs',
+        'md5': '5ff3fff41d3935b9810a9731e485fe66',
+        'info_dict': {
+            'id': 'MgNrAu2pzNs',
+            'ext': 'mp4',
+            'uploader_url': 'http://www.youtube.com/channel/UC-pWHpBjdGG69N9mM2auIAA',
+            'artist': 'Stephen',
+            'thumbnail': 'https://i.ytimg.com/vi_webp/MgNrAu2pzNs/maxresdefault.webp',
+            'channel_url': 'https://www.youtube.com/channel/UC-pWHpBjdGG69N9mM2auIAA',
+            'upload_date': '20190312',
+            'categories': ['Music'],
+            'playable_in_embed': True,
+            'duration': 169,
+            'like_count': int,
+            'alt_title': 'Voyeur Girl',
+            'view_count': int,
+            'track': 'Voyeur Girl',
+            'uploader': 'Stephen - Topic',
+            'title': 'Voyeur Girl',
+            'channel_follower_count': int,
+            'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
+            'age_limit': 0,
+            'availability': 'public',
+            'live_status': 'not_live',
+            'album': 'it\'s too much love to know my dear',
+            'channel': 'Stephen',
+            'comment_count': int,
+            'description': 'md5:7ae382a65843d6df2685993e90a8628f',
+            'tags': 'count:11',
+            'creator': 'Stephen',
+            'channel_id': 'UC-pWHpBjdGG69N9mM2auIAA',
+        }
+    }]
+
+    def _real_extract(self, url):
+        id_ = self._match_id(url)
+        return self.url_result(f'https://music.youtube.com/watch?v={id_}', YoutubeIE, id_)
+
+
+class BeatBumpPlaylistIE(InfoExtractor):
+    _VALID_URL = r'https://beatbump\.ml/(?:release\?id=|artist/|playlist/)(?P<id>[\w-]+)'
+    _TESTS = [{
+        'url': 'https://beatbump.ml/release?id=MPREb_gTAcphH99wE',
+        'playlist_count': 50,
+        'info_dict': {
+            'id': 'OLAK5uy_l1m0thk3g31NmIIz_vMIbWtyv7eZixlH0',
+            'availability': 'unlisted',
+            'view_count': int,
+            'title': 'Album - Royalty Free Music Library V2 (50 Songs)',
+            'description': '',
+            'tags': [],
+            'modified_date': '20221223',
+        }
+    }, {
+        'url': 'https://beatbump.ml/artist/UC_aEa8K-EOJ3D6gOs7HcyNg',
+        'playlist_mincount': 1,
+        'params': {'flatplaylist': True},
+        'info_dict': {
+            'id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
+            'uploader_url': 'https://www.youtube.com/channel/UC_aEa8K-EOJ3D6gOs7HcyNg',
+            'channel_url': 'https://www.youtube.com/channel/UC_aEa8K-EOJ3D6gOs7HcyNg',
+            'uploader_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
+            'channel_follower_count': int,
+            'title': 'NoCopyrightSounds - Videos',
+            'uploader': 'NoCopyrightSounds',
+            'description': 'md5:cd4fd53d81d363d05eee6c1b478b491a',
+            'channel': 'NoCopyrightSounds',
+            'tags': 'count:12',
+            'channel_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
+        },
+    }, {
+        'url': 'https://beatbump.ml/playlist/VLPLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
+        'playlist_mincount': 1,
+        'params': {'flatplaylist': True},
+        'info_dict': {
+            'id': 'PLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
+            'uploader_url': 'https://www.youtube.com/@NoCopyrightSounds',
+            'description': 'Providing you with copyright free / safe music for gaming, live streaming, studying and more!',
+            'view_count': int,
+            'channel_url': 'https://www.youtube.com/@NoCopyrightSounds',
+            'uploader_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
+            'title': 'NCS : All Releases 💿',
+            'uploader': 'NoCopyrightSounds',
+            'availability': 'public',
+            'channel': 'NoCopyrightSounds',
+            'tags': [],
+            'modified_date': '20221225',
+            'channel_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
+        }
+    }]
+
+    def _real_extract(self, url):
+        id_ = self._match_id(url)
+        return self.url_result(f'https://music.youtube.com/browse/{id_}', YoutubeTabIE, id_)
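# Illustrative sketch (hypothetical helper, not yt-dlp API): BeatBump is a
# front-end for YouTube Music, so the two extractors above download nothing
# themselves; they rewrite the URL and delegate to YoutubeIE/YoutubeTabIE via
# url_result(). The delegation idea, reduced to a standalone function:
def delegate(beatbump_id, kind='watch'):
    # 'watch' -> single video page; 'browse' -> album/artist/playlist page
    if kind == 'watch':
        return f'https://music.youtube.com/watch?v={beatbump_id}'
    return f'https://music.youtube.com/browse/{beatbump_id}'

# delegate('MgNrAu2pzNs') -> 'https://music.youtube.com/watch?v=MgNrAu2pzNs'
# delegate('MPREb_gTAcphH99wE', 'browse') -> 'https://music.youtube.com/browse/MPREb_gTAcphH99wE'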
diff --git a/plugins/youtube_download/yt_dlp/extractor/beatport.py b/plugins/youtube_download/yt_dlp/extractor/beatport.py
index e1cf8b4..0aecbd0 100644
--- a/plugins/youtube_download/yt_dlp/extractor/beatport.py
+++ b/plugins/youtube_download/yt_dlp/extractor/beatport.py
@@ -1,6 +1,3 @@
-# coding: utf-8
-from __future__ import unicode_literals
-
 import re

 from .common import InfoExtractor
@@ -77,7 +74,6 @@ class BeatportIE(InfoExtractor):
                 fmt['abr'] = 96
                 fmt['asr'] = 44100
             formats.append(fmt)
-        self._sort_formats(formats)

         images = []
         for name, info in track['images'].items():
diff --git a/plugins/youtube_download/yt_dlp/extractor/beeg.py b/plugins/youtube_download/yt_dlp/extractor/beeg.py
index 8fbabe7..52ee68e 100644
--- a/plugins/youtube_download/yt_dlp/extractor/beeg.py
+++ b/plugins/youtube_download/yt_dlp/extractor/beeg.py
@@ -1,32 +1,43 @@
-from __future__ import unicode_literals
-
 from .common import InfoExtractor
-from ..compat import (
-    compat_str,
-)
+
 from ..utils import (
     int_or_none,
-    parse_qs,
+    traverse_obj,
+    try_get,
     unified_timestamp,
 )


 class BeegIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?beeg\.(?:com|porn(?:/video)?)/(?P<id>\d+)'
+    _VALID_URL = r'https?://(?:www\.)?beeg\.(?:com(?:/video)?)/-?(?P<id>\d+)'
     _TESTS = [{
-        # api/v6 v1
-        'url': 'http://beeg.com/5416503',
-        'md5': 'a1a1b1a8bc70a89e49ccfd113aed0820',
+        'url': 'https://beeg.com/-0983946056129650',
+        'md5': '51d235147c4627cfce884f844293ff88',
         'info_dict': {
-            'id': '5416503',
+            'id': '0983946056129650',
             'ext': 'mp4',
-            'title': 'Sultry Striptease',
-            'description': 'md5:d22219c09da287c14bed3d6c37ce4bc2',
-            'timestamp': 1391813355,
-            'upload_date': '20140207',
-            'duration': 383,
+            'title': 'sucked cock and fucked in a private plane',
+            'duration': 927,
             'tags': list,
             'age_limit': 18,
+            'upload_date': '20220131',
+            'timestamp': 1643656455,
+            'display_id': 2540839,
+        }
+    }, {
+        'url': 'https://beeg.com/-0599050563103750?t=4-861',
+        'md5': 'bd8b5ea75134f7f07fad63008db2060e',
+        'info_dict': {
+            'id': '0599050563103750',
+            'ext': 'mp4',
+            'title': 'Bad Relatives',
+            'duration': 2060,
+            'tags': list,
+            'age_limit': 18,
+            'description': 'md5:b4fc879a58ae6c604f8f259155b7e3b9',
+            'timestamp': 1643623200,
+            'display_id': 2569965,
+            'upload_date': '20220131',
+        }
     }, {
         # api/v6 v2
@@ -36,12 +47,6 @@ class BeegIE(InfoExtractor):
         # api/v6 v2 w/o t
         'url': 'https://beeg.com/1277207756',
         'only_matching': True,
-    }, {
-        'url': 'https://beeg.porn/video/5416503',
-        'only_matching': True,
-    }, {
-        'url': 'https://beeg.porn/5416503',
-        'only_matching': True,
     }]

     def _real_extract(self, url):
@@ -49,68 +54,36 @@ class BeegIE(InfoExtractor):

         webpage = self._download_webpage(url, video_id)

-        beeg_version = self._search_regex(
-            r'beeg_version\s*=\s*([\da-zA-Z_-]+)', webpage, 'beeg version',
-            default='1546225636701')
+        video = self._download_json(
+            'https://store.externulls.com/facts/file/%s' % video_id,
+            video_id, 'Downloading JSON for %s' % video_id)

-        if len(video_id) >= 10:
-            query = {
-                'v': 2,
-            }
-            qs = parse_qs(url)
-            t = qs.get('t', [''])[0].split('-')
-            if len(t) > 1:
-                query.update({
-                    's': t[0],
-                    'e': t[1],
-                })
-        else:
-            query = {'v': 1}
+        fc_facts = video.get('fc_facts')
+        first_fact = {}
+        for fact in fc_facts:
+            if not first_fact or try_get(fact, lambda x: x['id'] < first_fact['id']):
+                first_fact = fact

-        
for api_path in ('', 'api.'): - video = self._download_json( - 'https://%sbeeg.com/api/v6/%s/video/%s' - % (api_path, beeg_version, video_id), video_id, - fatal=api_path == 'api.', query=query) - if video: - break + resources = traverse_obj(video, ('file', 'hls_resources')) or first_fact.get('hls_resources') formats = [] - for format_id, video_url in video.items(): - if not video_url: + for format_id, video_uri in resources.items(): + if not video_uri: continue - height = self._search_regex( - r'^(\d+)[pP]$', format_id, 'height', default=None) - if not height: - continue - formats.append({ - 'url': self._proto_relative_url( - video_url.replace('{DATA_MARKERS}', 'data=pc_XX__%s_0' % beeg_version), 'https:'), - 'format_id': format_id, - 'height': int(height), - }) - self._sort_formats(formats) - - title = video['title'] - video_id = compat_str(video.get('id') or video_id) - display_id = video.get('code') - description = video.get('desc') - series = video.get('ps_name') - - timestamp = unified_timestamp(video.get('date')) - duration = int_or_none(video.get('duration')) - - tags = [tag.strip() for tag in video['tags'].split(',')] if video.get('tags') else None + height = int_or_none(self._search_regex(r'fl_cdn_(\d+)', format_id, 'height', default=None)) + current_formats = self._extract_m3u8_formats(f'https://video.beeg.com/{video_uri}', video_id, ext='mp4', m3u8_id=str(height)) + for f in current_formats: + f['height'] = height + formats.extend(current_formats) return { 'id': video_id, - 'display_id': display_id, - 'title': title, - 'description': description, - 'series': series, - 'timestamp': timestamp, - 'duration': duration, - 'tags': tags, + 'display_id': first_fact.get('id'), + 'title': traverse_obj(video, ('file', 'stuff', 'sf_name')), + 'description': traverse_obj(video, ('file', 'stuff', 'sf_story')), + 'timestamp': unified_timestamp(first_fact.get('fc_created')), + 'duration': int_or_none(traverse_obj(video, ('file', 'fl_duration'))), + 'tags': traverse_obj(video, ('tags', ..., 'tg_name')), 'formats': formats, 'age_limit': self._rta_search(webpage), } diff --git a/plugins/youtube_download/yt_dlp/extractor/behindkink.py b/plugins/youtube_download/yt_dlp/extractor/behindkink.py index 2c97f98..ca44981 100644 --- a/plugins/youtube_download/yt_dlp/extractor/behindkink.py +++ b/plugins/youtube_download/yt_dlp/extractor/behindkink.py @@ -1,7 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - - from .common import InfoExtractor from ..utils import url_basename diff --git a/plugins/youtube_download/yt_dlp/extractor/bellmedia.py b/plugins/youtube_download/yt_dlp/extractor/bellmedia.py index 904c17e..5ae4b91 100644 --- a/plugins/youtube_download/yt_dlp/extractor/bellmedia.py +++ b/plugins/youtube_download/yt_dlp/extractor/bellmedia.py @@ -1,7 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - - from .common import InfoExtractor @@ -28,7 +24,7 @@ class BellMediaIE(InfoExtractor): )/.*?(?:\b(?:vid(?:eoid)?|clipId)=|-vid|~|%7E|/(?:episode)?)(?P[0-9]{6,})''' _TESTS = [{ 'url': 'https://www.bnnbloomberg.ca/video/david-cockfield-s-top-picks~1403070', - 'md5': '36d3ef559cfe8af8efe15922cd3ce950', + 'md5': '3e5b8e38370741d5089da79161646635', 'info_dict': { 'id': '1403070', 'ext': 'flv', @@ -36,6 +32,14 @@ class BellMediaIE(InfoExtractor): 'description': 'md5:810f7f8c6a83ad5b48677c3f8e5bb2c3', 'upload_date': '20180525', 'timestamp': 1527288600, + 'season_id': 73997, + 'season': '2018', + 'thumbnail': 
'http://images2.9c9media.com/image_asset/2018_5_25_baf30cbd-b28d-4a18-9903-4bb8713b00f5_PNG_956x536.jpg', + 'tags': [], + 'categories': ['ETFs'], + 'season_number': 8, + 'duration': 272.038, + 'series': 'Market Call Tonight', }, }, { 'url': 'http://www.thecomedynetwork.ca/video/player?vid=923582', diff --git a/plugins/youtube_download/yt_dlp/extractor/berufetv.py b/plugins/youtube_download/yt_dlp/extractor/berufetv.py new file mode 100644 index 0000000..8160cbd --- /dev/null +++ b/plugins/youtube_download/yt_dlp/extractor/berufetv.py @@ -0,0 +1,70 @@ +from .common import InfoExtractor +from ..utils import float_or_none, mimetype2ext, traverse_obj + + +class BerufeTVIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?web\.arbeitsagentur\.de/berufetv/[^?#]+/film;filmId=(?P[\w-]+)' + _TESTS = [{ + 'url': 'https://web.arbeitsagentur.de/berufetv/studienberufe/wirtschaftswissenschaften/wirtschaftswissenschaften-volkswirtschaft/film;filmId=DvKC3DUpMKvUZ_6fEnfg3u', + 'md5': '041b6432ec8e6838f84a5c30f31cc795', + 'info_dict': { + 'id': 'DvKC3DUpMKvUZ_6fEnfg3u', + 'ext': 'mp4', + 'title': 'Volkswirtschaftslehre', + 'description': 'md5:6bd87d0c63163480a6489a37526ee1c1', + 'categories': ['Studien­beruf'], + 'tags': ['Studienfilm'], + 'duration': 602.440, + 'thumbnail': r're:^https://asset-out-cdn\.video-cdn\.net/private/videos/DvKC3DUpMKvUZ_6fEnfg3u/thumbnails/793063\?quality=thumbnail&__token__=[^\s]+$', + } + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + movie_metadata = self._download_json( + 'https://rest.arbeitsagentur.de/infosysbub/berufetv/pc/v1/film-metadata', + video_id, 'Downloading JSON metadata', + headers={'X-API-Key': '79089773-4892-4386-86e6-e8503669f426'}, fatal=False) + + meta = traverse_obj( + movie_metadata, ('metadaten', lambda _, i: video_id == i['miId']), + get_all=False, default={}) + + video = self._download_json( + f'https://d.video-cdn.net/play/player/8YRzUk6pTzmBdrsLe9Y88W/video/{video_id}', + video_id, 'Downloading video JSON') + + formats, subtitles = [], {} + for key, source in video['videoSources']['html'].items(): + if key == 'auto': + fmts, subs = self._extract_m3u8_formats_and_subtitles(source[0]['source'], video_id) + formats += fmts + subtitles = subs + else: + formats.append({ + 'url': source[0]['source'], + 'ext': mimetype2ext(source[0]['mimeType']), + 'format_id': key, + }) + + for track in video.get('videoTracks') or []: + if track.get('type') != 'SUBTITLES': + continue + subtitles.setdefault(track['language'], []).append({ + 'url': track['source'], + 'name': track.get('label'), + 'ext': 'vtt' + }) + + return { + 'id': video_id, + 'title': meta.get('titel') or traverse_obj(video, ('videoMetaData', 'title')), + 'description': meta.get('beschreibung'), + 'thumbnail': meta.get('thumbnail') or f'https://asset-out-cdn.video-cdn.net/private/videos/{video_id}/thumbnails/active', + 'duration': float_or_none(video.get('duration'), scale=1000), + 'categories': [meta['kategorie']] if meta.get('kategorie') else None, + 'tags': meta.get('themengebiete'), + 'subtitles': subtitles, + 'formats': formats, + } diff --git a/plugins/youtube_download/yt_dlp/extractor/bet.py b/plugins/youtube_download/yt_dlp/extractor/bet.py index 2c71442..6b867d1 100644 --- a/plugins/youtube_download/yt_dlp/extractor/bet.py +++ b/plugins/youtube_download/yt_dlp/extractor/bet.py @@ -1,5 +1,3 @@ -from __future__ import unicode_literals - from .mtv import MTVServicesInfoExtractor from ..utils import unified_strdate diff --git 
a/plugins/youtube_download/yt_dlp/extractor/bfi.py b/plugins/youtube_download/yt_dlp/extractor/bfi.py index 60c8944..76f0516 100644 --- a/plugins/youtube_download/yt_dlp/extractor/bfi.py +++ b/plugins/youtube_download/yt_dlp/extractor/bfi.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - import re from .common import InfoExtractor diff --git a/plugins/youtube_download/yt_dlp/extractor/bfmtv.py b/plugins/youtube_download/yt_dlp/extractor/bfmtv.py index 501f69d..a7be0e6 100644 --- a/plugins/youtube_download/yt_dlp/extractor/bfmtv.py +++ b/plugins/youtube_download/yt_dlp/extractor/bfmtv.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - import re from .common import InfoExtractor @@ -8,7 +5,7 @@ from ..utils import extract_attributes class BFMTVBaseIE(InfoExtractor): - _VALID_URL_BASE = r'https?://(?:www\.)?bfmtv\.com/' + _VALID_URL_BASE = r'https?://(?:www\.|rmc\.)?bfmtv\.com/' _VALID_URL_TMPL = _VALID_URL_BASE + r'(?:[^/]+/)*[^/?&#]+_%s[A-Z]-(?P\d{12})\.html' _VIDEO_BLOCK_REGEX = r'(]+class="video_block"[^>]*>)' BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s' @@ -34,6 +31,9 @@ class BFMTVIE(BFMTVBaseIE): 'uploader_id': '876450610001', 'upload_date': '20201002', 'timestamp': 1601629620, + 'duration': 44.757, + 'tags': ['bfmactu', 'politique'], + 'thumbnail': 'https://cf-images.eu-west-1.prod.boltdns.net/v1/static/876450610001/5041f4c1-bc48-4af8-a256-1b8300ad8ef0/cf2f9114-e8e2-4494-82b4-ab794ea4bc7d/1920x1080/match/image.jpg', }, }] @@ -45,7 +45,7 @@ class BFMTVIE(BFMTVBaseIE): return self._brightcove_url_result(video_block['videoid'], video_block) -class BFMTVLiveIE(BFMTVIE): +class BFMTVLiveIE(BFMTVIE): # XXX: Do not subclass from concrete IE IE_NAME = 'bfmtv:live' _VALID_URL = BFMTVBaseIE._VALID_URL_BASE + '(?P(?:[^/]+/)?en-direct)' _TESTS = [{ @@ -84,6 +84,20 @@ class BFMTVArticleIE(BFMTVBaseIE): }, { 'url': 'https://www.bfmtv.com/sante/covid-19-oui-le-vaccin-de-pfizer-distribue-en-france-a-bien-ete-teste-sur-des-personnes-agees_AN-202101060275.html', 'only_matching': True, + }, { + 'url': 'https://rmc.bfmtv.com/actualites/societe/transports/ce-n-est-plus-tout-rentable-le-bioethanol-e85-depasse-1eu-le-litre-des-automobilistes-regrettent_AV-202301100268.html', + 'info_dict': { + 'id': '6318445464112', + 'ext': 'mp4', + 'title': 'Le plein de bioéthanol fait de plus en plus mal à la pompe', + 'description': None, + 'uploader_id': '876630703001', + 'upload_date': '20230110', + 'timestamp': 1673341692, + 'duration': 109.269, + 'tags': ['rmc', 'show', 'apolline de malherbe', 'info', 'talk', 'matinale', 'radio'], + 'thumbnail': 'https://cf-images.eu-west-1.prod.boltdns.net/v1/static/876630703001/5bef74b8-9d5e-4480-a21f-60c2e2480c46/96c88b74-f9db-45e1-8040-e199c5da216c/1920x1080/match/image.jpg' + } }] def _real_extract(self, url): diff --git a/plugins/youtube_download/yt_dlp/extractor/bibeltv.py b/plugins/youtube_download/yt_dlp/extractor/bibeltv.py index 56c2bfe..fd20aad 100644 --- a/plugins/youtube_download/yt_dlp/extractor/bibeltv.py +++ b/plugins/youtube_download/yt_dlp/extractor/bibeltv.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - from .common import InfoExtractor diff --git a/plugins/youtube_download/yt_dlp/extractor/bigflix.py b/plugins/youtube_download/yt_dlp/extractor/bigflix.py index 28e3e59..02d1ba0 100644 --- a/plugins/youtube_download/yt_dlp/extractor/bigflix.py +++ b/plugins/youtube_download/yt_dlp/extractor/bigflix.py @@ -1,6 +1,3 @@ -# coding: 
utf-8 -from __future__ import unicode_literals - import re from .common import InfoExtractor @@ -66,8 +63,6 @@ class BigflixIE(InfoExtractor): 'url': decode_url(file_url), }) - self._sort_formats(formats) - description = self._html_search_meta('description', webpage) return { diff --git a/plugins/youtube_download/yt_dlp/extractor/bigo.py b/plugins/youtube_download/yt_dlp/extractor/bigo.py new file mode 100644 index 0000000..1cb6e58 --- /dev/null +++ b/plugins/youtube_download/yt_dlp/extractor/bigo.py @@ -0,0 +1,56 @@ +from .common import InfoExtractor +from ..utils import ExtractorError, urlencode_postdata + + +class BigoIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?bigo\.tv/(?:[a-z]{2,}/)?(?P[^/]+)' + + _TESTS = [{ + 'url': 'https://www.bigo.tv/ja/221338632', + 'info_dict': { + 'id': '6576287577575737440', + 'title': '土よ〜💁‍♂️ 休憩室/REST room', + 'thumbnail': r're:https?://.+', + 'uploader': '✨Shin💫', + 'uploader_id': '221338632', + 'is_live': True, + }, + 'skip': 'livestream', + }, { + 'url': 'https://www.bigo.tv/th/Tarlerm1304', + 'only_matching': True, + }, { + 'url': 'https://bigo.tv/115976881', + 'only_matching': True, + }] + + def _real_extract(self, url): + user_id = self._match_id(url) + + info_raw = self._download_json( + 'https://ta.bigo.tv/official_website/studio/getInternalStudioInfo', + user_id, data=urlencode_postdata({'siteId': user_id})) + + if not isinstance(info_raw, dict): + raise ExtractorError('Received invalid JSON data') + if info_raw.get('code'): + raise ExtractorError( + 'Bigo says: %s (code %s)' % (info_raw.get('msg'), info_raw.get('code')), expected=True) + info = info_raw.get('data') or {} + + if not info.get('alive'): + raise ExtractorError('This user is offline.', expected=True) + + formats, subs = self._extract_m3u8_formats_and_subtitles( + info.get('hls_src'), user_id, 'mp4', 'm3u8') + + return { + 'id': info.get('roomId') or user_id, + 'title': info.get('roomTopic') or info.get('nick_name') or user_id, + 'formats': formats, + 'subtitles': subs, + 'thumbnail': info.get('snapshot'), + 'uploader': info.get('nick_name'), + 'uploader_id': user_id, + 'is_live': True, + } diff --git a/plugins/youtube_download/yt_dlp/extractor/bild.py b/plugins/youtube_download/yt_dlp/extractor/bild.py index b8dfbd4..f3dea33 100644 --- a/plugins/youtube_download/yt_dlp/extractor/bild.py +++ b/plugins/youtube_download/yt_dlp/extractor/bild.py @@ -1,6 +1,3 @@ -# coding: utf-8 -from __future__ import unicode_literals - from .common import InfoExtractor from ..utils import ( int_or_none, diff --git a/plugins/youtube_download/yt_dlp/extractor/bilibili.py b/plugins/youtube_download/yt_dlp/extractor/bilibili.py index a775aa9..f418063 100644 --- a/plugins/youtube_download/yt_dlp/extractor/bilibili.py +++ b/plugins/youtube_download/yt_dlp/extractor/bilibili.py @@ -1,390 +1,117 @@ -# coding: utf-8 - import base64 -import hashlib -import itertools import functools -import re +import itertools import math +import urllib.error +import urllib.parse from .common import InfoExtractor, SearchInfoExtractor -from ..compat import ( - compat_parse_qs, - compat_urlparse, - compat_urllib_parse_urlparse -) +from ..dependencies import Cryptodome from ..utils import ( ExtractorError, - int_or_none, + GeoRestrictedError, + InAdvancePagedList, + OnDemandPagedList, + filter_dict, float_or_none, + format_field, + int_or_none, + make_archive_id, + merge_dicts, mimetype2ext, - parse_iso8601, - traverse_obj, parse_count, + parse_qs, + qualities, smuggle_url, srt_subtitles_timecode, str_or_none, - 
strip_jsonp, - unified_timestamp, + traverse_obj, unsmuggle_url, - urlencode_postdata, url_or_none, - OnDemandPagedList + urlencode_postdata, ) -class BiliBiliIE(InfoExtractor): - _VALID_URL = r'''(?x) - https?:// - (?:(?:www|bangumi)\.)? - bilibili\.(?:tv|com)/ - (?: - (?: - video/[aA][vV]| - anime/(?P\d+)/play\# - )(?P\d+)| - (s/)?video/[bB][vV](?P[^/?#&]+) - ) - (?:/?\?p=(?P\d+))? - ''' - - _TESTS = [{ - 'url': 'http://www.bilibili.com/video/av1074402/', - 'md5': '5f7d29e1a2872f3df0cf76b1f87d3788', - 'info_dict': { - 'id': '1074402_part1', - 'ext': 'mp4', - 'title': '【金坷垃】金泡沫', - 'uploader_id': '156160', - 'uploader': '菊子桑', - 'upload_date': '20140420', - 'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923', - 'timestamp': 1398012678, - }, - }, { - # Tested in BiliBiliBangumiIE - 'url': 'http://bangumi.bilibili.com/anime/1869/play#40062', - 'only_matching': True, - }, { - # bilibili.tv - 'url': 'http://www.bilibili.tv/video/av1074402/', - 'only_matching': True, - }, { - 'url': 'http://bangumi.bilibili.com/anime/5802/play#100643', - 'md5': '3f721ad1e75030cc06faf73587cfec57', - 'info_dict': { - 'id': '100643_part1', - 'ext': 'mp4', - 'title': 'CHAOS;CHILD', - 'description': '如果你是神明,并且能够让妄想成为现实。那你会进行怎么样的妄想?是淫靡的世界?独裁社会?毁灭性的制裁?还是……2015年,涩谷。从6年前发生的大灾害“涩谷地震”之后复兴了的这个街区里新设立的私立高中...', - }, - 'skip': 'Geo-restricted to China', - }, { - 'url': 'http://www.bilibili.com/video/av8903802/', - 'info_dict': { - 'id': '8903802_part1', - 'ext': 'mp4', - 'title': '阿滴英文|英文歌分享#6 "Closer', - 'upload_date': '20170301', - 'description': '滴妹今天唱Closer給你聽! 有史以来,被推最多次也是最久的歌曲,其实歌词跟我原本想像差蛮多的,不过还是好听! 微博@阿滴英文', - 'timestamp': 1488382634, - 'uploader_id': '65880958', - 'uploader': '阿滴英文', - }, - 'params': { - 'skip_download': True, - }, - }, { - # new BV video id format - 'url': 'https://www.bilibili.com/video/BV1JE411F741', - 'only_matching': True, - }, { - # Anthology - 'url': 'https://www.bilibili.com/video/BV1bK411W797', - 'info_dict': { - 'id': 'BV1bK411W797', - 'title': '物语中的人物是如何吐槽自己的OP的' - }, - 'playlist_count': 17, - }] - - _APP_KEY = 'iVGUTjsxvpLeuDCf' - _BILIBILI_KEY = 'aHRmhWMLkdeMuILqORnYZocwMBpMEOdt' - - def _report_error(self, result): - if 'message' in result: - raise ExtractorError('%s said: %s' % (self.IE_NAME, result['message']), expected=True) - elif 'code' in result: - raise ExtractorError('%s returns error %d' % (self.IE_NAME, result['code']), expected=True) - else: - raise ExtractorError('Can\'t extract Bangumi episode ID') - - def _real_extract(self, url): - url, smuggled_data = unsmuggle_url(url, {}) - - mobj = self._match_valid_url(url) - video_id = mobj.group('id_bv') or mobj.group('id') - - av_id, bv_id = self._get_video_id_set(video_id, mobj.group('id_bv') is not None) - video_id = av_id - - info = {} - anime_id = mobj.group('anime_id') - page_id = mobj.group('page') - webpage = self._download_webpage(url, video_id) - - # Bilibili anthologies are similar to playlists but all videos share the same video ID as the anthology itself. 
- # If the video has no page argument, check to see if it's an anthology - if page_id is None: - if not self.get_param('noplaylist'): - r = self._extract_anthology_entries(bv_id, video_id, webpage) - if r is not None: - self.to_screen('Downloading anthology %s - add --no-playlist to just download video' % video_id) - return r - else: - self.to_screen('Downloading just video %s because of --no-playlist' % video_id) - - if 'anime/' not in url: - cid = self._search_regex( - r'\bcid(?:["\']:|=)(\d+),["\']page(?:["\']:|=)' + str(page_id), webpage, 'cid', - default=None - ) or self._search_regex( - r'\bcid(?:["\']:|=)(\d+)', webpage, 'cid', - default=None - ) or compat_parse_qs(self._search_regex( - [r'EmbedPlayer\([^)]+,\s*"([^"]+)"\)', - r'EmbedPlayer\([^)]+,\s*\\"([^"]+)\\"\)', - r']+src="https://secure\.bilibili\.com/secure,([^"]+)"'], - webpage, 'player parameters'))['cid'][0] - else: - if 'no_bangumi_tip' not in smuggled_data: - self.to_screen('Downloading episode %s. To download all videos in anime %s, re-run yt-dlp with %s' % ( - video_id, anime_id, compat_urlparse.urljoin(url, '//bangumi.bilibili.com/anime/%s' % anime_id))) - headers = { - 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8', - 'Referer': url - } - headers.update(self.geo_verification_headers()) - - js = self._download_json( - 'http://bangumi.bilibili.com/web_api/get_source', video_id, - data=urlencode_postdata({'episode_id': video_id}), - headers=headers) - if 'result' not in js: - self._report_error(js) - cid = js['result']['cid'] - - headers = { - 'Accept': 'application/json', - 'Referer': url - } - headers.update(self.geo_verification_headers()) - - video_info = self._parse_json( - self._search_regex(r'window.__playinfo__\s*=\s*({.+?})', webpage, 'video info', default=None) or '{}', - video_id, fatal=False) - video_info = video_info.get('data') or {} - - durl = traverse_obj(video_info, ('dash', 'video')) - audios = traverse_obj(video_info, ('dash', 'audio')) or [] - entries = [] - - RENDITIONS = ('qn=80&quality=80&type=', 'quality=2&type=mp4') - for num, rendition in enumerate(RENDITIONS, start=1): - payload = 'appkey=%s&cid=%s&otype=json&%s' % (self._APP_KEY, cid, rendition) - sign = hashlib.md5((payload + self._BILIBILI_KEY).encode('utf-8')).hexdigest() - if not video_info: - video_info = self._download_json( - 'http://interface.bilibili.com/v2/playurl?%s&sign=%s' % (payload, sign), - video_id, note='Downloading video info page', - headers=headers, fatal=num == len(RENDITIONS)) - if not video_info: - continue - - if not durl and 'durl' not in video_info: - if num < len(RENDITIONS): - continue - self._report_error(video_info) - - formats = [] - for idx, durl in enumerate(durl or video_info['durl']): - formats.append({ - 'url': durl.get('baseUrl') or durl.get('base_url') or durl.get('url'), - 'ext': mimetype2ext(durl.get('mimeType') or durl.get('mime_type')), - 'fps': int_or_none(durl.get('frameRate') or durl.get('frame_rate')), - 'width': int_or_none(durl.get('width')), - 'height': int_or_none(durl.get('height')), - 'vcodec': durl.get('codecs'), - 'acodec': 'none' if audios else None, - 'tbr': float_or_none(durl.get('bandwidth'), scale=1000), - 'filesize': int_or_none(durl.get('size')), - }) - for backup_url in traverse_obj(durl, 'backup_url', expected_type=list) or []: - formats.append({ - 'url': backup_url, - 'quality': -2 if 'hd.mp4' in backup_url else -3, - }) - - for a_format in formats: - a_format.setdefault('http_headers', {}).update({ - 'Referer': url, - }) - for audio in audios: - 
formats.append({
-                    'url': audio.get('baseUrl') or audio.get('base_url') or audio.get('url'),
-                    'ext': mimetype2ext(audio.get('mimeType') or audio.get('mime_type')),
-                    'fps': int_or_none(audio.get('frameRate') or audio.get('frame_rate')),
-                    'width': int_or_none(audio.get('width')),
-                    'height': int_or_none(audio.get('height')),
-                    'acodec': audio.get('codecs'),
-                    'vcodec': 'none',
-                    'tbr': float_or_none(audio.get('bandwidth'), scale=1000),
-                    'filesize': int_or_none(audio.get('size'))
-                })
-                for backup_url in traverse_obj(audio, 'backup_url', expected_type=list) or []:
-                    formats.append({
-                        'url': backup_url,
-                        # backup URLs have lower priorities
-                        'quality': -3,
-                    })
-
-            info.update({
-                'id': video_id,
-                'duration': float_or_none(durl.get('length'), 1000),
-                'formats': formats,
-            })
-            break
-
-        self._sort_formats(formats)
-
-        title = self._html_search_regex((
-            r'<h1[^>]+title=(["\'])(?P<content>[^"\']+)',
-            r'(?s)<h1[^>]*>(?P<content>.+?)</h1>',
-            self._meta_regex('title')
-        ), webpage, 'title', group='content', fatal=False)
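# Illustrative sketch (hypothetical helper, not yt-dlp API): the removed title
# lookup above relies on _html_search_regex() accepting a tuple of patterns that
# are tried in order, returning the named 'content' group of the first match.
# With plain re, the same fallback chain looks roughly like:
import re

def first_match(patterns, html, group='content'):
    # Try each pattern in order; the first one that matches wins.
    for pattern in patterns:
        mobj = re.search(pattern, html)
        if mobj:
            return mobj.group(group)
    return None

# first_match((r'<h1[^>]+title=(["\'])(?P<content>[^"\']+)',
#              r'(?s)<h1[^>]*>(?P<content>.+?)</h1>'),
#             '<h1>【金坷垃】金泡沫</h1>')  # -> '【金坷垃】金泡沫'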
-
-        # Get part title for anthologies
-        if page_id is not None:
-            # TODO: The json is already downloaded by _extract_anthology_entries. Don't redownload for each video.
-            part_info = traverse_obj(self._download_json(
-                f'https://api.bilibili.com/x/player/pagelist?bvid={bv_id}&jsonp=jsonp',
-                video_id, note='Extracting videos in anthology'), 'data', expected_type=list)
-            title = title if len(part_info) == 1 else traverse_obj(part_info, (int(page_id) - 1, 'part')) or title
-
-        description = self._html_search_meta('description', webpage)
-        timestamp = unified_timestamp(self._html_search_regex(
-            r'<time[^>]+datetime="([^"]+)"', webpage, 'upload time',
-            default=None) or self._html_search_meta(
-            'uploadDate', webpage, 'timestamp', default=None))
-        thumbnail = self._html_search_meta(['og:image', 'thumbnailUrl'], webpage)
-
-        # TODO 'view_count' requires deobfuscating Javascript
-        info.update({
-            'id': f'{video_id}_part{page_id or 1}',
-            'cid': cid,
-            'title': title,
-            'description': description,
-            'timestamp': timestamp,
-            'thumbnail': thumbnail,
-            'duration': float_or_none(video_info.get('timelength'), scale=1000),
-        })
-
-        uploader_mobj = re.search(
-            r'<a[^>]+href="(?:https?:)?//space\.bilibili\.com/(?P<id>\d+)"[^>]*>\s*(?P<name>[^<]+?)\s*<',
-            webpage)
-        if uploader_mobj:
-            info.update({
-                'uploader': uploader_mobj.group('name').strip(),
-                'uploader_id': uploader_mobj.group('id'),
-            })
-
-        if not info.get('uploader'):
-            info['uploader'] = self._html_search_meta(
-                'author', webpage, 'uploader', default=None)
-
-        top_level_info = {
-            'tags': traverse_obj(self._download_json(
-                f'https://api.bilibili.com/x/tag/archive/tags?aid={video_id}',
-                video_id, fatal=False, note='Downloading tags'), ('data', ..., 'tag_name')),
+class BilibiliBaseIE(InfoExtractor):
+    def extract_formats(self, play_info):
+        format_names = {
+            r['quality']: traverse_obj(r, 'new_description', 'display_desc')
+            for r in traverse_obj(play_info, ('support_formats', lambda _, v: v['quality']))
         }
-        info['subtitles'] = {
+        audios = traverse_obj(play_info, ('dash', 'audio', ...))
+        flac_audio = traverse_obj(play_info, ('dash', 'flac', 'audio'))
+        if flac_audio:
+            audios.append(flac_audio)
+        formats = [{
+            'url': traverse_obj(audio, 'baseUrl', 'base_url', 'url'),
+            'ext': mimetype2ext(traverse_obj(audio, 'mimeType', 'mime_type')),
+            'acodec': audio.get('codecs'),
+            'vcodec': 'none',
+            'tbr': float_or_none(audio.get('bandwidth'), scale=1000),
+            'filesize': int_or_none(audio.get('size'))
+        } for audio in audios]
+
+        formats.extend({
+            'url': traverse_obj(video, 'baseUrl', 'base_url', 'url'),
+            'ext': mimetype2ext(traverse_obj(video, 'mimeType', 'mime_type')),
+            'fps': float_or_none(traverse_obj(video, 'frameRate', 'frame_rate')),
+            'width': int_or_none(video.get('width')),
+            'height': int_or_none(video.get('height')),
+            'vcodec': video.get('codecs'),
+            'acodec': 'none' if audios else None,
+            'tbr': float_or_none(video.get('bandwidth'), scale=1000),
+            'filesize': int_or_none(video.get('size')),
+            'quality': int_or_none(video.get('id')),
+            'format': format_names.get(video.get('id')),
+        } for video in traverse_obj(play_info, ('dash', 'video', ...)))
+
+        missing_formats = format_names.keys() - set(traverse_obj(formats, (..., 'quality')))
+        if missing_formats:
+            self.to_screen(f'Format(s) {", ".join(format_names[i] for i in missing_formats)} are missing; '
                           f'you have to login or become premium member to download them. 
{self._login_hint()}') + + return formats + + def json2srt(self, json_data): + srt_data = '' + for idx, line in enumerate(json_data.get('body') or []): + srt_data += (f'{idx + 1}\n' + f'{srt_subtitles_timecode(line["from"])} --> {srt_subtitles_timecode(line["to"])}\n' + f'{line["content"]}\n\n') + return srt_data + + def _get_subtitles(self, video_id, initial_state, cid): + subtitles = { 'danmaku': [{ 'ext': 'xml', 'url': f'https://comment.bilibili.com/{cid}.xml', }] } - r''' - # Requires https://github.com/m13253/danmaku2ass which is licenced under GPL3 - # See https://github.com/animelover1984/youtube-dl + for s in traverse_obj(initial_state, ('videoData', 'subtitle', 'list')) or []: + subtitles.setdefault(s['lan'], []).append({ + 'ext': 'srt', + 'data': self.json2srt(self._download_json(s['subtitle_url'], video_id)) + }) + return subtitles - raw_danmaku = self._download_webpage( - f'https://comment.bilibili.com/{cid}.xml', video_id, fatal=False, note='Downloading danmaku comments') - danmaku = NiconicoIE.CreateDanmaku(raw_danmaku, commentType='Bilibili', x=1024, y=576) - entries[0]['subtitles'] = { - 'danmaku': [{ - 'ext': 'ass', - 'data': danmaku - }] - } - ''' + def _get_chapters(self, aid, cid): + chapters = aid and cid and self._download_json( + 'https://api.bilibili.com/x/player/v2', aid, query={'aid': aid, 'cid': cid}, + note='Extracting chapters', fatal=False) + return traverse_obj(chapters, ('data', 'view_points', ..., { + 'title': 'content', + 'start_time': 'from', + 'end_time': 'to', + })) or None - top_level_info['__post_extractor'] = self.extract_comments(video_id) - - for entry in entries: - entry.update(info) - - if len(entries) == 1: - entries[0].update(top_level_info) - return entries[0] - - for idx, entry in enumerate(entries): - entry['id'] = '%s_part%d' % (video_id, (idx + 1)) - - return { - 'id': str(video_id), - 'bv_id': bv_id, - 'title': title, - 'description': description, - **info, **top_level_info - } - - def _extract_anthology_entries(self, bv_id, video_id, webpage): - title = self._html_search_regex( - (r']+\btitle=(["\'])(?P(?:(?!\1).)+)\1', - r'(?s)<h1[^>]*>(?P<title>.+?)</h1>', - r'<title>(?P<title>.+?)'), webpage, 'title', - group='title') - json_data = self._download_json( - f'https://api.bilibili.com/x/player/pagelist?bvid={bv_id}&jsonp=jsonp', - video_id, note='Extracting videos in anthology') - - if json_data['data']: - return self.playlist_from_matches( - json_data['data'], bv_id, title, ie=BiliBiliIE.ie_key(), - getter=lambda entry: 'https://www.bilibili.com/video/%s?p=%d' % (bv_id, entry['page'])) - - def _get_video_id_set(self, id, is_bv): - query = {'bvid': id} if is_bv else {'aid': id} - response = self._download_json( - "http://api.bilibili.cn/x/web-interface/view", - id, query=query, - note='Grabbing original ID via API') - - if response['code'] == -400: - raise ExtractorError('Video ID does not exist', expected=True, video_id=id) - elif response['code'] != 0: - raise ExtractorError(f'Unknown error occurred during API check (code {response["code"]})', - expected=True, video_id=id) - return response['data']['aid'], response['data']['bvid'] - - def _get_comments(self, video_id, commentPageNumber=0): + def _get_comments(self, aid): for idx in itertools.count(1): replies = traverse_obj( self._download_json( - f'https://api.bilibili.com/x/v2/reply?pn={idx}&oid={video_id}&type=1&jsonp=jsonp&sort=2&_=1567227301685', - video_id, note=f'Extracting comments from page {idx}', fatal=False), + 
f'https://api.bilibili.com/x/v2/reply?pn={idx}&oid={aid}&type=1&jsonp=jsonp&sort=2&_=1567227301685', + aid, note=f'Extracting comments from page {idx}', fatal=False), ('data', 'replies')) if not replies: return @@ -400,110 +127,440 @@ class BiliBiliIE(InfoExtractor): 'timestamp': reply.get('ctime'), 'parent': reply.get('parent') or 'root', } - for children in map(self._get_all_children, reply.get('replies') or []): + for children in map(self._get_all_children, traverse_obj(reply, ('replies', ...))): yield from children -class BiliBiliBangumiIE(InfoExtractor): - _VALID_URL = r'https?://bangumi\.bilibili\.com/anime/(?P\d+)' - - IE_NAME = 'bangumi.bilibili.com' - IE_DESC = 'BiliBili番剧' +class BiliBiliIE(BilibiliBaseIE): + _VALID_URL = r'https?://www\.bilibili\.com/video/[aAbB][vV](?P[^/?#&]+)' _TESTS = [{ - 'url': 'http://bangumi.bilibili.com/anime/1869', + 'url': 'https://www.bilibili.com/video/BV13x41117TL', 'info_dict': { - 'id': '1869', - 'title': '混沌武士', - 'description': 'md5:6a9622b911565794c11f25f81d6a97d2', + 'id': 'BV13x41117TL', + 'title': '阿滴英文|英文歌分享#6 "Closer', + 'ext': 'mp4', + 'description': '滴妹今天唱Closer給你聽! 有史以来,被推最多次也是最久的歌曲,其实歌词跟我原本想像差蛮多的,不过还是好听! 微博@阿滴英文', + 'uploader_id': '65880958', + 'uploader': '阿滴英文', + 'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$', + 'duration': 554.117, + 'tags': list, + 'comment_count': int, + 'upload_date': '20170301', + 'timestamp': 1488353834, + 'like_count': int, + 'view_count': int, }, - 'playlist_count': 26, }, { - 'url': 'http://bangumi.bilibili.com/anime/1869', + # old av URL version + 'url': 'http://www.bilibili.com/video/av1074402/', 'info_dict': { - 'id': '1869', - 'title': '混沌武士', - 'description': 'md5:6a9622b911565794c11f25f81d6a97d2', + 'thumbnail': r're:^https?://.*\.(jpg|jpeg)$', + 'ext': 'mp4', + 'uploader': '菊子桑', + 'uploader_id': '156160', + 'id': 'BV11x411K7CN', + 'title': '【金坷垃】金泡沫', + 'duration': 308.36, + 'upload_date': '20140420', + 'timestamp': 1397983878, + 'description': 'md5:ce18c2a2d2193f0df2917d270f2e5923', + 'like_count': int, + 'comment_count': int, + 'view_count': int, + 'tags': list, }, + 'params': {'skip_download': True}, + }, { + 'note': 'Anthology', + 'url': 'https://www.bilibili.com/video/BV1bK411W797', + 'info_dict': { + 'id': 'BV1bK411W797', + 'title': '物语中的人物是如何吐槽自己的OP的' + }, + 'playlist_count': 18, 'playlist': [{ - 'md5': '91da8621454dd58316851c27c68b0c13', 'info_dict': { - 'id': '40062', + 'id': 'BV1bK411W797_p1', 'ext': 'mp4', - 'title': '混沌武士', - 'description': '故事发生在日本的江户时代。风是一个小酒馆的打工女。一日,酒馆里来了一群恶霸,虽然他们的举动令风十分不满,但是毕竟风只是一届女流,无法对他们采取什么行动,只能在心里嘟哝。这时,酒家里又进来了个“不良份子...', - 'timestamp': 1414538739, - 'upload_date': '20141028', - 'episode': '疾风怒涛 Tempestuous Temperaments', - 'episode_number': 1, - }, - }], - 'params': { - 'playlist_items': '1', + 'title': '物语中的人物是如何吐槽自己的OP的 p01 Staple Stable/战场原+羽川', + 'tags': 'count:11', + 'timestamp': 1589601697, + 'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$', + 'uploader': '打牌还是打桩', + 'uploader_id': '150259984', + 'like_count': int, + 'comment_count': int, + 'upload_date': '20200516', + 'view_count': int, + 'description': 'md5:e3c401cf7bc363118d1783dd74068a68', + 'duration': 90.314, + } + }] + }, { + 'note': 'Specific page of Anthology', + 'url': 'https://www.bilibili.com/video/BV1bK411W797?p=1', + 'info_dict': { + 'id': 'BV1bK411W797_p1', + 'ext': 'mp4', + 'title': '物语中的人物是如何吐槽自己的OP的 p01 Staple Stable/战场原+羽川', + 'tags': 'count:11', + 'timestamp': 1589601697, + 'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$', + 'uploader': '打牌还是打桩', + 'uploader_id': '150259984', + 
'like_count': int, + 'comment_count': int, + 'upload_date': '20200516', + 'view_count': int, + 'description': 'md5:e3c401cf7bc363118d1783dd74068a68', + 'duration': 90.314, + } + }, { + 'note': 'video has subtitles', + 'url': 'https://www.bilibili.com/video/BV12N4y1M7rh', + 'info_dict': { + 'id': 'BV12N4y1M7rh', + 'ext': 'mp4', + 'title': 'md5:96e8bb42c2b432c0d4ce3434a61479c1', + 'tags': list, + 'description': 'md5:afde2b7ba9025c01d9e3dde10de221e4', + 'duration': 313.557, + 'upload_date': '20220709', + 'uploader': '小夫Tech', + 'timestamp': 1657347907, + 'uploader_id': '1326814124', + 'comment_count': int, + 'view_count': int, + 'like_count': int, + 'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$', + 'subtitles': 'count:2' }, + 'params': {'listsubtitles': True}, + }, { + 'url': 'https://www.bilibili.com/video/av8903802/', + 'info_dict': { + 'id': 'BV13x41117TL', + 'ext': 'mp4', + 'title': '阿滴英文|英文歌分享#6 "Closer', + 'upload_date': '20170301', + 'description': 'md5:3b1b9e25b78da4ef87e9b548b88ee76a', + 'timestamp': 1488353834, + 'uploader_id': '65880958', + 'uploader': '阿滴英文', + 'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$', + 'duration': 554.117, + 'tags': list, + 'comment_count': int, + 'view_count': int, + 'like_count': int, + }, + 'params': { + 'skip_download': True, + }, + }, { + 'note': 'video has chapter', + 'url': 'https://www.bilibili.com/video/BV1vL411G7N7/', + 'info_dict': { + 'id': 'BV1vL411G7N7', + 'ext': 'mp4', + 'title': '如何为你的B站视频添加进度条分段', + 'timestamp': 1634554558, + 'upload_date': '20211018', + 'description': 'md5:a9a3d6702b3a94518d419b2e9c320a6d', + 'tags': list, + 'uploader': '爱喝咖啡的当麻', + 'duration': 669.482, + 'uploader_id': '1680903', + 'chapters': 'count:6', + 'comment_count': int, + 'view_count': int, + 'like_count': int, + 'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$', + }, + 'params': {'skip_download': True}, }] - @classmethod - def suitable(cls, url): - return False if BiliBiliIE.suitable(url) else super(BiliBiliBangumiIE, cls).suitable(url) + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + initial_state = self._search_json(r'window\.__INITIAL_STATE__\s*=', webpage, 'initial state', video_id) + play_info = self._search_json(r'window\.__playinfo__\s*=', webpage, 'play info', video_id)['data'] + + video_data = initial_state['videoData'] + video_id, title = video_data['bvid'], video_data.get('title') + + # Bilibili anthologies are similar to playlists but all videos share the same video ID as the anthology itself. 
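# Illustrative sketch (hypothetical, stdlib only, not yt-dlp's parse_qs): the
# anthology branch below keys everything off the '?p=N' query parameter and
# falls back to playlist extraction when a multi-part video is requested
# without an explicit part.
from urllib.parse import parse_qs, urlparse

def anthology_part(url):
    # Returns the 1-based part number from '?p=N', or None when absent.
    qs = parse_qs(urlparse(url).query)
    return int(qs['p'][-1]) if qs.get('p') else None

# anthology_part('https://www.bilibili.com/video/BV1bK411W797?p=2') -> 2
# anthology_part('https://www.bilibili.com/video/BV1bK411W797') -> None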
+ page_list_json = traverse_obj( + self._download_json( + 'https://api.bilibili.com/x/player/pagelist', video_id, + fatal=False, query={'bvid': video_id, 'jsonp': 'jsonp'}, + note='Extracting videos in anthology'), + 'data', expected_type=list) or [] + is_anthology = len(page_list_json) > 1 + + part_id = int_or_none(parse_qs(url).get('p', [None])[-1]) + if is_anthology and not part_id and self._yes_playlist(video_id, video_id): + return self.playlist_from_matches( + page_list_json, video_id, title, ie=BiliBiliIE, + getter=lambda entry: f'https://www.bilibili.com/video/{video_id}?p={entry["page"]}') + + if is_anthology: + part_id = part_id or 1 + title += f' p{part_id:02d} {traverse_obj(page_list_json, (part_id - 1, "part")) or ""}' + + aid = video_data.get('aid') + old_video_id = format_field(aid, None, f'%s_part{part_id or 1}') + + cid = traverse_obj(video_data, ('pages', part_id - 1, 'cid')) if part_id else video_data.get('cid') + + return { + 'id': f'{video_id}{format_field(part_id, None, "_p%d")}', + 'formats': self.extract_formats(play_info), + '_old_archive_ids': [make_archive_id(self, old_video_id)] if old_video_id else None, + 'title': title, + 'description': traverse_obj(initial_state, ('videoData', 'desc')), + 'view_count': traverse_obj(initial_state, ('videoData', 'stat', 'view')), + 'uploader': traverse_obj(initial_state, ('upData', 'name')), + 'uploader_id': traverse_obj(initial_state, ('upData', 'mid')), + 'like_count': traverse_obj(initial_state, ('videoData', 'stat', 'like')), + 'comment_count': traverse_obj(initial_state, ('videoData', 'stat', 'reply')), + 'tags': traverse_obj(initial_state, ('tags', ..., 'tag_name')), + 'thumbnail': traverse_obj(initial_state, ('videoData', 'pic')), + 'timestamp': traverse_obj(initial_state, ('videoData', 'pubdate')), + 'duration': float_or_none(play_info.get('timelength'), scale=1000), + 'chapters': self._get_chapters(aid, cid), + 'subtitles': self.extract_subtitles(video_id, initial_state, cid), + '__post_extractor': self.extract_comments(aid), + 'http_headers': {'Referer': url}, + } + + +class BiliBiliBangumiIE(BilibiliBaseIE): + _VALID_URL = r'(?x)https?://www\.bilibili\.com/bangumi/play/(?P(?:ss|ep)\d+)' + + _TESTS = [{ + 'url': 'https://www.bilibili.com/bangumi/play/ss897', + 'info_dict': { + 'id': 'ss897', + 'ext': 'mp4', + 'series': '神的记事本', + 'season': '神的记事本', + 'season_id': 897, + 'season_number': 1, + 'episode': '你与旅行包', + 'episode_number': 2, + 'title': '神的记事本:第2话 你与旅行包', + 'duration': 1428.487, + 'timestamp': 1310809380, + 'upload_date': '20110716', + 'thumbnail': r're:^https?://.*\.(jpg|jpeg|png)$', + }, + }, { + 'url': 'https://www.bilibili.com/bangumi/play/ep508406', + 'only_matching': True, + }] def _real_extract(self, url): - bangumi_id = self._match_id(url) + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) - # Sometimes this API returns a JSONP response - season_info = self._download_json( - 'http://bangumi.bilibili.com/jsonp/seasoninfo/%s.ver' % bangumi_id, - bangumi_id, transform_source=strip_jsonp)['result'] + if '您所在的地区无法观看本片' in webpage: + raise GeoRestrictedError('This video is restricted') + elif ('开通大会员观看' in webpage and '__playinfo__' not in webpage + or '正在观看预览,大会员免费看全片' in webpage): + self.raise_login_required('This video is for premium members only') - entries = [{ - '_type': 'url_transparent', - 'url': smuggle_url(episode['webplay_url'], {'no_bangumi_tip': 1}), - 'ie_key': BiliBiliIE.ie_key(), - 'timestamp': parse_iso8601(episode.get('update_time'), delimiter=' '), - 
'episode': episode.get('index_title'), - 'episode_number': int_or_none(episode.get('index')), - } for episode in season_info['episodes']] + play_info = self._search_json(r'window\.__playinfo__\s*=', webpage, 'play info', video_id)['data'] + formats = self.extract_formats(play_info) + if (not formats and '成为大会员抢先看' in webpage + and play_info.get('durl') and not play_info.get('dash')): + self.raise_login_required('This video is for premium members only') - entries = sorted(entries, key=lambda entry: entry.get('episode_number')) + initial_state = self._search_json(r'window\.__INITIAL_STATE__\s*=', webpage, 'initial state', video_id) - return self.playlist_result( - entries, bangumi_id, - season_info.get('bangumi_title'), season_info.get('evaluate')) + season_id = traverse_obj(initial_state, ('mediaInfo', 'season_id')) + season_number = season_id and next(( + idx + 1 for idx, e in enumerate( + traverse_obj(initial_state, ('mediaInfo', 'seasons', ...))) + if e.get('season_id') == season_id + ), None) + + return { + 'id': video_id, + 'formats': formats, + 'title': traverse_obj(initial_state, 'h1Title'), + 'episode': traverse_obj(initial_state, ('epInfo', 'long_title')), + 'episode_number': int_or_none(traverse_obj(initial_state, ('epInfo', 'title'))), + 'series': traverse_obj(initial_state, ('mediaInfo', 'series')), + 'season': traverse_obj(initial_state, ('mediaInfo', 'season_title')), + 'season_id': season_id, + 'season_number': season_number, + 'thumbnail': traverse_obj(initial_state, ('epInfo', 'cover')), + 'timestamp': traverse_obj(initial_state, ('epInfo', 'pub_time')), + 'duration': float_or_none(play_info.get('timelength'), scale=1000), + 'subtitles': self.extract_subtitles( + video_id, initial_state, traverse_obj(initial_state, ('epInfo', 'cid'))), + '__post_extractor': self.extract_comments(traverse_obj(initial_state, ('epInfo', 'aid'))), + 'http_headers': {'Referer': url, **self.geo_verification_headers()}, + } -class BilibiliChannelIE(InfoExtractor): - _VALID_URL = r'https?://space.bilibili\.com/(?P\d+)' - _API_URL = "https://api.bilibili.com/x/space/arc/search?mid=%s&pn=%d&jsonp=jsonp" +class BiliBiliBangumiMediaIE(InfoExtractor): + _VALID_URL = r'https?://www\.bilibili\.com/bangumi/media/md(?P\d+)' + _TESTS = [{ + 'url': 'https://www.bilibili.com/bangumi/media/md24097891', + 'info_dict': { + 'id': '24097891', + }, + 'playlist_mincount': 25, + }] + + def _real_extract(self, url): + media_id = self._match_id(url) + webpage = self._download_webpage(url, media_id) + + initial_state = self._search_json(r'window\.__INITIAL_STATE__\s*=', webpage, 'initial_state', media_id) + episode_list = self._download_json( + 'https://api.bilibili.com/pgc/web/season/section', media_id, + query={'season_id': initial_state['mediaInfo']['season_id']}, + note='Downloading season info')['result']['main_section']['episodes'] + + return self.playlist_result(( + self.url_result(entry['share_url'], BiliBiliBangumiIE, entry['aid']) + for entry in episode_list), media_id) + + +class BilibiliSpaceBaseIE(InfoExtractor): + def _extract_playlist(self, fetch_page, get_metadata, get_entries): + first_page = fetch_page(0) + metadata = get_metadata(first_page) + + paged_list = InAdvancePagedList( + lambda idx: get_entries(fetch_page(idx) if idx else first_page), + metadata['page_count'], metadata['page_size']) + + return metadata, paged_list + + +class BilibiliSpaceVideoIE(BilibiliSpaceBaseIE): + _VALID_URL = r'https?://space\.bilibili\.com/(?P\d+)(?P