Plugin cleanup and tweaks

commit 3ad9e1c7bb
parent 372e4ff3dc
Date: 2023-02-20 19:18:45 -06:00
1138 changed files with 48878 additions and 40445 deletions

@@ -1,16 +1,19 @@
-# coding: utf-8
-from __future__ import unicode_literals
-
+import itertools
 import re
+from urllib.parse import urlparse, parse_qs
 
 from .common import InfoExtractor
 from ..utils import (
-    ExtractorError,
     clean_html,
     dict_get,
+    ExtractorError,
     int_or_none,
+    join_nonempty,
+    merge_dicts,
     parse_duration,
+    traverse_obj,
     try_get,
+    unified_timestamp,
     update_url_query,
 )
@@ -18,6 +21,23 @@ from ..utils import (
 class NaverBaseIE(InfoExtractor):
     _CAPTION_EXT_RE = r'\.(?:ttml|vtt)'
 
+    @staticmethod  # NB: Used in VLiveWebArchiveIE
+    def process_subtitles(vod_data, process_url):
+        ret = {'subtitles': {}, 'automatic_captions': {}}
+        for caption in traverse_obj(vod_data, ('captions', 'list', ...)):
+            caption_url = caption.get('source')
+            if not caption_url:
+                continue
+            type_ = 'automatic_captions' if caption.get('type') == 'auto' else 'subtitles'
+            lang = caption.get('locale') or join_nonempty('language', 'country', from_dict=caption) or 'und'
+            if caption.get('type') == 'fan':
+                lang += '_fan%d' % next(i for i in itertools.count(1) if f'{lang}_fan{i}' not in ret[type_])
+            ret[type_].setdefault(lang, []).extend({
+                'url': sub_url,
+                'name': join_nonempty('label', 'fanName', from_dict=caption, delim=' - '),
+            } for sub_url in process_url(caption_url))
+        return ret
+
     def _extract_video_info(self, video_id, vid, key):
         video_data = self._download_json(
             'http://play.rmcnmv.naver.com/vod/play/v2.0/' + vid,
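
A quick way to sanity-check the new helper in isolation: a minimal sketch, run against the class directly, with an invented vod_data payload and an identity process_url (the sample fields below are hypothetical, not real Naver API output):

    # Hypothetical payload -- field names mirror what process_subtitles reads.
    vod_data = {'captions': {'list': [
        {'source': 'https://example.com/ko.vtt', 'locale': 'ko',
         'type': 'fan', 'label': 'Korean', 'fanName': 'fansub'},
    ]}}

    subs = NaverBaseIE.process_subtitles(vod_data, lambda url: [url])
    # Expected shape:
    # {'subtitles': {'ko_fan1': [{'url': 'https://example.com/ko.vtt',
    #                             'name': 'Korean - fansub'}]},
    #  'automatic_captions': {}}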
@@ -65,28 +85,16 @@ class NaverBaseIE(InfoExtractor):
             formats.extend(self._extract_m3u8_formats(
                 update_url_query(stream_url, query), video_id,
                 'mp4', 'm3u8_native', m3u8_id=stream_type, fatal=False))
-        self._sort_formats(formats)
 
         replace_ext = lambda x, y: re.sub(self._CAPTION_EXT_RE, '.' + y, x)
 
         def get_subs(caption_url):
             if re.search(self._CAPTION_EXT_RE, caption_url):
-                return [{
-                    'url': replace_ext(caption_url, 'ttml'),
-                }, {
-                    'url': replace_ext(caption_url, 'vtt'),
-                }]
-            else:
-                return [{'url': caption_url}]
-
-        automatic_captions = {}
-        subtitles = {}
-        for caption in get_list('caption'):
-            caption_url = caption.get('source')
-            if not caption_url:
-                continue
-            sub_dict = automatic_captions if caption.get('type') == 'auto' else subtitles
-            sub_dict.setdefault(dict_get(caption, ('locale', 'language')), []).extend(get_subs(caption_url))
+                return [
+                    replace_ext(caption_url, 'ttml'),
+                    replace_ext(caption_url, 'vtt'),
+                ]
+            return [caption_url]
 
         user = meta.get('user', {})
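
With the caption loop gone, get_subs only maps one caption URL to the list of URLs to fetch; the {'url': ...} wrapping now happens in process_subtitles. A standalone re sketch of the same logic, to show what it yields:

    import re

    _CAPTION_EXT_RE = r'\.(?:ttml|vtt)'
    replace_ext = lambda x, y: re.sub(_CAPTION_EXT_RE, '.' + y, x)

    def get_subs(caption_url):
        # A recognized caption extension yields both container variants.
        if re.search(_CAPTION_EXT_RE, caption_url):
            return [replace_ext(caption_url, 'ttml'), replace_ext(caption_url, 'vtt')]
        return [caption_url]

    print(get_subs('https://example.com/sub.vtt'))
    # ['https://example.com/sub.ttml', 'https://example.com/sub.vtt']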
@@ -94,13 +102,12 @@ class NaverBaseIE(InfoExtractor):
             'id': video_id,
             'title': title,
             'formats': formats,
-            'subtitles': subtitles,
-            'automatic_captions': automatic_captions,
             'thumbnail': try_get(meta, lambda x: x['cover']['source']),
             'view_count': int_or_none(meta.get('count')),
             'uploader_id': user.get('id'),
             'uploader': user.get('name'),
             'uploader_url': user.get('url'),
+            **self.process_subtitles(video_data, get_subs),
         }
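
Since process_subtitles returns a dict with exactly the subtitles and automatic_captions keys, the ** unpacking merges both into the info dict, replacing the two explicit entries removed above:

    info = {'id': 'x', **{'subtitles': {'ko': []}, 'automatic_captions': {}}}
    # {'id': 'x', 'subtitles': {'ko': []}, 'automatic_captions': {}}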
@@ -237,7 +244,6 @@ class NaverLiveIE(InfoExtractor):
                 quality.get('url'), video_id, 'mp4',
                 m3u8_id=quality.get('qualityId'), live=True
             ))
-        self._sort_formats(formats)
 
         return {
             'id': video_id,
@@ -250,3 +256,142 @@
             'categories': [meta.get('categoryId')],
             'is_live': True
         }
+
+
+class NaverNowIE(NaverBaseIE):
+    IE_NAME = 'navernow'
+    _VALID_URL = r'https?://now\.naver\.com/s/now\.(?P<id>\w+)'
+    _API_URL = 'https://apis.naver.com/now_web/oldnow_web/v4'
+    _TESTS = [{
+        'url': 'https://now.naver.com/s/now.4759?shareReplayId=26331132#replay=',
+        'md5': 'e05854162c21c221481de16b2944a0bc',
+        'info_dict': {
+            'id': '4759-26331132',
+            'title': '아이키X노제\r\n💖꽁냥꽁냥💖(1)',
+            'ext': 'mp4',
+            'thumbnail': r're:^https?://.*\.jpg',
+            'timestamp': 1650369600,
+            'upload_date': '20220419',
+            'uploader_id': 'now',
+            'view_count': int,
+            'uploader_url': 'https://now.naver.com/show/4759',
+            'uploader': '아이키의 떰즈업',
+        },
+        'params': {
+            'noplaylist': True,
+        }
+    }, {
+        'url': 'https://now.naver.com/s/now.4759?shareHightlight=26601461#highlight=',
+        'md5': '9f6118e398aa0f22b2152f554ea7851b',
+        'info_dict': {
+            'id': '4759-26601461',
+            'title': '아이키: 나 리정한테 흔들렸어,,, 질투 폭발하는 노제 여보😾 [아이키의 떰즈업]ㅣ네이버 NOW.',
+            'ext': 'mp4',
+            'thumbnail': r're:^https?://.*\.jpg',
+            'upload_date': '20220504',
+            'timestamp': 1651648311,
+            'uploader_id': 'now',
+            'view_count': int,
+            'uploader_url': 'https://now.naver.com/show/4759',
+            'uploader': '아이키의 떰즈업',
+        },
+        'params': {
+            'noplaylist': True,
+        },
+    }, {
+        'url': 'https://now.naver.com/s/now.4759',
+        'info_dict': {
+            'id': '4759',
+            'title': '아이키의 떰즈업',
+        },
+        'playlist_mincount': 101
+    }, {
+        'url': 'https://now.naver.com/s/now.4759?shareReplayId=26331132#replay',
+        'info_dict': {
+            'id': '4759',
+            'title': '아이키의 떰즈업',
+        },
+        'playlist_mincount': 101,
+    }, {
+        'url': 'https://now.naver.com/s/now.4759?shareHightlight=26601461#highlight=',
+        'info_dict': {
+            'id': '4759',
+            'title': '아이키의 떰즈업',
+        },
+        'playlist_mincount': 101,
+    }, {
+        'url': 'https://now.naver.com/s/now.kihyunplay?shareReplayId=30573291#replay',
+        'only_matching': True,
+    }]
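
The _VALID_URL pattern captures everything after 'now.' up to the query string as the show id. A standalone re check against the first test URL above:

    import re

    _VALID_URL = r'https?://now\.naver\.com/s/now\.(?P<id>\w+)'
    m = re.match(_VALID_URL, 'https://now.naver.com/s/now.4759?shareReplayId=26331132#replay=')
    print(m.group('id'))  # 4759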
+
+    def _extract_replay(self, show_id, replay_id):
+        vod_info = self._download_json(f'{self._API_URL}/shows/now.{show_id}/vod/{replay_id}', replay_id)
+        in_key = self._download_json(f'{self._API_URL}/shows/now.{show_id}/vod/{replay_id}/inkey', replay_id)['inKey']
+        return merge_dicts({
+            'id': f'{show_id}-{replay_id}',
+            'title': traverse_obj(vod_info, ('episode', 'title')),
+            'timestamp': unified_timestamp(traverse_obj(vod_info, ('episode', 'start_time'))),
+            'thumbnail': vod_info.get('thumbnail_image_url'),
+        }, self._extract_video_info(replay_id, vod_info['video_id'], in_key))
+
+    def _extract_show_replays(self, show_id):
+        page_size = 15
+        page = 1
+        while True:
+            show_vod_info = self._download_json(
+                f'{self._API_URL}/vod-shows/now.{show_id}', show_id,
+                query={'page': page, 'page_size': page_size},
+                note=f'Downloading JSON vod list for show {show_id} - page {page}'
+            )['response']['result']
+            for v in show_vod_info.get('vod_list') or []:
+                yield self._extract_replay(show_id, v['id'])
+
+            if len(show_vod_info.get('vod_list') or []) < page_size:
+                break
+            page += 1
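
Both listing generators page through the API the same way: keep requesting pages until one comes back shorter than page_size. The idiom, distilled into a standalone sketch (fetch_page is a hypothetical stand-in for the _download_json call):

    import itertools

    def iter_pages(fetch_page, page_size=15):
        # fetch_page(page) -> list of items; a short (or empty) page marks the end.
        for page in itertools.count(1):
            items = fetch_page(page) or []
            yield from items
            if len(items) < page_size:
                break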
+
+    def _extract_show_highlights(self, show_id, highlight_id=None):
+        page_size = 10
+        page = 1
+        while True:
+            highlights_videos = self._download_json(
+                f'{self._API_URL}/shows/now.{show_id}/highlights/videos/', show_id,
+                query={'page': page, 'page_size': page_size},
+                note=f'Downloading JSON highlights for show {show_id} - page {page}')
+
+            for highlight in highlights_videos.get('results') or []:
+                if highlight_id and highlight.get('clip_no') != int(highlight_id):
+                    continue
+                yield merge_dicts({
+                    'id': f'{show_id}-{highlight["clip_no"]}',
+                    'title': highlight.get('title'),
+                    'timestamp': unified_timestamp(highlight.get('regdate')),
+                    'thumbnail': highlight.get('thumbnail_url'),
+                }, self._extract_video_info(highlight['clip_no'], highlight['video_id'], highlight['video_inkey']))
+
+            if len(highlights_videos.get('results') or []) < page_size:
+                break
+            page += 1
+
+    def _extract_highlight(self, show_id, highlight_id):
+        try:
+            return next(self._extract_show_highlights(show_id, highlight_id))
+        except StopIteration:
+            raise ExtractorError(f'Unable to find highlight {highlight_id} for show {show_id}')
+
+    def _real_extract(self, url):
+        show_id = self._match_id(url)
+        qs = parse_qs(urlparse(url).query)
+
+        if not self._yes_playlist(show_id, qs.get('shareHightlight')):
+            return self._extract_highlight(show_id, qs['shareHightlight'][0])
+        elif not self._yes_playlist(show_id, qs.get('shareReplayId')):
+            return self._extract_replay(show_id, qs['shareReplayId'][0])
+
+        show_info = self._download_json(
+            f'{self._API_URL}/shows/now.{show_id}/', show_id,
+            note=f'Downloading JSON vod list for show {show_id}')
+
+        return self.playlist_result(
+            itertools.chain(self._extract_show_replays(show_id), self._extract_show_highlights(show_id)),
+            show_id, show_info.get('title'))
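
_real_extract dispatches on the share parameters in the URL query; 'shareHightlight' (sic) is the site's own spelling of the parameter, so it must not be "corrected". A standalone look at the parsing it relies on:

    from urllib.parse import urlparse, parse_qs

    url = 'https://now.naver.com/s/now.4759?shareHightlight=26601461#highlight='
    qs = parse_qs(urlparse(url).query)
    print(qs.get('shareHightlight'))  # ['26601461']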