Mirror of https://github.com/yt-dlp/yt-dlp.git
[cleanup] Minor fixes
commit 21633673c3
parent 80e8493ee7
8 changed files with 26 additions and 18 deletions

@@ -785,7 +785,7 @@
 * [build] Improvements
 * Build standalone MacOS packages by [smplayer-dev](https://github.com/smplayer-dev)
 * Release windows exe built with `py2exe`
-* Enable lazy-extractors in releases.
+* Enable lazy-extractors in releases
 * Set env var `YTDLP_NO_LAZY_EXTRACTORS` to forcefully disable this (experimental)
 * Clean up error reporting in update
 * Refactor `pyinst.py`, misc cleanup and improve docs

@@ -2031,7 +2031,7 @@
 * **Format Sort:** Added `--format-sort` (`-S`), `--format-sort-force` (`--S-force`) - See [Sorting Formats](README.md#sorting-formats) for details
 * **Format Selection:** See [Format Selection](README.md#format-selection) for details
 * New format selectors: `best*`, `worst*`, `bestvideo*`, `bestaudio*`, `worstvideo*`, `worstaudio*`
-* Changed video format sorting to show video only files and video+audio files together.
+* Changed video format sorting to show video only files and video+audio files together
 * Added `--video-multistreams`, `--no-video-multistreams`, `--audio-multistreams`, `--no-audio-multistreams`
 * Added `b`,`w`,`v`,`a` as alias for `best`, `worst`, `video` and `audio` respectively
 * Shortcut Options: Added `--write-link`, `--write-url-link`, `--write-webloc-link`, `--write-desktop-link` by [h-h-h-h](https://github.com/h-h-h-h) - See [Internet Shortcut Options](README.md#internet-shortcut-options) for details

@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
-import os
 import optparse
+import os
 import sys
 from inspect import getsource

@@ -1924,7 +1924,7 @@ class YoutubeDL:
     and download
     and (
         not can_merge()
-        or info_dict.get('is_live', False)
+        or info_dict.get('is_live') and not self.params.get('live_from_start')
         or self.outtmpl_dict['default'] == '-'))
 compat = (
     prefer_best
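
The added `live_from_start` check narrows when a live stream satisfies this part of the `prefer_best` condition: a stream downloaded with `--live-from-start` no longer does. A minimal sketch of just that sub-expression (the helper name is made up for illustration):

```python
# Sketch of the changed sub-expression only, not the full prefer_best logic.
def live_forces_condition(info_dict, params):
    return bool(info_dict.get('is_live') and not params.get('live_from_start'))

print(live_forces_condition({'is_live': True}, {}))                         # True
print(live_forces_condition({'is_live': True}, {'live_from_start': True}))  # False
print(live_forces_condition({'is_live': False}, {}))                        # False
```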

@@ -869,6 +869,7 @@ def main(argv=None):
 
+
 from .extractor import gen_extractors, list_extractors
 
 __all__ = [
     'main',
     'YoutubeDL',

@@ -1343,7 +1343,7 @@ class InfoExtractor:
     return self._og_search_property('url', html, **kargs)
 
 def _html_extract_title(self, html, name='title', *, fatal=False, **kwargs):
-    return self._html_search_regex(r'(?s)<title>([^<]+)</title>', html, name, fatal=fatal, **kwargs)
+    return self._html_search_regex(r'(?s)<title\b[^>]*>([^<]+)</title>', html, name, fatal=fatal, **kwargs)
 
 def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
     name = variadic(name)
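
The widened pattern also matches a `<title>` tag that carries attributes, which the old pattern silently missed. A quick standalone check (the sample HTML is made up):

```python
import re

OLD = r'(?s)<title>([^<]+)</title>'
NEW = r'(?s)<title\b[^>]*>([^<]+)</title>'

html = '<head><title data-react-helmet="true">Some video title</title></head>'

print(re.search(OLD, html))            # None - the old pattern required a bare <title>
print(re.search(NEW, html).group(1))   # 'Some video title'
```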

@@ -1509,8 +1509,9 @@
 'url': url_or_none(e.get('contentUrl')),
 'title': unescapeHTML(e.get('name')),
 'description': unescapeHTML(e.get('description')),
-'thumbnails': [{'url': url_or_none(url)}
-               for url in variadic(traverse_obj(e, 'thumbnailUrl', 'thumbnailURL'))],
+'thumbnails': [{'url': url}
+               for url in variadic(traverse_obj(e, 'thumbnailUrl', 'thumbnailURL'))
+               if url_or_none(url)],
 'duration': parse_duration(e.get('duration')),
 'timestamp': unified_timestamp(e.get('uploadDate')),
 # author can be an instance of 'Organization' or 'Person' types.
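
Moving the `url_or_none` check into the comprehension's `if` clause drops invalid thumbnail URLs entirely, instead of emitting `{'url': None}` entries. A simplified sketch with stand-in helpers (the real `url_or_none` and `variadic` live in `yt_dlp.utils` and are stricter):

```python
def url_or_none(url):  # stand-in: the real helper validates the scheme more carefully
    return url if isinstance(url, str) and url.startswith(('http://', 'https://', '//')) else None

def variadic(x):  # stand-in: wrap a single value in a tuple, pass sequences through
    return x if isinstance(x, (list, tuple)) else (x,)

thumbnail_urls = ['https://example.com/thumb.jpg', '', None]

old = [{'url': url_or_none(url)} for url in variadic(thumbnail_urls)]
new = [{'url': url} for url in variadic(thumbnail_urls) if url_or_none(url)]

print(old)  # [{'url': 'https://example.com/thumb.jpg'}, {'url': None}, {'url': None}]
print(new)  # [{'url': 'https://example.com/thumb.jpg'}]
```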

@@ -2803,13 +2804,18 @@
 mime_type = representation_attrib['mimeType']
 content_type = representation_attrib.get('contentType', mime_type.split('/')[0])
 
-codecs = parse_codecs(representation_attrib.get('codecs', ''))
+codec_str = representation_attrib.get('codecs', '')
+# Some kind of binary subtitle found in some youtube livestreams
+if mime_type == 'application/x-rawcc':
+    codecs = {'scodec': codec_str}
+else:
+    codecs = parse_codecs(codec_str)
 if content_type not in ('video', 'audio', 'text'):
     if mime_type == 'image/jpeg':
         content_type = mime_type
-    elif codecs['vcodec'] != 'none':
+    elif codecs.get('vcodec', 'none') != 'none':
         content_type = 'video'
-    elif codecs['acodec'] != 'none':
+    elif codecs.get('acodec', 'none') != 'none':
         content_type = 'audio'
     elif codecs.get('scodec', 'none') != 'none':
         content_type = 'text'
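
Because the `application/x-rawcc` branch builds a dict containing only `scodec`, the later checks switch from indexing to `.get(..., 'none')`, so a missing `vcodec`/`acodec` key counts as 'none' instead of raising `KeyError`. A minimal illustration (the codec string is hypothetical):

```python
codecs = {'scodec': 'rawcc'}  # hypothetical result of the x-rawcc branch above

# codecs['vcodec']                    # would raise KeyError
print(codecs.get('vcodec', 'none'))   # 'none' -> not treated as a video track
print(codecs.get('scodec', 'none'))   # 'rawcc' -> classified as 'text' by the chain above
```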

@@ -10,7 +10,6 @@ from ..utils import (
     WebSocketsWrapper,
     js_to_json,
     sanitized_Request,
-    std_headers,
     traverse_obj,
     update_url_query,
     urlencode_postdata,

@@ -207,7 +206,7 @@ class FC2LiveIE(InfoExtractor):
     'Cookie': str(self._get_cookies('https://live.fc2.com/'))[12:],
     'Origin': 'https://live.fc2.com',
     'Accept': '*/*',
-    'User-Agent': std_headers['User-Agent'],
+    'User-Agent': self.get_param('http_headers')['User-Agent'],
 })
 
 self.write_debug('[debug] Sending HLS server request')
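
Reading the User-Agent from `self.get_param('http_headers')` rather than the module-level `std_headers` means the request follows whatever headers the running `YoutubeDL` instance was configured with, for example a `--user-agent` override. A rough sketch of the idea, assuming the instance merges caller-supplied headers into `params['http_headers']`:

```python
from yt_dlp import YoutubeDL

# Assumption: YoutubeDL folds caller-supplied http_headers over its defaults,
# so an extractor reading self.get_param('http_headers') sees the override.
ydl = YoutubeDL({'http_headers': {'User-Agent': 'my-custom-agent/1.0'}})
print(ydl.params['http_headers']['User-Agent'])  # expected: 'my-custom-agent/1.0'
```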

@@ -1,3 +1,5 @@
+import itertools
+
 from .common import InfoExtractor
 from ..compat import compat_str
 from ..utils import (

@@ -9,8 +11,6 @@ from ..utils import (
     unsmuggle_url,
 )
 
-import itertools
-
 
 class VoicyBaseIE(InfoExtractor):
     def _extract_from_playlist_data(self, value):

@@ -105,7 +105,7 @@ class VoicyChannelIE(VoicyBaseIE):
 
 @classmethod
 def suitable(cls, url):
-    return not VoicyIE.suitable(url) and super(VoicyChannelIE, cls).suitable(url)
+    return not VoicyIE.suitable(url) and super().suitable(url)
 
 def _entries(self, channel_id):
     pager = ''
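
Inside a class body, Python 3's zero-argument `super()` is equivalent to the explicit `super(VoicyChannelIE, cls)` form, including in classmethods, so the shorter spelling behaves identically. A generic illustration with made-up class names:

```python
class Base:
    @classmethod
    def suitable(cls, url):
        return url.startswith('https://')

class Channel(Base):
    @classmethod
    def suitable(cls, url):
        # Equivalent to super(Channel, cls).suitable(url) in Python 3
        return '/channel/' in url and super().suitable(url)

print(Channel.suitable('https://example.com/channel/123'))  # True
print(Channel.suitable('http://example.com/channel/123'))   # False
```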

@@ -714,7 +714,9 @@ def sanitize_path(s, force=False):
 def sanitize_url(url):
     # Prepend protocol-less URLs with `http:` scheme in order to mitigate
     # the number of unwanted failures due to missing protocol
-    if url.startswith('//'):
+    if url is None:
+        return
+    elif url.startswith('//'):
         return 'http:%s' % url
     # Fix some common typos seen so far
     COMMON_TYPOS = (
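
With the `None` guard, `sanitize_url` now passes `None` through instead of failing on `None.startswith`, while protocol-less URLs are still upgraded as before. A quick usage sketch (assuming a yt-dlp build that includes this change; the URLs are made up):

```python
from yt_dlp.utils import sanitize_url

print(sanitize_url('//example.com/video'))   # 'http://example.com/video'
print(sanitize_url(None))                    # None (previously: AttributeError)
print(sanitize_url('https://example.com/'))  # unchanged: 'https://example.com/'
```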