mirror of
https://github.com/yt-dlp/yt-dlp.git
synced 2024-12-13 23:02:34 +00:00
[tiktok] Use API to fetch higher quality video (#843)
Authored by: MinePlayersPE, llacb47
This commit is contained in:
parent
526d74ec5a
commit
bd9ff55bcd
1 changed file with 196 additions and 33 deletions
|
@ -2,18 +2,23 @@
|
|||
from __future__ import unicode_literals
|
||||
|
||||
import itertools
|
||||
import random
|
||||
import string
|
||||
import time
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
int_or_none,
|
||||
str_or_none,
|
||||
try_get
|
||||
traverse_obj,
|
||||
try_get,
|
||||
qualities,
|
||||
)
|
||||
|
||||
|
||||
class TikTokIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://www\.tiktok\.com/@[\w\._]+/video/(?P<id>\d+)'
|
||||
_VALID_URL = r'https?://www\.tiktok\.com/@[\w\.-]+/video/(?P<id>\d+)'
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'https://www.tiktok.com/@leenabhushan/video/6748451240264420610',
|
||||
|
@ -61,13 +66,22 @@ class TikTokIE(InfoExtractor):
|
|||
'repost_count': int,
|
||||
'comment_count': int,
|
||||
}
|
||||
}, {
|
||||
# Promoted content/ad
|
||||
'url': 'https://www.tiktok.com/@MS4wLjABAAAAAR29F6J2Ktu0Daw03BJyXPNoRQ-W7U5a0Mn3lVCq2rQhjOd_WNLclHUoFgwX8Eno/video/6932675057474981122',
|
||||
'only_matching': True,
|
||||
}]
|
||||
_APP_VERSION = '20.9.3'
|
||||
_MANIFEST_APP_VERSION = '291'
|
||||
QUALITIES = ('360p', '540p', '720p')
|
||||
|
||||
def _extract_aweme(self, props_data, webpage, url):
|
||||
video_info = try_get(
|
||||
props_data, lambda x: x['pageProps']['itemInfo']['itemStruct'], dict)
|
||||
author_info = try_get(
|
||||
props_data, lambda x: x['pageProps']['itemInfo']['itemStruct']['author'], dict) or {}
|
||||
music_info = try_get(
|
||||
props_data, lambda x: x['pageProps']['itemInfo']['itemStruct']['music'], dict) or {}
|
||||
stats_info = try_get(props_data, lambda x: x['pageProps']['itemInfo']['itemStruct']['stats'], dict) or {}
|
||||
|
||||
user_id = str_or_none(author_info.get('uniqueId'))
|
||||
|
@ -99,6 +113,9 @@ class TikTokIE(InfoExtractor):
|
|||
'uploader': user_id,
|
||||
'uploader_id': str_or_none(author_info.get('id')),
|
||||
'uploader_url': f'https://www.tiktok.com/@{user_id}',
|
||||
'track': str_or_none(music_info.get('title')),
|
||||
'album': str_or_none(music_info.get('album')) or None,
|
||||
'artist': str_or_none(music_info.get('authorName')),
|
||||
'thumbnails': thumbnails,
|
||||
'description': str_or_none(video_info.get('desc')),
|
||||
'webpage_url': self._og_search_url(webpage),
|
||||
|
@ -108,9 +125,185 @@ class TikTokIE(InfoExtractor):
|
|||
}
|
||||
}
|
||||
|
||||
def _extract_aweme_app(self, aweme_id):
    """Fetch metadata and formats for *aweme_id* via the TikTok mobile app API.

    Impersonates the TikTok Android app (app-style query parameters plus a
    matching User-Agent) to obtain direct, higher-quality video URLs.

    Returns an info dict; a failed API call or a response without
    'aweme_detail'/'video' raises (KeyError surfaces from the lookups,
    network errors surface from _download_json).
    """
    query = {
        'aweme_id': aweme_id,
        'version_name': self._APP_VERSION,
        'version_code': self._MANIFEST_APP_VERSION,
        'build_number': self._APP_VERSION,
        'manifest_version_code': self._MANIFEST_APP_VERSION,
        'update_version_code': self._MANIFEST_APP_VERSION,
        # Randomised per-request device identifiers
        'openudid': ''.join(random.choice('0123456789abcdef') for i in range(16)),
        'uuid': ''.join([random.choice(string.digits) for num in range(16)]),
        '_rticket': int(time.time() * 1000),
        'ts': int(time.time()),
        'device_brand': 'Google',
        'device_type': 'Pixel 4',
        'device_platform': 'android',
        'resolution': '1080*1920',
        'dpi': 420,
        'os_version': '10',
        'os_api': '29',
        'carrier_region': 'US',
        'sys_region': 'US',
        'region': 'US',
        'app_name': 'trill',
        'app_language': 'en',
        'language': 'en',
        'timezone_name': 'America/New_York',
        'timezone_offset': '-14400',
        'channel': 'googleplay',
        'ac': 'wifi',
        'mcc_mnc': '310260',
        'is_my_cn': 0,
        'aid': 1180,
        'ssmix': 'a',
        'as': 'a1qwert123',
        'cp': 'cbfhckdckkde1',
    }

    # A (random) odin_tt cookie is required for the API to respond
    self._set_cookie('.tiktokv.com', 'odin_tt', ''.join(random.choice('0123456789abcdef') for i in range(160)))

    aweme_detail = self._download_json(
        'https://api-t2.tiktokv.com/aweme/v1/aweme/detail/', aweme_id,
        'Downloading video details', 'Unable to download video details',
        headers={
            'User-Agent': f'com.ss.android.ugc.trill/{self._MANIFEST_APP_VERSION} (Linux; U; Android 10; en_US; Pixel 4; Build/QQ3A.200805.001; Cronet/58.0.2991.0)',
        }, query=query)['aweme_detail']
    video_info = aweme_detail['video']

    def parse_url_key(url_key):
        # url_key embeds codec/resolution/bitrate, e.g. 'v...._h264_540p_800000'
        format_id, codec, res, bitrate = self._search_regex(
            r'v[^_]+_(?P<id>(?P<codec>[^_]+)_(?P<res>\d+p)_(?P<bitrate>\d+))', url_key,
            'url key', default=(None, None, None, None), group=('id', 'codec', 'res', 'bitrate'))
        if not format_id:
            return {}, None
        return {
            'format_id': format_id,
            'vcodec': 'h265' if codec == 'bytevc1' else codec,
            'tbr': int_or_none(bitrate, scale=1000) or None,
            'quality': qualities(self.QUALITIES)(res),
        }, res

    # Shared across extract_addr calls so width/height learned from one
    # address can fill in another address of the same resolution
    known_resolutions = {}

    def extract_addr(addr, add_meta=None):
        # None sentinel instead of a mutable default: the body mutates
        # add_meta via setdefault, which would leak state across calls
        if add_meta is None:
            add_meta = {}
        parsed_meta, res = parse_url_key(addr.get('url_key', ''))
        if res:
            known_resolutions.setdefault(res, {}).setdefault('height', add_meta.get('height'))
            known_resolutions[res].setdefault('width', add_meta.get('width'))
            parsed_meta.update(known_resolutions.get(res, {}))
            # Fall back to the height implied by e.g. '540p'
            add_meta.setdefault('height', int_or_none(res[:-1]))
        return [{
            'url': url,
            'filesize': int_or_none(addr.get('data_size')),
            'ext': 'mp4',
            'acodec': 'aac',
            **add_meta, **parsed_meta
        } for url in addr.get('url_list') or []]

    # Hack: Add direct video links first to prioritize them when removing duplicate formats
    formats = []
    if video_info.get('play_addr'):
        formats.extend(extract_addr(video_info['play_addr'], {
            'format_id': 'play_addr',
            'format_note': 'Direct video',
            'vcodec': 'h265' if traverse_obj(
                video_info, 'is_bytevc1', 'is_h265') else 'h264',  # Always h264?
            'width': video_info.get('width'),
            'height': video_info.get('height'),
        }))
    if video_info.get('download_addr'):
        formats.extend(extract_addr(video_info['download_addr'], {
            'format_id': 'download_addr',
            'format_note': 'Download video%s' % (', watermarked' if video_info.get('has_watermark') else ''),
            'vcodec': 'h264',
            'width': video_info.get('width'),
            'height': video_info.get('height'),
            # Deprioritize watermarked downloads
            'source_preference': -2 if video_info.get('has_watermark') else -1,
        }))
    if video_info.get('play_addr_h264'):
        formats.extend(extract_addr(video_info['play_addr_h264'], {
            'format_id': 'play_addr_h264',
            'format_note': 'Direct video',
            'vcodec': 'h264',
        }))
    if video_info.get('play_addr_bytevc1'):
        formats.extend(extract_addr(video_info['play_addr_bytevc1'], {
            'format_id': 'play_addr_bytevc1',
            'format_note': 'Direct video',
            'vcodec': 'h265',
        }))

    for bitrate in video_info.get('bit_rate', []):
        if bitrate.get('play_addr'):
            formats.extend(extract_addr(bitrate['play_addr'], {
                'format_id': bitrate.get('gear_name'),
                'format_note': 'Playback video',
                'tbr': try_get(bitrate, lambda x: x['bit_rate'] / 1000),
                'vcodec': 'h265' if traverse_obj(
                    bitrate, 'is_bytevc1', 'is_h265') else 'h264',
            }))

    self._remove_duplicate_formats(formats)
    self._sort_formats(formats, ('quality', 'source', 'codec', 'size', 'br'))

    thumbnails = []
    for cover_id in ('cover', 'ai_dynamic_cover', 'animated_cover', 'ai_dynamic_cover_bak',
                     'origin_cover', 'dynamic_cover'):
        cover = video_info.get(cover_id)
        if cover:
            for cover_url in cover['url_list']:
                thumbnails.append({
                    'id': cover_id,
                    'url': cover_url,
                })

    stats_info = aweme_detail.get('statistics', {})
    author_info = aweme_detail.get('author', {})
    music_info = aweme_detail.get('music', {})
    user_id = str_or_none(author_info.get('nickname'))

    contained_music_track = traverse_obj(
        music_info, ('matched_song', 'title'), ('matched_pgc_sound', 'title'), expected_type=str)
    contained_music_author = traverse_obj(
        music_info, ('matched_song', 'author'), ('matched_pgc_sound', 'author'), 'author', expected_type=str)

    # 'original sound - <owner>' is an auto-generated placeholder title;
    # prefer the actual matched song metadata in that case
    is_generic_og_trackname = music_info.get('is_original_sound') and music_info.get('title') == 'original sound - %s' % music_info.get('owner_handle')
    if is_generic_og_trackname:
        music_track, music_author = contained_music_track or 'original sound', contained_music_author
    else:
        music_track, music_author = music_info.get('title'), music_info.get('author')

    return {
        'id': aweme_id,
        'title': aweme_detail['desc'],
        'description': aweme_detail['desc'],
        'view_count': int_or_none(stats_info.get('play_count')),
        'like_count': int_or_none(stats_info.get('digg_count')),
        'repost_count': int_or_none(stats_info.get('share_count')),
        'comment_count': int_or_none(stats_info.get('comment_count')),
        'uploader': str_or_none(author_info.get('unique_id')),
        'creator': user_id,
        'uploader_id': str_or_none(author_info.get('uid')),
        'uploader_url': f'https://www.tiktok.com/@{user_id}' if user_id else None,
        'track': music_track,
        'album': str_or_none(music_info.get('album')) or None,
        'artist': music_author,
        'timestamp': int_or_none(aweme_detail.get('create_time')),
        'formats': formats,
        'thumbnails': thumbnails,
        'duration': int_or_none(traverse_obj(video_info, 'duration', ('download_addr', 'duration')), scale=1000)
    }
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
|
||||
try:
|
||||
return self._extract_aweme_app(video_id)
|
||||
except ExtractorError as e:
|
||||
self.report_warning(f'{e}; Retrying with webpage')
|
||||
|
||||
# If we only make one request, we get a 403 when downloading the video.
|
||||
self._download_webpage(url, video_id)
|
||||
webpage = self._download_webpage(url, video_id, note='Downloading video webpage')
|
||||
|
@ -165,37 +358,7 @@ class TikTokUserIE(InfoExtractor):
|
|||
for video in data_json.get('itemList', []):
|
||||
video_id = video['id']
|
||||
video_url = f'https://www.tiktok.com/@{user_id}/video/{video_id}'
|
||||
download_url = try_get(video, (lambda x: x['video']['playAddr'],
|
||||
lambda x: x['video']['downloadAddr']))
|
||||
thumbnail = try_get(video, lambda x: x['video']['originCover'])
|
||||
height = try_get(video, lambda x: x['video']['height'], int)
|
||||
width = try_get(video, lambda x: x['video']['width'], int)
|
||||
yield {
|
||||
'id': video_id,
|
||||
'ie_key': TikTokIE.ie_key(),
|
||||
'extractor': 'TikTok',
|
||||
'url': download_url,
|
||||
'ext': 'mp4',
|
||||
'height': height,
|
||||
'width': width,
|
||||
'title': str_or_none(video.get('desc')),
|
||||
'duration': try_get(video, lambda x: x['video']['duration'], int),
|
||||
'view_count': try_get(video, lambda x: x['stats']['playCount'], int),
|
||||
'like_count': try_get(video, lambda x: x['stats']['diggCount'], int),
|
||||
'comment_count': try_get(video, lambda x: x['stats']['commentCount'], int),
|
||||
'repost_count': try_get(video, lambda x: x['stats']['shareCount'], int),
|
||||
'timestamp': video.get('createTime'),
|
||||
'creator': try_get(video, lambda x: x['author']['nickname'], str),
|
||||
'uploader': try_get(video, lambda x: x['author']['uniqueId'], str),
|
||||
'uploader_id': try_get(video, lambda x: x['author']['id'], str),
|
||||
'uploader_url': f'https://www.tiktok.com/@{user_id}',
|
||||
'thumbnails': [{'url': thumbnail, 'height': height, 'width': width}],
|
||||
'description': str_or_none(video.get('desc')),
|
||||
'webpage_url': video_url,
|
||||
'http_headers': {
|
||||
'Referer': video_url,
|
||||
}
|
||||
}
|
||||
yield self._url_result(video_url, 'TikTok', video_id, str_or_none(video.get('desc')))
|
||||
if not data_json.get('hasMore'):
|
||||
break
|
||||
cursor = data_json['cursor']
|
||||
|
|
Loading…
Reference in a new issue