# archivebox/extractors/title.py
__package__ = 'archivebox.extractors'

import re

from pathlib import Path
from typing import Optional

from ..index.schema import Link, ArchiveResult, ArchiveOutput, ArchiveError
from ..util import (
    enforce_types,
    is_static_file,
    download_url,
    htmldecode,
)
from ..config import (
    TIMEOUT,
    CHECK_SSL_VALIDITY,
    SAVE_TITLE,
    CURL_BINARY,
    CURL_VERSION,
    CURL_USER_AGENT,
    setup_django,
)
from ..logging_util import TimedProgress

# Pulls the text between <title ...> and the next angle bracket.
# NOTE(review): the leading '.' in the capture group matches ANY character
# (DOTALL is set), so an empty <title></title> captures '</title' — confirm
# this edge case is acceptable before tightening the pattern.
HTML_TITLE_REGEX = re.compile(
    r'<title.*?>'     # skip past the opening <title ...> tag
    r'(.[^<>]+)',     # capture everything up to the next < or >
    re.IGNORECASE | re.MULTILINE | re.DOTALL | re.UNICODE,
)
@enforce_types
def should_save_title(link: Link, out_dir: Optional[str]=None) -> bool:
    """Decide whether the title extractor needs to run for this link.

    Skips links that already carry a usable title (anything that doesn't
    look like a bare URL) and static-file URLs, then defers to the
    SAVE_TITLE config flag.
    """
    existing_title = link.title
    # a title that doesn't start with 'http' is treated as already valid
    if existing_title and not existing_title.lower().startswith('http'):
        return False

    if is_static_file(link.url):
        return False

    return SAVE_TITLE
@enforce_types
def save_title(link: Link, out_dir: Optional[Path]=None, timeout: int=TIMEOUT) -> ArchiveResult:
    """try to guess the page's title from its content"""
    setup_django(out_dir=out_dir)
    from core.models import Snapshot

    output: ArchiveOutput = None
    # recorded for provenance only — the actual fetch below goes through download_url()
    cmd = [
        CURL_BINARY,
        '--silent',
        '--max-time', str(timeout),
        '--location',
        '--compressed',
        *(['--user-agent', '{}'.format(CURL_USER_AGENT)] if CURL_USER_AGENT else []),
        *([] if CHECK_SSL_VALIDITY else ['--insecure']),
        link.url,
    ]
    status = 'succeeded'
    timer = TimedProgress(timeout, prefix=' ')
    try:
        page_html = download_url(link.url, timeout=timeout)
        title_match = HTML_TITLE_REGEX.search(page_html)
        output = htmldecode(title_match.group(1).strip()) if title_match else None
        if not output:
            raise ArchiveError('Unable to detect page title')
        # never shorten an existing title; only replace with an equal/longer one
        if not link.title or len(output) >= len(link.title):
            Snapshot.objects.filter(url=link.url, timestamp=link.timestamp).update(title=output)
    except Exception as err:
        status = 'failed'
        output = err
    finally:
        timer.end()

    return ArchiveResult(
        cmd=cmd,
        pwd=str(out_dir),
        cmd_version=CURL_VERSION,
        output=output,
        status=status,
        **timer.stats,
    )