2019-04-27 21:26:24 +00:00
|
|
|
__package__ = 'archivebox.extractors'
|
|
|
|
|
|
|
|
|
2020-09-15 19:05:48 +00:00
|
|
|
from pathlib import Path
|
2019-04-27 21:26:24 +00:00
|
|
|
from typing import Optional, List, Dict, Tuple
|
|
|
|
from collections import defaultdict
|
|
|
|
|
2019-05-01 03:13:04 +00:00
|
|
|
from ..index.schema import Link, ArchiveResult, ArchiveOutput, ArchiveError
|
2020-06-26 02:14:40 +00:00
|
|
|
from ..system import run, chmod_file
|
2019-04-27 21:26:24 +00:00
|
|
|
from ..util import (
|
|
|
|
enforce_types,
|
|
|
|
is_static_file,
|
|
|
|
)
|
|
|
|
from ..config import (
|
|
|
|
TIMEOUT,
|
2020-10-15 13:42:46 +00:00
|
|
|
CURL_ARGS,
|
2020-06-26 02:14:40 +00:00
|
|
|
CHECK_SSL_VALIDITY,
|
2019-04-27 21:26:24 +00:00
|
|
|
SAVE_ARCHIVE_DOT_ORG,
|
|
|
|
CURL_BINARY,
|
|
|
|
CURL_VERSION,
|
2020-06-26 02:14:40 +00:00
|
|
|
CURL_USER_AGENT,
|
2019-04-27 21:26:24 +00:00
|
|
|
)
|
2020-07-22 16:02:13 +00:00
|
|
|
from ..logging_util import TimedProgress
|
2019-04-27 21:26:24 +00:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@enforce_types
def should_save_archive_dot_org(link: Link, out_dir: Optional[Path]=None) -> bool:
    """Return True if this link should be submitted to archive.org.

    Skips static asset URLs and links that already have an
    archive.org.txt result saved in their output directory;
    otherwise defers to the SAVE_ARCHIVE_DOT_ORG config flag.
    """
    target_dir = out_dir or Path(link.link_dir)

    # skip static files, and skip links that were already submitted
    # (short-circuit keeps the static check before the filesystem check)
    already_done = is_static_file(link.url) or (target_dir / "archive.org.txt").exists()
    if already_done:
        return False

    return SAVE_ARCHIVE_DOT_ORG
|
|
|
|
|
|
|
|
@enforce_types
def save_archive_dot_org(link: Link, out_dir: Optional[Path]=None, timeout: int=TIMEOUT) -> ArchiveResult:
    """submit site to archive.org for archiving via their service, save returned archive url"""

    out_dir = out_dir or Path(link.link_dir)
    # NOTE: output is overloaded below — it starts as the output filename,
    # becomes the Exception on failure, and becomes the archive.org URL on success
    output: ArchiveOutput = 'archive.org.txt'
    archive_org_url = None
    submit_url = 'https://web.archive.org/save/{}'.format(link.url)
    # fetch only the response headers (--head): the Save Page Now result URL
    # is returned in the content-location/location header, not the body
    cmd = [
        CURL_BINARY,
        *CURL_ARGS,
        '--head',
        '--max-time', str(timeout),
        *(['--user-agent', '{}'.format(CURL_USER_AGENT)] if CURL_USER_AGENT else []),
        *([] if CHECK_SSL_VALIDITY else ['--insecure']),
        submit_url,
    ]
    status = 'succeeded'
    timer = TimedProgress(timeout, prefix='  ')
    try:
        result = run(cmd, cwd=str(out_dir), timeout=timeout)
        content_location, errors = parse_archive_dot_org_response(result.stdout)
        if content_location:
            # first content-location header value is the permanent snapshot URL
            archive_org_url = content_location[0]
        elif len(errors) == 1 and 'RobotAccessControlException' in errors[0]:
            # blocked by the site's robots.txt: treated as a soft failure,
            # not an error (archive_org_url stays None, status stays 'succeeded')
            archive_org_url = None
            # raise ArchiveError('Archive.org denied by {}/robots.txt'.format(domain(link.url)))
        elif errors:
            raise ArchiveError(', '.join(errors))
        else:
            raise ArchiveError('Failed to find "content-location" URL header in Archive.org response.')
    except Exception as err:
        status = 'failed'
        output = err
    finally:
        timer.end()

    # only write the result file on the non-exception path
    if output and not isinstance(output, Exception):
        # instead of writing None when archive.org rejects the url write the
        # url to resubmit it to archive.org. This is so when the user visits
        # the URL in person, it will attempt to re-archive it, and it'll show the
        # nicer error message explaining why the url was rejected if it fails.
        archive_org_url = archive_org_url or submit_url
        with open(str(out_dir / output), 'w', encoding='utf-8') as f:
            f.write(archive_org_url)
        chmod_file('archive.org.txt', cwd=str(out_dir))
        # from here on, output is the URL that gets recorded in the index
        output = archive_org_url

    return ArchiveResult(
        cmd=cmd,
        pwd=str(out_dir),
        cmd_version=CURL_VERSION,
        output=output,
        status=status,
        **timer.stats,
    )
|
|
|
|
|
|
|
|
@enforce_types
def parse_archive_dot_org_response(response: bytes) -> Tuple[List[str], List[str]]:
    """Extract the snapshot URL(s) and any error messages from raw
    archive.org HTTP response headers.

    Returns a tuple of (content_location values, wayback runtime errors),
    falling back to the 'location' header when 'content-location' is absent.
    """
    # collect every header value under its lowercased name
    headers: Dict[str, List[str]] = defaultdict(list)
    for raw_line in response.splitlines():
        if not raw_line.strip() or b':' not in raw_line:
            continue
        key, _, value = raw_line.decode().partition(':')
        headers[key.lower().strip()].append(value.strip())

    # successful archive url lives in "content-location" (or "location");
    # failures are reported via the wayback runtime-error header
    content_location = headers.get('content-location', headers['location'])
    errors = headers['x-archive-wayback-runtime-error']
    return content_location, errors
|
|
|
|
|