import os
import re
import sys

from functools import wraps
from collections import defaultdict
from datetime import datetime

from peekable import Peekable

from index import wget_output_path, parse_json_link_index, write_link_index
from links import links_after_timestamp
from config import (
    CHROME_BINARY,
    FETCH_WGET,
    FETCH_WGET_REQUISITES,
    FETCH_PDF,
    FETCH_SCREENSHOT,
    FETCH_DOM,
    FETCH_WARC,
    FETCH_GIT,
    FETCH_MEDIA,
    RESOLUTION,
    CHECK_SSL_VALIDITY,
    SUBMIT_ARCHIVE_DOT_ORG,
    FETCH_FAVICON,
    WGET_USER_AGENT,
    CHROME_USER_DATA_DIR,
    CHROME_SANDBOX,
    TIMEOUT,
    MEDIA_TIMEOUT,
    ANSI,
    ARCHIVE_DIR,
    GIT_DOMAINS,
    GIT_SHA,
)
from util import (
    check_dependencies,
    progress,
    chmod_file,
    pretty_path,
    run, PIPE, DEVNULL,
)


_RESULTS_TOTALS = {   # globals are bad, mmkay
    'skipped': 0,
    'succeded': 0,
    'failed': 0,
}


def archive_links(archive_path, links, source=None, resume=None):
    check_dependencies()

    to_archive = Peekable(links_after_timestamp(links, resume))
    idx, link = 0, to_archive.peek(0)

    try:
        for idx, link in enumerate(to_archive):
            link_dir = os.path.join(ARCHIVE_DIR, link['timestamp'])
            archive_link(link_dir, link)

    except (KeyboardInterrupt, SystemExit, Exception) as e:
        print('{lightyellow}[X] [{now}] Downloading paused on link {timestamp} ({idx}/{total}){reset}'.format(
            **ANSI,
            now=datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            idx=idx+1,
            timestamp=link['timestamp'],
            total=len(links),
        ))
        print('    Continue where you left off by running:')
        print('        {} {}'.format(
            pretty_path(sys.argv[0]),
            link['timestamp'],
        ))
        if not isinstance(e, KeyboardInterrupt):
            raise e
        raise SystemExit(1)
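
# Illustrative call only (the real entry point lives outside this file, so the exact invocation
# is an assumption): each link is a dict with at least 'url', 'domain', 'timestamp', 'title',
# and 'type' keys, and gets archived into ARCHIVE_DIR/<timestamp>/ by archive_link() below.
#
#   links = [{'url': 'https://example.com', 'domain': 'example.com',
#             'timestamp': '1546473120', 'title': 'Example', 'type': None}]
#   archive_links(ARCHIVE_DIR, links, source='bookmarks.html', resume=None)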


def archive_link(link_dir, link, overwrite=True):
    """download the DOM, PDF, and a screenshot into a folder named after the link's timestamp"""

    try:
        update_existing = os.path.exists(link_dir)
        if update_existing:
            link = {
                **parse_json_link_index(link_dir),
                **link,
            }
        else:
            os.makedirs(link_dir)

        log_link_archive(link_dir, link, update_existing)

        if FETCH_FAVICON:
            link = fetch_favicon(link_dir, link, overwrite=overwrite)

        if FETCH_WGET:
            link = fetch_wget(link_dir, link, overwrite=overwrite)

        if FETCH_PDF:
            link = fetch_pdf(link_dir, link, overwrite=overwrite)

        if FETCH_SCREENSHOT:
            link = fetch_screenshot(link_dir, link, overwrite=overwrite)

        if FETCH_DOM:
            link = fetch_dom(link_dir, link, overwrite=overwrite)

        if SUBMIT_ARCHIVE_DOT_ORG:
            link = archive_dot_org(link_dir, link, overwrite=overwrite)

        if FETCH_GIT:
            link = fetch_git(link_dir, link, overwrite=overwrite)

        if FETCH_MEDIA:
            link = fetch_media(link_dir, link, overwrite=overwrite)

        write_link_index(link_dir, link)

    except Exception as err:
        print('    ! Failed to archive link: {}: {}'.format(err.__class__.__name__, err))

    return link
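
# Rough sketch of a link folder after archive_link() has run with every FETCH_* option enabled
# (actual contents depend on the config flags and on what each fetcher managed to save):
#
#   <ARCHIVE_DIR>/<timestamp>/
#       favicon.ico
#       <domain>/...          wget mirror of the page
#       warc/                 WARC capture (when FETCH_WARC is on)
#       output.pdf
#       screenshot.png
#       output.html
#       archive.org.txt
#       git/                  cloned source (git links only)
#       media/                audio/video (youtube-dl)
#       plus the index files written by write_link_index()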


def log_link_archive(link_dir, link, update_existing):
    print('[{symbol_color}{symbol}{reset}] [{now}] "{title}"\n    {blue}{url}{reset}'.format(
        symbol='*' if update_existing else '+',
        symbol_color=ANSI['black' if update_existing else 'green'],
        now=datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        **link,
        **ANSI,
    ))

    print('    > {}{}'.format(pretty_path(link_dir), '' if update_existing else ' (new)'))
    if link['type']:
        print('      i {}'.format(link['type']))


def attach_result_to_link(method):
    """
    Instead of returning a result={output:'...', status:'success'} object,
    attach that result to the link's history & latest fields, then return
    the updated link object.
    """
    def decorator(fetch_func):
        @wraps(fetch_func)
        def timed_fetch_func(link_dir, link, overwrite=False, **kwargs):
            # initialize methods and history json field on link
            link['latest'] = link.get('latest') or {}
            link['latest'][method] = link['latest'].get(method) or None
            link['history'] = link.get('history') or {}
            link['history'][method] = link['history'].get(method) or []

            start_ts = datetime.now().timestamp()

            # if a valid method output is already present, don't run the fetch function
            if link['latest'][method] and not overwrite:
                print('      √ {}'.format(method))
                result = None
            else:
                print('      > {}'.format(method))
                result = fetch_func(link_dir, link, **kwargs)

            end_ts = datetime.now().timestamp()
            duration = str(end_ts * 1000 - start_ts * 1000).split('.')[0]

            # append a history item recording fail/success
            history_entry = {
                'timestamp': str(start_ts).split('.')[0],
            }
            if result is None:
                history_entry['status'] = 'skipped'
            elif isinstance(result.get('output'), Exception):
                history_entry['status'] = 'failed'
                history_entry['duration'] = duration
                history_entry.update(result or {})
                link['history'][method].append(history_entry)
            else:
                history_entry['status'] = 'succeded'
                history_entry['duration'] = duration
                history_entry.update(result or {})
                link['history'][method].append(history_entry)
                link['latest'][method] = result['output']

            _RESULTS_TOTALS[history_entry['status']] += 1

            return link
        return timed_fetch_func
    return decorator
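
# For illustration only (never executed): after a method decorated with
# attach_result_to_link('wget') has run, the link dict looks roughly like this.
# The URL, timestamps, and output path below are made-up placeholder values;
# only the key names come from the code above.
#
#   {
#       'url': 'https://example.com',
#       'timestamp': '1546473120',
#       ...
#       'latest': {'wget': 'example.com/index.html'},
#       'history': {
#           'wget': [
#               {'timestamp': '1546473121', 'status': 'succeded', 'duration': '1234',
#                'cmd': ['wget', ...], 'output': 'example.com/index.html'},
#           ],
#       },
#   }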


@attach_result_to_link('wget')
def fetch_wget(link_dir, link, requisites=FETCH_WGET_REQUISITES, warc=FETCH_WARC, timeout=TIMEOUT):
    """download full site using wget"""

    domain_dir = os.path.join(link_dir, link['domain'])
    existing_file = wget_output_path(link)
    if os.path.exists(domain_dir) and existing_file:
        return {'output': existing_file, 'status': 'skipped'}

    if warc:
        warc_dir = os.path.join(link_dir, 'warc')
        os.makedirs(warc_dir, exist_ok=True)
        warc_path = os.path.join('warc', str(int(datetime.now().timestamp())))

    # WGET CLI Docs: https://www.gnu.org/software/wget/manual/wget.html
    CMD = [
        'wget',
        # '--server-response',  # print headers for better error parsing
        '--no-verbose',
        '--adjust-extension',
        '--convert-links',
        '--force-directories',
        '--backup-converted',
        '--span-hosts',
        '--no-parent',
        '--restrict-file-names=unix',
        '--timeout={}'.format(timeout),
        *(() if warc else ('--timestamping',)),
        *(('--warc-file={}'.format(warc_path),) if warc else ()),
        *(('--page-requisites',) if requisites else ()),
        *(('--user-agent={}'.format(WGET_USER_AGENT),) if WGET_USER_AGENT else ()),
        *(() if CHECK_SSL_VALIDITY else ('--no-check-certificate',)),
        link['url'],
    ]
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)  # index.html
        end()
        output = wget_output_path(link, look_in=domain_dir)

        output_tail = ['          ' + line for line in (result.stdout + result.stderr).decode().rsplit('\n', 3)[-3:] if line.strip()]

        # parse out number of files downloaded from "Downloaded: 76 files, 4.0M in 1.6s (2.52 MB/s)"
        files_downloaded = (
            int(output_tail[-1].strip().split(' ', 2)[1] or 0)
            if 'Downloaded:' in output_tail[-1]
            else 0
        )

        # Check for common failure cases
        if result.returncode > 0 and files_downloaded < 1:
            print('        Got wget response code {}:'.format(result.returncode))
            print('\n'.join(output_tail))
            if b'403: Forbidden' in result.stderr:
                raise Exception('403 Forbidden (try changing WGET_USER_AGENT)')
            if b'404: Not Found' in result.stderr:
                raise Exception('404 Not Found')
            if b'ERROR 500: Internal Server Error' in result.stderr:
                raise Exception('500 Internal Server Error')
            raise Exception('Got an error from the server')
    except Exception as e:
        end()
        print('        {}Some resources were skipped: {}{}'.format(ANSI['lightyellow'], e, ANSI['reset']))
        print('        Run to see full output:')
        print('            cd {};'.format(link_dir))
        print('            {}'.format(' '.join(CMD).replace(WGET_USER_AGENT, '"{}"'.format(WGET_USER_AGENT))))
        output = e

    return {
        'cmd': CMD,
        'output': output,
    }
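
# For a sense of scale, the CMD list above expands to roughly the following shell command.
# The URL and WARC timestamp are placeholders, and the exact flags depend on FETCH_WARC,
# FETCH_WGET_REQUISITES, WGET_USER_AGENT, and CHECK_SSL_VALIDITY:
#
#   wget --no-verbose --adjust-extension --convert-links --force-directories \
#       --backup-converted --span-hosts --no-parent --restrict-file-names=unix \
#       --timeout=60 --warc-file=warc/1546473120 --page-requisites \
#       --user-agent="..." 'https://example.com'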


@attach_result_to_link('pdf')
def fetch_pdf(link_dir, link, timeout=TIMEOUT, user_data_dir=CHROME_USER_DATA_DIR):
    """print PDF of site to file using chrome --headless"""

    if link['type'] in ('PDF', 'image'):
        return {'output': wget_output_path(link)}

    if os.path.exists(os.path.join(link_dir, 'output.pdf')):
        return {'output': 'output.pdf', 'status': 'skipped'}

    CMD = [
        *chrome_headless(user_data_dir=user_data_dir),
        '--print-to-pdf',
        '--hide-scrollbars',
        '--timeout={}'.format(timeout * 1000),
        *(() if CHECK_SSL_VALIDITY else ('--disable-web-security', '--ignore-certificate-errors')),
        link['url'],
    ]
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)  # output.pdf
        end()
        if result.returncode:
            print('        ', (result.stderr or result.stdout).decode())
            raise Exception('Failed to print PDF')
        chmod_file('output.pdf', cwd=link_dir)
        output = 'output.pdf'
    except Exception as e:
        end()
        print('        {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        print('        Run to see full output:')
        print('            cd {};'.format(link_dir))
        print('            {}'.format(' '.join(CMD)))
        output = e

    return {
        'cmd': CMD,
        'output': output,
    }


@attach_result_to_link('screenshot')
def fetch_screenshot(link_dir, link, timeout=TIMEOUT, user_data_dir=CHROME_USER_DATA_DIR, resolution=RESOLUTION):
    """take screenshot of site using chrome --headless"""

    if link['type'] in ('PDF', 'image'):
        return {'output': wget_output_path(link)}

    if os.path.exists(os.path.join(link_dir, 'screenshot.png')):
        return {'output': 'screenshot.png', 'status': 'skipped'}

    CMD = [
        *chrome_headless(user_data_dir=user_data_dir),
        '--screenshot',
        '--window-size={}'.format(resolution),
        '--hide-scrollbars',
        '--timeout={}'.format(timeout * 1000),
        *(() if CHECK_SSL_VALIDITY else ('--disable-web-security', '--ignore-certificate-errors')),
        # '--full-page',   # TODO: make this actually work using ./bin/screenshot fullPage: true
        link['url'],
    ]
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)  # screenshot.png
        end()
        if result.returncode:
            print('        ', (result.stderr or result.stdout).decode())
            raise Exception('Failed to take screenshot')
        chmod_file('screenshot.png', cwd=link_dir)
        output = 'screenshot.png'
    except Exception as e:
        end()
        print('        {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        print('        Run to see full output:')
        print('            cd {};'.format(link_dir))
        print('            {}'.format(' '.join(CMD)))
        output = e

    return {
        'cmd': CMD,
        'output': output,
    }


@attach_result_to_link('dom')
def fetch_dom(link_dir, link, timeout=TIMEOUT, user_data_dir=CHROME_USER_DATA_DIR):
    """print HTML of site to file using chrome --dump-dom"""

    if link['type'] in ('PDF', 'image'):
        return {'output': wget_output_path(link)}

    output_path = os.path.join(link_dir, 'output.html')

    if os.path.exists(output_path):
        return {'output': 'output.html', 'status': 'skipped'}

    CMD = [
        *chrome_headless(user_data_dir=user_data_dir),
        '--dump-dom',
        '--timeout={}'.format(timeout * 1000),
        link['url'],
    ]
    end = progress(timeout, prefix='      ')
    try:
        with open(output_path, 'w+') as f:
            result = run(CMD, stdout=f, stderr=PIPE, cwd=link_dir, timeout=timeout)  # output.html
        end()
        if result.returncode:
            print('        ', (result.stderr).decode())
            raise Exception('Failed to fetch DOM')
        chmod_file('output.html', cwd=link_dir)
        output = 'output.html'
    except Exception as e:
        end()
        print('        {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        print('        Run to see full output:')
        print('            cd {};'.format(link_dir))
        print('            {}'.format(' '.join(CMD)))
        output = e

    return {
        'cmd': CMD,
        'output': output,
    }


@attach_result_to_link('archive_org')
def archive_dot_org(link_dir, link, timeout=TIMEOUT):
    """submit site to archive.org for archiving via their service, save returned archive url"""

    path = os.path.join(link_dir, 'archive.org.txt')
    if os.path.exists(path):
        archive_org_url = open(path, 'r').read().strip()
        return {'output': archive_org_url, 'status': 'skipped'}

    submit_url = 'https://web.archive.org/save/{}'.format(link['url'])

    success = False
    CMD = [
        'curl',
        '--location',
        '--head',
        '--user-agent', 'ArchiveBox/{} (+https://github.com/pirate/ArchiveBox/)'.format(GIT_SHA),
        '--max-time', str(timeout),
        '--get',
        *(() if CHECK_SSL_VALIDITY else ('--insecure',)),
        submit_url,
    ]
    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=DEVNULL, cwd=link_dir, timeout=timeout)  # archive.org.txt
        end()

        # Parse archive.org response headers
        headers = defaultdict(list)

        # lowercase all the header names and store in dict
        for header in result.stdout.splitlines():
            if b':' not in header or not header.strip():
                continue
            name, val = header.decode().split(':', 1)
            headers[name.lower().strip()].append(val.strip())

        # Get successful archive url in "content-location" header or any errors
        content_location = headers['content-location']
        errors = headers['x-archive-wayback-runtime-error']

        if content_location:
            saved_url = 'https://web.archive.org{}'.format(content_location[0])
            success = True
        elif len(errors) == 1 and 'RobotAccessControlException' in errors[0]:
            output = submit_url
            # raise Exception('Archive.org denied by {}/robots.txt'.format(link['domain']))
        elif errors:
            raise Exception(', '.join(errors))
        else:
            raise Exception('Failed to find "content-location" URL header in Archive.org response.')
    except Exception as e:
        end()
        print('        {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        print('        Run to see full output:')
        print('            {}'.format(' '.join(CMD)))
        output = e

    if success:
        with open(os.path.join(link_dir, 'archive.org.txt'), 'w', encoding='utf-8') as f:
            f.write(saved_url)
        chmod_file('archive.org.txt', cwd=link_dir)
        output = saved_url

    return {
        'cmd': CMD,
        'output': output,
    }
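
# Rough shape of the archive.org response the parser above expects (illustrative, not a real
# capture): a successful submission answers the HEAD request with a header along the lines of
#
#   Content-Location: /web/20190205000000/https://example.com/
#
# which gets prefixed with https://web.archive.org, while failures surface through an
# X-Archive-Wayback-Runtime-Error header instead.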


@attach_result_to_link('favicon')
def fetch_favicon(link_dir, link, timeout=TIMEOUT):
    """download site favicon from google's favicon api"""

    if os.path.exists(os.path.join(link_dir, 'favicon.ico')):
        return {'output': 'favicon.ico', 'status': 'skipped'}

    CMD = [
        'curl',
        '--max-time', str(timeout),
        'https://www.google.com/s2/favicons?domain={domain}'.format(**link),
    ]
    fout = open('{}/favicon.ico'.format(link_dir), 'w')
    end = progress(timeout, prefix='      ')
    try:
        run(CMD, stdout=fout, stderr=DEVNULL, cwd=link_dir, timeout=timeout)  # favicon.ico
        fout.close()
        end()
        chmod_file('favicon.ico', cwd=link_dir)
        output = 'favicon.ico'
    except Exception as e:
        fout.close()
        end()
        print('        {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        print('        Run to see full output:')
        print('            {}'.format(' '.join(CMD)))
        output = e

    return {
        'cmd': CMD,
        'output': output,
    }


@attach_result_to_link('media')
def fetch_media(link_dir, link, timeout=MEDIA_TIMEOUT, overwrite=False):
    """Download playlists or individual video, audio, and subtitles using youtube-dl"""

    output = os.path.join(link_dir, 'media')
    already_done = os.path.exists(output)  # and os.listdir(output)
    if already_done and not overwrite:
        return {'output': 'media', 'status': 'skipped'}

    os.makedirs(output, exist_ok=True)
    CMD = [
        'youtube-dl',
        '--write-description',
        '--write-info-json',
        '--write-annotations',
        '--yes-playlist',
        '--write-thumbnail',
        '--no-call-home',
        '--no-check-certificate',
        '--all-subs',
        '-x',
        '-k',
        '--audio-format', 'mp3',
        '--audio-quality', '320K',
        '--embed-thumbnail',
        '--add-metadata',
        link['url'],
    ]

    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=output, timeout=timeout + 1)  # audio/audio.mp3
        chmod_file('media', cwd=link_dir)
        output = 'media'
        end()
        if result.returncode:
            if (b'ERROR: Unsupported URL' in result.stderr
                or b'HTTP Error 404' in result.stderr
                or b'HTTP Error 403' in result.stderr
                or b'URL could be a direct video link' in result.stderr):
                pass
            else:
                print('        got youtubedl response code {}:'.format(result.returncode))
                print(result.stderr)
                raise Exception('Failed to download media')
    except Exception as e:
        end()
        print('        {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        print('        Run to see full output:')
        print('            cd {};'.format(link_dir))
        print('            {}'.format(' '.join(CMD)))
        output = e

    return {
        'cmd': CMD,
        'output': output,
    }
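
# After a successful run, the media/ folder holds whatever youtube-dl produced for the URL,
# typically the extracted .mp3 audio plus the .description, .info.json, annotation, thumbnail,
# and subtitle sidecar files requested by the flags above. Exact filenames vary by source site,
# so treat this as a rough guide rather than a contract.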


@attach_result_to_link('git')
def fetch_git(link_dir, link, timeout=TIMEOUT):
    """download the full source repo using git clone"""

    if not (link['domain'] in GIT_DOMAINS
            or link['url'].endswith('.git')
            or link['type'] == 'git'):
        return

    if os.path.exists(os.path.join(link_dir, 'git')):
        return {'output': 'git', 'status': 'skipped'}

    CMD = ['git', 'clone', '--mirror', '--recursive', link['url'].split('#')[0], 'git']
    output = 'git'

    end = progress(timeout, prefix='      ')
    try:
        result = run(CMD, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout + 1)  # git/<reponame>
        end()

        if result.returncode > 0:
            print('        got git response code {}:'.format(result.returncode))
            raise Exception('Failed git download')
    except Exception as e:
        end()
        print('        {}Failed: {} {}{}'.format(ANSI['red'], e.__class__.__name__, e, ANSI['reset']))
        print('        Run to see full output:')
        print('            cd {};'.format(link_dir))
        print('            {}'.format(' '.join(CMD)))
        output = e

    return {
        'cmd': CMD,
        'output': output,
    }


def chrome_headless(binary=CHROME_BINARY, user_data_dir=CHROME_USER_DATA_DIR):
    args = [binary, '--headless']  # '--disable-gpu'
    if not CHROME_SANDBOX:
        args.append('--no-sandbox')
    default_profile = os.path.expanduser('~/Library/Application Support/Google/Chrome')
    if user_data_dir:
        args.append('--user-data-dir={}'.format(user_data_dir))
    elif os.path.exists(default_profile):
        args.append('--user-data-dir={}'.format(default_profile))
    return args
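
# Illustrative only: with CHROME_BINARY='chromium-browser', CHROME_SANDBOX=False, and neither a
# configured user_data_dir nor the default macOS profile present, chrome_headless() returns
# roughly ['chromium-browser', '--headless', '--no-sandbox'], which fetch_pdf, fetch_screenshot,
# and fetch_dom splat in as the first arguments of their CMD lists.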