# ArchiveBox/archivebox/index.py
import os
import json

from datetime import datetime
from string import Template
from typing import List, Tuple, Iterator, Optional

try:
    from distutils.dir_util import copy_tree
except ImportError:
    # distutils ships with CPython itself and cannot be installed via pip,
    # so the old "pip install distutils" advice was wrong.  copy_tree is
    # used unconditionally below (write_html_links_index), so continuing
    # here would only defer the failure to a confusing NameError later —
    # fail fast with a clear message instead.
    print('[X] Missing "distutils" python package. Your Python installation appears to be incomplete or broken.')
    raise SystemExit(1)

from schema import Link, ArchiveIndex, ArchiveResult
from config import (
    OUTPUT_DIR,
    TEMPLATES_DIR,
    GIT_SHA,
    FOOTER_INFO,
)
from util import (
    merge_links,
    chmod_file,
    urlencode,
    derived_link_info,
    wget_output_path,
    ExtendedEncoder,
    enforce_types,
)
from parse import parse_links
from links import validate_links
from logs import (
    log_indexing_process_started,
    log_indexing_started,
    log_indexing_finished,
    log_parsing_started,
    log_parsing_finished,
)
# Placeholder title shown in the indexes for links that have not been archived yet.
TITLE_LOADING_MSG = 'Not yet archived...'

### Homepage index for all the links
@enforce_types
def write_links_index(out_dir: str, links: List[Link], finished: bool=False) -> None:
    """Write both the JSON and HTML main indexes for the given links into out_dir.

    `finished` is forwarded to the HTML index so it can show whether the
    archiving run is still in progress.
    """
    log_indexing_process_started()

    # (filename, writer) pairs, written in order: JSON first, then HTML
    index_writers = (
        ('index.json', lambda: write_json_links_index(out_dir, links)),
        ('index.html', lambda: write_html_links_index(out_dir, links, finished=finished)),
    )
    for filename, write_index in index_writers:
        log_indexing_started(out_dir, filename)
        write_index()
        log_indexing_finished(out_dir, filename)
@enforce_types
def load_links_index(out_dir: str=OUTPUT_DIR, import_path: Optional[str]=None) -> Tuple[List[Link], List[Link]]:
    """Load the existing index from out_dir, merging in links parsed from import_path.

    Returns (all_links, new_links): the full deduplicated set, and just the
    links that came from the import file.
    """
    existing_links: List[Link] = list(parse_json_links_index(out_dir)) if out_dir else []

    new_links: List[Link] = []
    parser_name = None
    if import_path:
        # parse and validate the import file
        log_parsing_started(import_path)
        raw_links, parser_name = parse_links(import_path)
        new_links = list(validate_links(raw_links))

    # merge existing links in out_dir and new links
    all_links = list(validate_links(existing_links + new_links))

    if import_path and parser_name:
        # validation dedupes, so the delta is how many links were truly new
        log_parsing_finished(len(all_links) - len(existing_links), parser_name)

    return all_links, new_links
@enforce_types
def write_json_links_index(out_dir: str, links: List[Link]) -> None:
    """Write the main JSON link index to out_dir/index.json.

    Fix: the original unconditionally inspected links[0], raising IndexError
    when called with an empty list; the sanity checks are now guarded.
    Also uses isinstance(links, list) instead of the deprecated runtime
    check against typing.List.
    """
    assert isinstance(links, list), 'Links must be a list, not a generator.'
    if links:
        # spot-check the first entry's shape before serializing everything
        assert isinstance(links[0].history, dict)
        assert isinstance(links[0].sources, list)
        if links[0].history.get('title'):
            assert isinstance(links[0].history['title'][0], ArchiveResult)
        if links[0].sources:
            assert isinstance(links[0].sources[0], str)

    path = os.path.join(out_dir, 'index.json')

    index_json = ArchiveIndex(
        info='ArchiveBox Index',
        source='https://github.com/pirate/ArchiveBox',
        docs='https://github.com/pirate/ArchiveBox/wiki',
        version=GIT_SHA,
        num_links=len(links),
        updated=datetime.now(),
        links=links,
    )

    with open(path, 'w', encoding='utf-8') as f:
        # ExtendedEncoder handles the project's non-JSON-native types
        json.dump(index_json._asdict(), f, indent=4, cls=ExtendedEncoder)

    chmod_file(path)
@enforce_types
def parse_json_links_index(out_dir: str=OUTPUT_DIR) -> Iterator[Link]:
    """Lazily yield each Link stored in out_dir/index.json (yields nothing if absent)."""
    index_path = os.path.join(out_dir, 'index.json')
    if os.path.exists(index_path):
        with open(index_path, 'r', encoding='utf-8') as f:
            for entry in json.load(f)['links']:
                yield Link(**entry)
@enforce_types
def write_html_links_index(out_dir: str, links: List[Link], finished: bool=False) -> None:
    """Render the main HTML link index into out_dir/index.html.

    Also copies the static template assets into out_dir and writes a
    robots.txt that blocks all crawlers.
    """
    path = os.path.join(out_dir, 'index.html')

    # ship the static assets (css/js/icons) alongside the generated index
    copy_tree(os.path.join(TEMPLATES_DIR, 'static'), os.path.join(out_dir, 'static'))

    with open(os.path.join(out_dir, 'robots.txt'), 'w+') as f:
        f.write('User-agent: *\nDisallow: /')

    def read_template(filename: str) -> str:
        # templates always live in TEMPLATES_DIR and are utf-8 text
        with open(os.path.join(TEMPLATES_DIR, filename), 'r', encoding='utf-8') as f:
            return f.read()

    index_html = read_template('index.html')
    link_row_html = read_template('index_row.html')

    def render_row(link: Link) -> str:
        # per-row template vars: derived info plus a few display overrides
        row_vars = {
            **derived_link_info(link),
            'title': (
                link.title
                or (link.base_url if link.is_archived else TITLE_LOADING_MSG)
            ),
            'tags': (link.tags or '') + (' {}'.format(link.extension) if link.is_static else ''),
            'favicon_url': (
                os.path.join('archive', link.timestamp, 'favicon.ico')
            ),
            'archive_url': urlencode(
                wget_output_path(link) or 'index.html'
            ),
        }
        return Template(link_row_html).substitute(**row_vars)

    link_rows = '\n'.join(render_row(link) for link in links)

    template_vars = {
        'num_links': len(links),
        'date_updated': datetime.now().strftime('%Y-%m-%d'),
        'time_updated': datetime.now().strftime('%Y-%m-%d %H:%M'),
        'footer_info': FOOTER_INFO,
        'git_sha': GIT_SHA,
        'short_git_sha': GIT_SHA[:8],
        'rows': link_rows,
        'status': 'finished' if finished else 'running',
    }

    with open(path, 'w', encoding='utf-8') as f:
        f.write(Template(index_html).substitute(**template_vars))

    chmod_file(path)
@enforce_types
def patch_links_index(link: Link, out_dir: str=OUTPUT_DIR) -> None:
    """Hack to in-place update one row's info in the generated index files.

    Rewrites the JSON index with the updated entry, then patches the matching
    <span data-title-for=...> / <span data-number-for=...> lines in the HTML.

    Fixes: the HTML read previously leaked an open file handle (no context
    manager), and both HTML open() calls omitted encoding='utf-8', unlike
    every other file access in this module.  Also drops the unused
    `changed` local.
    """
    title = link.title or link.latest_outputs()['title']
    successful = link.num_outputs

    # Patch JSON index: rebuild the full link list with this entry replaced
    patched_links = []
    for saved_link in parse_json_links_index(out_dir):
        if saved_link.url == link.url:
            patched_links.append(saved_link.overwrite(
                title=title,
                history=link.history,
                updated=link.updated,
            ))
        else:
            patched_links.append(saved_link)

    write_json_links_index(out_dir, patched_links)

    # Patch HTML index line-by-line (the index is regenerated wholesale
    # elsewhere; this is just a cheap in-place refresh)
    html_path = os.path.join(out_dir, 'index.html')
    with open(html_path, 'r', encoding='utf-8') as f:
        html = f.read().split('\n')

    for idx, line in enumerate(html):
        if title and ('<span data-title-for="{}"'.format(link.url) in line):
            html[idx] = '<span>{}</span>'.format(title)
        elif successful and ('<span data-number-for="{}"'.format(link.url) in line):
            html[idx] = '<span>{}</span>'.format(successful)
            # number span comes after the title span, so we can stop here
            break

    with open(html_path, 'w', encoding='utf-8') as f:
        f.write('\n'.join(html))
### Individual link index

@enforce_types
def write_link_index(out_dir: str, link: Link) -> None:
    """Write both per-link index files (JSON then HTML) into out_dir."""
    write_json_link_index(out_dir, link)
    write_html_link_index(out_dir, link)
@enforce_types
def write_json_link_index(out_dir: str, link: Link) -> None:
    """Serialize a single link's metadata to out_dir/index.json."""
    index_path = os.path.join(out_dir, 'index.json')

    with open(index_path, 'w', encoding='utf-8') as f:
        # ExtendedEncoder handles the project's non-JSON-native types
        json.dump(link._asdict(), f, indent=4, cls=ExtendedEncoder)

    chmod_file(index_path)
@enforce_types
def parse_json_link_index(out_dir: str) -> Optional[Link]:
    """Load a single link's JSON index from out_dir, or None if none exists."""
    index_path = os.path.join(out_dir, 'index.json')
    if not os.path.exists(index_path):
        return None

    with open(index_path, 'r', encoding='utf-8') as f:
        return Link(**json.load(f))
@enforce_types
def load_json_link_index(out_dir: str, link: Link) -> Link:
    """Merge any previously-saved link index in out_dir into the given link.

    Returns the merged link, or the given link unchanged if no prior
    archive exists in out_dir.
    """
    existing = parse_json_link_index(out_dir)
    return merge_links(existing, link) if existing else link
@enforce_types
def write_html_link_index(out_dir: str, link: Link) -> None:
    """Render the per-link HTML detail page into out_dir/index.html."""
    template_path = os.path.join(TEMPLATES_DIR, 'link_index.html')
    with open(template_path, 'r', encoding='utf-8') as f:
        link_html = f.read()

    # derived info plus display fallbacks for not-yet-archived links
    context = {
        **derived_link_info(link),
        'title': (
            link.title
            or (link.base_url if link.is_archived else TITLE_LOADING_MSG)
        ),
        'archive_url': urlencode(
            wget_output_path(link)
            or (link.domain if link.is_archived else 'about:blank')
        ),
        'extension': link.extension or 'html',
        'tags': link.tags or 'untagged',
        'status': 'archived' if link.is_archived else 'not yet archived',
        'status_color': 'success' if link.is_archived else 'danger',
    }

    index_path = os.path.join(out_dir, 'index.html')
    with open(index_path, 'w', encoding='utf-8') as f:
        f.write(Template(link_html).substitute(context))

    chmod_file(index_path)