"""
In ArchiveBox, a Link represents a single entry that we track in the
json index. All links pass through all archiver functions and the latest,
most up-to-date canonical output for each is stored in "latest".

Link {
    timestamp: str,  (how we uniquely id links)
    url: str,
    title: str,
    tags: str,
    sources: [str],
    history: {
        pdf: [
            {start_ts, end_ts, duration, cmd, pwd, status, output},
            ...
        ],
        ...
    },
}
"""
|
2017-10-18 22:38:17 +00:00
|
|
|
|
2018-04-17 14:30:25 +00:00
|
|
|
from html import unescape
|
2018-09-14 22:08:59 +00:00
|
|
|
from collections import OrderedDict
|
2018-04-17 07:22:59 +00:00
|
|
|
|
2017-10-23 09:58:41 +00:00
|
|
|
from util import (
|
2017-10-30 07:50:37 +00:00
|
|
|
merge_links,
|
2019-02-21 22:45:28 +00:00
|
|
|
check_link_structure,
|
|
|
|
check_links_structure,
|
2017-10-23 09:58:41 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
|
2017-10-18 22:38:17 +00:00
|
|
|
def validate_links(links):
    """Normalize a collection of links and return them as a clean list.

    Runs the links through a filter/dedupe/sort pipeline, unescapes HTML
    entities in titles, and verifies each entry's structure.
    Exits the program with an error if no archivable links remain.
    """
    check_links_structure(links)

    # Pipeline: drop unarchivable schemes (chrome://, about:, mailto:, ...),
    # merge/dedupe duplicate timestamps & urls, then sort deterministically
    # by timestamp and url.
    for clean in (archivable_links, uniquefied_links, sorted_links):
        links = clean(links)

    if not links:
        print('[X] No links found :(')
        raise SystemExit(1)

    for link in links:
        title = link['title']
        link['title'] = unescape(title.strip()) if title else None
        check_link_structure(link)

    return list(links)
|
|
|
|
|
2019-02-21 22:45:28 +00:00
|
|
|
|
2017-10-23 09:58:41 +00:00
|
|
|
def archivable_links(links):
    """Yield only the links whose URL uses an archivable scheme.

    Filters out chrome://, about:, mailto:, javascript: and any other
    scheme we cannot fetch; only http://, https://, and ftp:// URLs pass.

    Returns a generator over the surviving link dicts (lazily evaluated).
    """
    # str.startswith accepts a tuple of prefixes, so one C-level call
    # replaces the per-prefix any(...) generator scan.
    return (
        link
        for link in links
        if link['url'].lower().startswith(('http://', 'https://', 'ftp://'))
    )
|
2017-10-18 22:38:17 +00:00
|
|
|
|
2019-02-21 22:45:28 +00:00
|
|
|
|
2017-10-18 22:38:17 +00:00
|
|
|
def uniquefied_links(sorted_links):
    """
    ensures that all non-duplicate links have monotonically increasing timestamps
    """

    def fuzzy(url):
        # Normalize a URL for dedupe purposes: lowercase + strip whitespace,
        # drop a trailing slash (or collapse '/?' into '?'), and remove a
        # leading 'www.' from the host.
        url = url.lower().strip()
        url = url[:-1] if url[-1] == '/' else url.replace('/?', '?')
        return url.replace('://www.', '://', 1)

    # First pass: collapse entries that point at the same fuzzy URL,
    # merging their metadata/history while preserving insertion order.
    unique_urls = OrderedDict()
    for link in sorted_links:
        key = fuzzy(link['url'])
        if key in unique_urls:
            # merge with any other links that share the same url
            link = merge_links(unique_urls[key], link)
        unique_urls[key] = link

    # Second pass: distinct links that collide on the same timestamp get
    # a .0/.1/.2... suffix so every timestamp remains a unique id.
    unique_timestamps = OrderedDict()
    for link in unique_urls.values():
        ts = lowest_uniq_timestamp(unique_timestamps, link['timestamp'])
        link['timestamp'] = ts
        unique_timestamps[ts] = link

    return unique_timestamps.values()
|
2017-10-18 22:38:17 +00:00
|
|
|
|
2019-02-21 22:45:28 +00:00
|
|
|
|
2017-10-23 09:58:41 +00:00
|
|
|
def sorted_links(links):
    """Return the links as a list, ordered newest-first and deterministically.

    Keys on the whole-seconds part of the timestamp string (any .N dedupe
    suffix is ignored) with the URL as a tie-breaker, so the same input
    always yields the same ordering.
    """
    def key(link):
        whole_seconds = link['timestamp'].split('.', 1)[0]
        return (whole_seconds, link['url'])

    return sorted(links, key=key, reverse=True)
|
|
|
|
|
2019-02-21 22:45:28 +00:00
|
|
|
|
2017-10-18 22:38:17 +00:00
|
|
|
def links_after_timestamp(links, timestamp=None):
    """Yield the links still to be processed when resuming from a timestamp.

    With no timestamp, every link is yielded unchanged. Otherwise only
    links whose timestamp is <= the resume value are yielded — the index
    is sorted newest-first, so those are the entries at or past the resume
    point. Entries whose timestamp (or the resume value itself) is not a
    valid number are skipped with a warning instead of raising.
    """
    if not timestamp:
        yield from links
        return

    for link in links:
        try:
            # float() of the resume value stays inside the loop so a bad
            # resume value warns per-link rather than raising.
            resumable = float(link['timestamp']) <= float(timestamp)
        except (ValueError, TypeError):
            print('Resume value and all timestamp values must be valid numbers.')
        else:
            if resumable:
                yield link
|
|
|
|
|
2019-02-21 22:45:28 +00:00
|
|
|
|
2017-10-19 00:33:31 +00:00
|
|
|
def lowest_uniq_timestamp(used_timestamps, timestamp):
    """resolve duplicate timestamps by appending a decimal 1234, 1234 -> 1234.1, 1234.2"""

    # Only the whole-seconds part matters; any existing .N suffix is dropped.
    base = timestamp.split('.')[0]

    # Prefer the bare form: first try 152323423 before 152323423.0
    if base not in used_timestamps:
        return base

    # Otherwise probe base.0, base.1, base.2, ... until a free slot appears.
    nonce = 0
    while True:
        candidate = '{}.{}'.format(base, nonce)
        if candidate not in used_timestamps:
            return candidate
        nonce += 1
|