__package__ = 'archivebox'

import re
import requests
import json as pyjson
import http.cookiejar

from typing import List, Optional, Any
from pathlib import Path
from inspect import signature
from functools import wraps
from hashlib import sha256
from urllib.parse import urlparse, quote, unquote
from html import escape, unescape
from datetime import datetime, timezone
from dateparser import parse as dateparser
from requests.exceptions import RequestException, ReadTimeout

from .vendor.base32_crockford import encode as base32_encode  # type: ignore
from w3lib.encoding import html_body_declared_encoding, http_content_type_encoding

from os.path import lexists
from os import remove as remove_file

try:
    import chardet
    detect_encoding = lambda rawdata: chardet.detect(rawdata)["encoding"]
except ImportError:
    detect_encoding = lambda rawdata: "utf-8"


### Parsing Helpers

# All of these are (str) -> str
# shortcuts to: https://docs.python.org/3/library/urllib.parse.html#url-parsing
scheme = lambda url: urlparse(url).scheme.lower()
without_scheme = lambda url: urlparse(url)._replace(scheme='').geturl().strip('//')
without_query = lambda url: urlparse(url)._replace(query='').geturl().strip('//')
without_fragment = lambda url: urlparse(url)._replace(fragment='').geturl().strip('//')
without_path = lambda url: urlparse(url)._replace(path='', fragment='', query='').geturl().strip('//')
path = lambda url: urlparse(url).path
basename = lambda url: urlparse(url).path.rsplit('/', 1)[-1]
domain = lambda url: urlparse(url).netloc
query = lambda url: urlparse(url).query
fragment = lambda url: urlparse(url).fragment
extension = lambda url: basename(url).rsplit('.', 1)[-1].lower() if '.' in basename(url) else ''
base_url = lambda url: without_scheme(url)  # uniq base url used to dedupe links

without_www = lambda url: url.replace('://www.', '://', 1)
without_trailing_slash = lambda url: url[:-1] if url[-1] == '/' else url.replace('/?', '?')
hashurl = lambda url: base32_encode(int(sha256(base_url(url).encode('utf-8')).hexdigest(), 16))[:20]

urlencode = lambda s: s and quote(s, encoding='utf-8', errors='replace')
urldecode = lambda s: s and unquote(s)
htmlencode = lambda s: s and escape(s, quote=True)
htmldecode = lambda s: s and unescape(s)

short_ts = lambda ts: str(parse_date(ts).timestamp()).split('.')[0]
ts_to_date_str = lambda ts: ts and parse_date(ts).strftime('%Y-%m-%d %H:%M')
ts_to_iso = lambda ts: ts and parse_date(ts).isoformat()
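
# A minimal usage sketch of the shortcuts above (input URLs here are illustrative only):
#   >>> domain('https://www.example.com/docs/page.html?q=1#top')
#   'www.example.com'
#   >>> extension('https://example.com/files/Report.PDF')
#   'pdf'
#   >>> base_url('https://example.com/a/b?c=d')
#   'example.com/a/b?c=d'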


COLOR_REGEX = re.compile(r'\[(?P<arg_1>\d+)(;(?P<arg_2>\d+)(;(?P<arg_3>\d+))?)?m')


# https://mathiasbynens.be/demo/url-regex
URL_REGEX = re.compile(
    r'(?=('
    r'http[s]?://'                    # start matching from allowed schemes
    r'(?:[a-zA-Z]|[0-9]'              # followed by allowed alphanum characters
    r'|[-_$@.&+!*\(\),]'              # or allowed symbols (keep hyphen first to match literal hyphen)
    r'|[^\u0000-\u007F])+'            # or allowed unicode bytes
    r'[^\]\[<>"\'\s]+'                # stop parsing at these symbols
    r'))',
    re.IGNORECASE | re.UNICODE,
)


def parens_are_matched(string: str, open_char='(', close_char=')'):
    """check that all parentheses in a string are balanced and nested properly"""
    count = 0
    for c in string:
        if c == open_char:
            count += 1
        elif c == close_char:
            count -= 1
            if count < 0:
                return False
    return count == 0
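
# Quick illustration (inputs are made-up examples, shown as a sketch):
#   >>> parens_are_matched('/some_article_(Disambiguation).html')
#   True
#   >>> parens_are_matched('/a(b)c).x(y)z')
#   False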


def fix_url_from_markdown(url_str: str) -> str:
    """
    Clean up a regex-parsed url that may contain dangling trailing parens from markdown link syntax.
    Helpful to fix URLs parsed from markdown, e.g.
    input:  https://wikipedia.org/en/some_article_(Disambiguation).html?abc=def).somemoretext
    result: https://wikipedia.org/en/some_article_(Disambiguation).html?abc=def

    IMPORTANT ASSUMPTION: valid urls won't have unbalanced or incorrectly nested parentheses.
    E.g. this will fail if the user actually wants to ingest a url like 'https://example.com/some_wei)(rd_url';
    in that case it will return https://example.com/some_wei (truncated up to the first unbalanced paren).
    This assumption is true 99.9999% of the time, and for the rare edge case the user can use the url_list parser.
    """
    trimmed_url = url_str

    # cut off one trailing character at a time
    # until parens are balanced e.g. /a(b)c).x(y)z -> /a(b)c
    while not parens_are_matched(trimmed_url):
        trimmed_url = trimmed_url[:-1]

    # make sure trimmed url is still valid
    if re.findall(URL_REGEX, trimmed_url):
        return trimmed_url

    return url_str


def find_all_urls(urls_str: str):
    for url in re.findall(URL_REGEX, urls_str):
        yield fix_url_from_markdown(url)
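
# A small usage sketch (the expected output mirrors the URL_REGEX_TESTS cases at the bottom of this file):
#   >>> list(find_all_urls('[xyz](http://a.b/?q=(Test)%20U)-ab https://abc+123'))
#   ['http://a.b/?q=(Test)%20U', 'https://abc+123']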


def is_static_file(url: str):
    # TODO: the proper way is with MIME type detection + ext, not only extension
    from .config import STATICFILE_EXTENSIONS
    return extension(url).lower() in STATICFILE_EXTENSIONS


def enforce_types(func):
    """
    Enforce function arg and kwarg types at runtime using its python3 type hints
    """
    # TODO: check return type as well

    @wraps(func)
    def typechecked_function(*args, **kwargs):
        sig = signature(func)

        def check_argument_type(arg_key, arg_val):
            try:
                annotation = sig.parameters[arg_key].annotation
            except KeyError:
                annotation = None

            if annotation is not None and annotation.__class__ is type:
                if not isinstance(arg_val, annotation):
                    raise TypeError(
                        '{}(..., {}: {}) got unexpected {} argument {}={}'.format(
                            func.__name__,
                            arg_key,
                            annotation.__name__,
                            type(arg_val).__name__,
                            arg_key,
                            str(arg_val)[:64],
                        )
                    )

        # check args
        for arg_val, arg_key in zip(args, sig.parameters):
            check_argument_type(arg_key, arg_val)

        # check kwargs
        for arg_key, arg_val in kwargs.items():
            check_argument_type(arg_key, arg_val)

        return func(*args, **kwargs)

    return typechecked_function
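
# Minimal sketch of how the decorator behaves (the decorated function and values here are hypothetical):
#   @enforce_types
#   def greet(name: str) -> str:
#       return 'hello ' + name
#
#   greet('world')  # ok
#   greet(123)      # raises TypeError: greet(..., name: str) got unexpected int argument name=123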


def docstring(text: Optional[str]):
    """attach the given docstring to the decorated function"""
    def decorator(func):
        if text:
            func.__doc__ = text
        return func
    return decorator
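
# Usage sketch (the decorated function is hypothetical):
#   @docstring('Save a snapshot of the page using wget.')
#   def save_wget(link):
#       ...
#   save_wget.__doc__  # -> 'Save a snapshot of the page using wget.'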


@enforce_types
def str_between(string: str, start: str, end: str=None) -> str:
    """(<abc>12345</def>, <abc>, </def>)  ->  12345"""

    content = string.split(start, 1)[-1]
    if end is not None:
        content = content.rsplit(end, 1)[0]

    return content
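
# For example (values are illustrative):
#   >>> str_between('<title>Example Domain</title>', '<title>', '</title>')
#   'Example Domain'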


@enforce_types
def parse_date(date: Any) -> Optional[datetime]:
    """Parse unix timestamps, iso format, and human-readable strings"""

    if date is None:
        return None

    if isinstance(date, datetime):
        if date.tzinfo is None:
            return date.replace(tzinfo=timezone.utc)

        assert date.tzinfo.utcoffset(datetime.now()).seconds == 0, 'Refusing to load a non-UTC date!'
        return date

    if isinstance(date, (float, int)):
        date = str(date)

    if isinstance(date, str):
        return dateparser(date, settings={'TIMEZONE': 'UTC'}).replace(tzinfo=timezone.utc)

    raise ValueError('Tried to parse invalid date! {}'.format(date))
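
# Rough usage sketch (exact return values depend on dateparser's behavior):
#   parse_date('2024-01-01 12:00')  # -> datetime(2024, 1, 1, 12, 0, tzinfo=timezone.utc)
#   parse_date(1704067200)          # numeric timestamps are stringified and handed to dateparser
#   parse_date(None)                # -> None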


@enforce_types
def download_url(url: str, timeout: int=None) -> str:
    """Download the contents of a remote url and return the text"""
    from .config import (
        TIMEOUT,
        CHECK_SSL_VALIDITY,
        WGET_USER_AGENT,
        COOKIES_FILE,
    )
    timeout = timeout or TIMEOUT

    session = requests.Session()

    if COOKIES_FILE and Path(COOKIES_FILE).is_file():
        cookie_jar = http.cookiejar.MozillaCookieJar(COOKIES_FILE)
        cookie_jar.load(ignore_discard=True, ignore_expires=True)
        for cookie in cookie_jar:
            session.cookies.set(cookie.name, cookie.value, domain=cookie.domain, path=cookie.path)

    response = session.get(
        url,
        headers={'User-Agent': WGET_USER_AGENT},
        verify=CHECK_SSL_VALIDITY,
        timeout=timeout,
    )

    content_type = response.headers.get('Content-Type', '')
    encoding = http_content_type_encoding(content_type) or html_body_declared_encoding(response.text)

    if encoding is not None:
        response.encoding = encoding

    try:
        return response.text
    except UnicodeDecodeError:
        # if the response is non-text (e.g. an image or other binary file), just return the filename instead
        return url.rsplit('/', 1)[-1]
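
# Usage sketch (the URL is illustrative; TIMEOUT, WGET_USER_AGENT, etc. are read from .config at call time):
#   html_text = download_url('https://example.com', timeout=30)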


@enforce_types
def get_headers(url: str, timeout: int=None) -> str:
    """Download the contents of a remote url and return the headers"""
    from .config import TIMEOUT, CHECK_SSL_VALIDITY, WGET_USER_AGENT
    timeout = timeout or TIMEOUT

    try:
        response = requests.head(
            url,
            headers={'User-Agent': WGET_USER_AGENT},
            verify=CHECK_SSL_VALIDITY,
            timeout=timeout,
            allow_redirects=True,
        )
        if response.status_code >= 400:
            raise RequestException
    except ReadTimeout:
        raise
    except RequestException:
        response = requests.get(
            url,
            headers={'User-Agent': WGET_USER_AGENT},
            verify=CHECK_SSL_VALIDITY,
            timeout=timeout,
            stream=True
        )

    return pyjson.dumps(
        {
            'Status-Code': response.status_code,
            **dict(response.headers),
        },
        indent=4,
    )
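
# Output shape sketch (actual header names and values depend entirely on the server):
#   get_headers('https://example.com')
#   # -> JSON string like '{\n    "Status-Code": 200,\n    "Content-Type": "text/html; charset=UTF-8",\n    ...\n}'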


@enforce_types
def chrome_args(**options) -> List[str]:
    """helper to build up a chrome shell command with arguments"""

    # Chrome CLI flag documentation: https://peter.sh/experiments/chromium-command-line-switches/

    from .config import (
        CHROME_OPTIONS,
        CHROME_VERSION,
        CHROME_EXTRA_ARGS,
    )

    options = {**CHROME_OPTIONS, **options}

    if not options['CHROME_BINARY']:
        raise Exception('Could not find any CHROME_BINARY installed on your system')

    cmd_args = [options['CHROME_BINARY']]

    cmd_args += CHROME_EXTRA_ARGS

    if options['CHROME_HEADLESS']:
        chrome_major_version = int(re.search(r'\s(\d+)\.\d', CHROME_VERSION)[1])
        if chrome_major_version >= 111:
            cmd_args += ("--headless=new",)
        else:
            cmd_args += ('--headless',)

    if not options['CHROME_SANDBOX']:
        # assume this means we are running inside a docker container
        # in docker, GPU support is limited, sandboxing is unnecessary,
        # and SHM is limited to 64MB by default (which is too low to be usable).
        cmd_args += (
            "--no-sandbox",
            "--no-zygote",
            "--disable-dev-shm-usage",
            "--disable-software-rasterizer",
            "--run-all-compositor-stages-before-draw",
            "--hide-scrollbars",
            "--autoplay-policy=no-user-gesture-required",
            "--no-first-run",
            "--use-fake-ui-for-media-stream",
            "--use-fake-device-for-media-stream",
            "--disable-sync",
            # "--password-store=basic",
        )

    # disable automatic updating when running headless, as there's no user to see the upgrade prompts
    cmd_args += ("--simulate-outdated-no-au='Tue, 31 Dec 2099 23:59:59 GMT'",)

    # set window size for screenshot/pdf/etc. rendering
    cmd_args += ('--window-size={}'.format(options['RESOLUTION']),)

    if not options['CHECK_SSL_VALIDITY']:
        cmd_args += ('--disable-web-security', '--ignore-certificate-errors')

    if options['CHROME_USER_AGENT']:
        cmd_args += ('--user-agent={}'.format(options['CHROME_USER_AGENT']),)

    if options['CHROME_TIMEOUT']:
        cmd_args += ('--timeout={}'.format(options['CHROME_TIMEOUT'] * 1000),)

    if options['CHROME_USER_DATA_DIR']:
        cmd_args.append('--user-data-dir={}'.format(options['CHROME_USER_DATA_DIR']))
        cmd_args.append('--profile-directory=Default')

    return dedupe(cmd_args)
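
# Usage sketch (option names come from CHROME_OPTIONS in .config; the overrides and binary path shown are hypothetical):
#   cmd = chrome_args(CHROME_HEADLESS=True, CHROME_SANDBOX=False, RESOLUTION='1440,2000')
#   # -> e.g. ['/usr/bin/chromium', '--headless=new', '--no-sandbox', ..., '--window-size=1440,2000']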


def chrome_cleanup():
    """
    Cleans up any state or runtime files that chrome leaves behind when killed by
    a timeout or other error
    """
    from .config import IN_DOCKER

    if IN_DOCKER and lexists("/home/archivebox/.config/chromium/SingletonLock"):
        remove_file("/home/archivebox/.config/chromium/SingletonLock")


@enforce_types
def ansi_to_html(text: str) -> str:
    """
    Based on: https://stackoverflow.com/questions/19212665/python-converting-ansi-color-codes-to-html
    """
    from .config import COLOR_DICT

    TEMPLATE = '<span style="color: rgb{}"><br>'
    text = text.replace('[m', '</span>')

    def single_sub(match):
        argsdict = match.groupdict()
        if argsdict['arg_3'] is None:
            if argsdict['arg_2'] is None:
                _, color = 0, argsdict['arg_1']
            else:
                _, color = argsdict['arg_1'], argsdict['arg_2']
        else:
            _, color = argsdict['arg_3'], argsdict['arg_2']

        return TEMPLATE.format(COLOR_DICT[color][0])

    return COLOR_REGEX.sub(single_sub, text)


@enforce_types
def dedupe(options: List[str]) -> List[str]:
    """
    Deduplicates the given options. Options that come later clobber earlier
    conflicting options.
    """
    deduped = {}

    for option in options:
        deduped[option.split('=')[0]] = option

    return list(deduped.values())
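
# For example (illustrative flags):
#   >>> dedupe(['--headless', '--window-size=1440,2000', '--window-size=1920,1080'])
#   ['--headless', '--window-size=1920,1080']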


class AttributeDict(dict):
    """Helper to allow accessing dict values via Example.key or Example['key']"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Recursively convert nested dicts to AttributeDicts (optional):
        # for key, val in self.items():
        #     if isinstance(val, dict) and type(val) is not AttributeDict:
        #         self[key] = AttributeDict(val)

    def __getattr__(self, attr: str) -> Any:
        return dict.__getitem__(self, attr)

    def __setattr__(self, attr: str, value: Any) -> None:
        return dict.__setitem__(self, attr, value)
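
# Quick sketch (keys and values are hypothetical):
#   config = AttributeDict({'TIMEOUT': 60})
#   config.TIMEOUT             # -> 60, same as config['TIMEOUT']
#   config.OUT_DIR = '/data'   # same as config['OUT_DIR'] = '/data'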


class ExtendedEncoder(pyjson.JSONEncoder):
    """
    Extended json serializer that supports serializing several model
    fields and objects
    """

    def default(self, obj):
        cls_name = obj.__class__.__name__

        if hasattr(obj, '_asdict'):
            return obj._asdict()

        elif isinstance(obj, bytes):
            return obj.decode()

        elif isinstance(obj, datetime):
            return obj.isoformat()

        elif isinstance(obj, Exception):
            return '{}: {}'.format(obj.__class__.__name__, obj)

        elif isinstance(obj, Path):
            return str(obj)

        elif cls_name in ('dict_items', 'dict_keys', 'dict_values'):
            return tuple(obj)

        return pyjson.JSONEncoder.default(self, obj)
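
# A minimal sketch of what the encoder handles (values are illustrative):
#   pyjson.dumps({'path': Path('/data'), 'ts': datetime(2024, 1, 1)}, cls=ExtendedEncoder)
#   # -> '{"path": "/data", "ts": "2024-01-01T00:00:00"}'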


### URL PARSING TESTS / ASSERTIONS

# Check that plain-text regex URL parsing works as expected.
# This is a last line of defense to make sure the URL_REGEX isn't
# misbehaving due to some OS-level or environment-level quirks (e.g. regex engine / cpython / locale differences).
# The consequences of bad URL parsing could be disastrous and lead to many
# incorrect/badly-parsed links being added to the archive, so this is worth the cost of checking.

assert fix_url_from_markdown('http://example.com/a(b)c).x(y)z') == 'http://example.com/a(b)c'
assert fix_url_from_markdown('https://wikipedia.org/en/some_article_(Disambiguation).html?abc=def).link(with)_trailingtext') == 'https://wikipedia.org/en/some_article_(Disambiguation).html?abc=def'

URL_REGEX_TESTS = [
    ('https://example.com', ['https://example.com']),
    ('http://abc-file234example.com/abc?def=abc&23423=sdfsdf#abc=234&234=a234', ['http://abc-file234example.com/abc?def=abc&23423=sdfsdf#abc=234&234=a234']),

    ('https://twitter.com/share?url=https://akaao.success-corp.co.jp&text=ア@サ!ト&hashtags=ア%オ,元+ア.ア-オ_イ*シ$ロ abc', ['https://twitter.com/share?url=https://akaao.success-corp.co.jp&text=ア@サ!ト&hashtags=ア%オ,元+ア.ア-オ_イ*シ$ロ', 'https://akaao.success-corp.co.jp&text=ア@サ!ト&hashtags=ア%オ,元+ア.ア-オ_イ*シ$ロ']),
    ('<a href="https://twitter.com/share#url=https://akaao.success-corp.co.jp&text=ア@サ!ト?hashtags=ア%オ,元+ア&abc=.ア-オ_イ*シ$ロ"> abc', ['https://twitter.com/share#url=https://akaao.success-corp.co.jp&text=ア@サ!ト?hashtags=ア%オ,元+ア&abc=.ア-オ_イ*シ$ロ', 'https://akaao.success-corp.co.jp&text=ア@サ!ト?hashtags=ア%オ,元+ア&abc=.ア-オ_イ*シ$ロ']),

    ('///a', []),
    ('http://', []),
    ('http://../', ['http://../']),
    ('http://-error-.invalid/', ['http://-error-.invalid/']),
    ('https://a(b)c+1#2?3&4/', ['https://a(b)c+1#2?3&4/']),
    ('http://उदाहरण.परीक्षा', ['http://उदाहरण.परीक्षा']),
    ('http://例子.测试', ['http://例子.测试']),
    ('http://➡.ws/䨹 htps://abc.1243?234', ['http://➡.ws/䨹']),
    ('http://⌘.ws">https://exa+mple.com//:abc ', ['http://⌘.ws', 'https://exa+mple.com//:abc']),
    ('http://مثال.إختبار/abc?def=ت&ب=abc#abc=234', ['http://مثال.إختبار/abc?def=ت&ب=abc#abc=234']),
    ('http://-.~_!$&()*+,;=:%40:80%2f::::::@example.c\'om', ['http://-.~_!$&()*+,;=:%40:80%2f::::::@example.c']),

    ('http://us:pa@ex.co:42/http://ex.co:19/a?_d=4#-a=2.3', ['http://us:pa@ex.co:42/http://ex.co:19/a?_d=4#-a=2.3', 'http://ex.co:19/a?_d=4#-a=2.3']),
    ('http://code.google.com/events/#&product=browser', ['http://code.google.com/events/#&product=browser']),
    ('http://foo.bar?q=Spaces should be encoded', ['http://foo.bar?q=Spaces']),
    ('http://foo.com/blah_(wikipedia)#c(i)t[e]-1', ['http://foo.com/blah_(wikipedia)#c(i)t']),
    ('http://foo.com/(something)?after=parens', ['http://foo.com/(something)?after=parens']),
    ('http://foo.com/unicode_(✪)_in_parens) abc', ['http://foo.com/unicode_(✪)_in_parens']),
    ('http://foo.bar/?q=Test%20URL-encoded%20stuff', ['http://foo.bar/?q=Test%20URL-encoded%20stuff']),

    ('[xyz](http://a.b/?q=(Test)%20U)RL-encoded%20stuff', ['http://a.b/?q=(Test)%20U']),
    ('[xyz](http://a.b/?q=(Test)%20U)-ab https://abc+123', ['http://a.b/?q=(Test)%20U', 'https://abc+123']),
    ('[xyz](http://a.b/?q=(Test)%20U) https://a(b)c+12)3', ['http://a.b/?q=(Test)%20U', 'https://a(b)c+12']),
    ('[xyz](http://a.b/?q=(Test)a\nabchttps://a(b)c+12)3', ['http://a.b/?q=(Test)a', 'https://a(b)c+12']),
    ('http://foo.bar/?q=Test%20URL-encoded%20stuff', ['http://foo.bar/?q=Test%20URL-encoded%20stuff']),
]
for urls_str, expected_url_matches in URL_REGEX_TESTS:
    url_matches = list(find_all_urls(urls_str))
    assert url_matches == expected_url_matches, 'FAILED URL_REGEX CHECK!'


# More test cases
_test_url_strs = {
    'example.com': 0,
    '/example.com': 0,
    '//example.com': 0,
    ':/example.com': 0,
    '://example.com': 0,
    'htt://example8.com': 0,
    '/htt://example.com': 0,
    'https://example': 1,
    'https://localhost/2345': 1,
    'https://localhost:1234/123': 1,
    '://': 0,
    'https://': 0,
    'http://': 0,
    'ftp://': 0,
    'ftp://example.com': 0,
    'https://example.com': 1,
    'https://example.com/': 1,
    'https://a.example.com': 1,
    'https://a.example.com/': 1,
    'https://a.example.com/what/is/happening.html': 1,
    'https://a.example.com/what/ís/happening.html': 1,
    'https://a.example.com/what/is/happening.html?what=1&2%20b#höw-about-this=1a': 1,
    'https://a.example.com/what/is/happéning/?what=1&2%20b#how-aboüt-this=1a': 1,
    'HTtpS://a.example.com/what/is/happening/?what=1&2%20b#how-about-this=1af&2f%20b': 1,
    'https://example.com/?what=1#how-about-this=1&2%20baf': 1,
    'https://example.com?what=1#how-about-this=1&2%20baf': 1,
    '<test>http://example7.com</test>': 1,
    'https://<test>': 0,
    'https://[test]': 0,
    'http://"test"': 0,
    'http://\'test\'': 0,
    '[https://example8.com/what/is/this.php?what=1]': 1,
    '[and http://example9.com?what=1&other=3#and-thing=2]': 1,
    '<what>https://example10.com#and-thing=2 "</about>': 1,
    'abc<this["https://example11.com/what/is#and-thing=2?whoami=23&where=1"]that>def': 1,
    'sdflkf[what](https://example12.com/who/what.php?whoami=1#whatami=2)?am=hi': 1,
    '<or>http://examplehttp://15.badc</that>': 2,
    'https://a.example.com/one.html?url=http://example.com/inside/of/another?=http://': 2,
    '[https://a.example.com/one.html?url=http://example.com/inside/of/another?=](http://a.example.com)': 3,
}
for url_str, num_urls in _test_url_strs.items():
    assert len(list(find_all_urls(url_str))) == num_urls, (
        f'{url_str} does not contain {num_urls} urls')