mirror of https://github.com/ArchiveBox/ArchiveBox (synced 2024-11-22 12:13:05 +00:00)
fix lgtm errors
This commit is contained in:
parent cc80ceb0a2
commit d6de04a83a

5 changed files with 7 additions and 6 deletions

@@ -33,6 +33,8 @@ LOGOUT_REDIRECT_URL = '/'
 PASSWORD_RESET_URL = '/accounts/password_reset/'
+APPEND_SLASH = True
 
+DEBUG = DEBUG or os.environ.get('DEBUG', 'false').lower() != 'false' or '--debug' in sys.argv
 
 INSTALLED_APPS = [
     'django.contrib.auth',
     'django.contrib.contenttypes',
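
APPEND_SLASH turns on Django's trailing-slash redirect, and the new DEBUG line lets debug mode be forced on via a DEBUG environment variable or a --debug CLI flag. One subtlety: the env value is compared against the literal string 'false', so any other value enables debugging. A minimal standalone sketch of the same toggle (the False baseline is an assumption; ArchiveBox derives its initial DEBUG from its config):

import os
import sys

DEBUG = False  # assumed baseline; ArchiveBox imports its own default from config

# Only an unset variable or the literal string "false" keeps debug off:
# DEBUG=1, DEBUG=true, and even DEBUG=0 all switch it on, as does --debug in argv.
DEBUG = DEBUG or os.environ.get('DEBUG', 'false').lower() != 'false' or '--debug' in sys.argv

print('debug enabled?', DEBUG)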

@@ -42,14 +42,13 @@ def save_favicon(link: Link, out_dir: Optional[Path]=None, timeout: int=TIMEOUT)
         *([] if CHECK_SSL_VALIDITY else ['--insecure']),
         'https://www.google.com/s2/favicons?domain={}'.format(domain(link.url)),
     ]
-    status = 'pending'
+    status = 'failed'
     timer = TimedProgress(timeout, prefix=' ')
     try:
         run(cmd, cwd=str(out_dir), timeout=timeout)
         chmod_file(output, cwd=str(out_dir))
         status = 'succeeded'
     except Exception as err:
-        status = 'failed'
         output = err
     finally:
         timer.end()
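
Starting save_favicon's status at 'failed' instead of 'pending' means the variable only has to change on full success, so the assignment in the except arm becomes redundant and is dropped (the 'pending' value was never read, which is presumably what LGTM flagged). The same pattern in isolation, with a stand-in action argument rather than the real curl invocation:

def attempt(action):
    # Pessimistic default: anything short of full success leaves status as 'failed'.
    status = 'failed'
    output = None
    try:
        output = action()
        status = 'succeeded'   # only reached when action() did not raise
    except Exception as err:
        output = err           # no redundant status = 'failed' needed here
    return status, output

print(attempt(lambda: 'ok'))     # ('succeeded', 'ok')
print(attempt(lambda: 1 / 0))    # ('failed', ZeroDivisionError('division by zero'))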

@@ -2,7 +2,6 @@ __package__ = 'archivebox.index'
 
 import os
 import shutil
-import json as pyjson
 from pathlib import Path
 
 from itertools import chain

@@ -42,6 +41,7 @@ from .html import (
     write_html_link_details,
 )
 from .json import (
+    pyjson,
     parse_json_link_details,
     write_json_link_details,
 )
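
The net effect in archivebox.index's __init__ is that json is imported once, inside the .json submodule, and the pyjson alias is re-exported from there instead of being imported a second time at the top of the package (a duplicate that LGTM presumably reported as unused). A hedged sketch of the re-export pattern, with hypothetical module names:

# json_utils.py -- hypothetical stand-in for the .json submodule
import json as pyjson   # the single import site for the alias

def parse_details(text):
    return pyjson.loads(text)

# __init__.py -- re-export the alias rather than importing json again:
#     from .json_utils import pyjson, parse_details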

@@ -51,9 +51,9 @@ def parse_generic_txt_export(text_file: IO[str], **_kwargs) -> Iterable[Link]:
         # look inside the URL for any sub-urls, e.g. for archive.org links
         # https://web.archive.org/web/20200531203453/https://www.reddit.com/r/socialism/comments/gu24ke/nypd_officers_claim_they_are_protecting_the_rule/fsfq0sw/
         # -> https://www.reddit.com/r/socialism/comments/gu24ke/nypd_officers_claim_they_are_protecting_the_rule/fsfq0sw/
-        for url in re.findall(URL_REGEX, line[1:]):
+        for sub_url in re.findall(URL_REGEX, line[1:]):
             yield Link(
-                url=htmldecode(url),
+                url=htmldecode(sub_url),
                 timestamp=str(datetime.now().timestamp()),
                 title=None,
                 tags=None,
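
Renaming the inner loop variable to sub_url stops it from shadowing the url bound by the enclosing loop, which LGTM reports because the two bindings silently clobber each other. A self-contained sketch of the hazard, using a simplified pattern rather than ArchiveBox's real URL_REGEX:

import re

URL_REGEX = re.compile(r'https?://\S+')  # simplified stand-in for ArchiveBox's pattern
line = 'https://web.archive.org/web/2020/https://example.com/post'

# Reusing one name in both loops overwrites the outer value:
for url in URL_REGEX.findall(line):
    for url in URL_REGEX.findall(line[1:]):   # BUG: clobbers the outer `url`
        pass
    print(url)   # prints the inner sub-URL, not the full archive.org URL

# Distinct names keep both values live:
for url in URL_REGEX.findall(line):
    for sub_url in URL_REGEX.findall(line[1:]):
        print(url, '->', sub_url)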

@@ -45,7 +45,7 @@ def parse_wallabag_atom_export(rss_file: IO[str], **_kwargs) -> Iterable[Link]:
         time = datetime.strptime(ts_str, "%Y-%m-%dT%H:%M:%S%z")
         try:
             tags = str_between(get_row('category'), 'label="', '" />')
-        except:
+        except Exception:
             tags = None
 
         yield Link(
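
Catching Exception instead of using a bare except: keeps the tags = None fallback for ordinary parsing failures while letting KeyboardInterrupt and SystemExit propagate, since those derive from BaseException and a bare except would swallow them too. A sketch with a hypothetical tags_from() helper standing in for the str_between(get_row('category'), ...) call:

def tags_from(row):
    # Hypothetical stand-in for str_between(get_row('category'), 'label="', '" />')
    try:
        return row.split('label="', 1)[1].split('" />', 1)[0]
    except Exception:   # a bare `except:` would also trap KeyboardInterrupt/SystemExit
        return None

print(tags_from('<category label="python" />'))   # python
print(tags_from('<category />'))                  # None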