mirror of
https://github.com/ArchiveBox/ArchiveBox
synced 2024-11-10 06:34:16 +00:00
Added headers extractor
This commit is contained in:
parent
eb34a6af62
commit
852e3c9cff
3 changed files with 92 additions and 0 deletions
|
@ -35,6 +35,7 @@ from .dom import should_save_dom, save_dom
|
||||||
from .git import should_save_git, save_git
|
from .git import should_save_git, save_git
|
||||||
from .media import should_save_media, save_media
|
from .media import should_save_media, save_media
|
||||||
from .archive_org import should_save_archive_dot_org, save_archive_dot_org
|
from .archive_org import should_save_archive_dot_org, save_archive_dot_org
|
||||||
|
from .headers import should_save_headers, save_headers
|
||||||
|
|
||||||
def get_default_archive_methods():
|
def get_default_archive_methods():
|
||||||
return [
|
return [
|
||||||
|
@ -49,6 +50,7 @@ def get_default_archive_methods():
|
||||||
('mercury', should_save_mercury, save_mercury),
|
('mercury', should_save_mercury, save_mercury),
|
||||||
('git', should_save_git, save_git),
|
('git', should_save_git, save_git),
|
||||||
('media', should_save_media, save_media),
|
('media', should_save_media, save_media),
|
||||||
|
('headers', should_save_headers, save_headers),
|
||||||
('archive_org', should_save_archive_dot_org, save_archive_dot_org),
|
('archive_org', should_save_archive_dot_org, save_archive_dot_org),
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
76
archivebox/extractors/headers.py
Normal file
76
archivebox/extractors/headers.py
Normal file
|
@ -0,0 +1,76 @@
|
||||||
|
__package__ = 'archivebox.extractors'
|
||||||
|
|
||||||
|
from pathlib import Path
|
||||||
|
from tempfile import NamedTemporaryFile
|
||||||
|
|
||||||
|
from typing import Optional
|
||||||
|
import json
|
||||||
|
|
||||||
|
from ..index.schema import Link, ArchiveResult, ArchiveError
|
||||||
|
from ..system import run, atomic_write
|
||||||
|
from ..util import (
|
||||||
|
enforce_types,
|
||||||
|
get_headers,
|
||||||
|
is_static_file,
|
||||||
|
)
|
||||||
|
from ..config import (
|
||||||
|
TIMEOUT,
|
||||||
|
CURL_BINARY,
|
||||||
|
CURL_USER_AGENT,
|
||||||
|
CURL_VERSION,
|
||||||
|
CHECK_SSL_VALIDITY,
|
||||||
|
DEPENDENCIES,
|
||||||
|
)
|
||||||
|
from ..logging_util import TimedProgress
|
||||||
|
|
||||||
|
@enforce_types
def should_save_headers(link: Link, out_dir: Optional[str]=None) -> bool:
    """Decide whether response headers should be archived for this link.

    Returns False for static files (which are archived verbatim elsewhere)
    and for links whose output directory already contains a headers.json
    from a previous run, so the extractor is not re-run needlessly.
    """
    if is_static_file(link.url):
        return False

    # Fixed: the original assigned `out_dir = out_dir or link.link_dir`
    # and then redundantly evaluated `out_dir or link.link_dir` again
    # when building the path; resolve the directory exactly once.
    output = Path(out_dir or link.link_dir) / 'headers.json'
    return not output.exists()
|
||||||
|
|
||||||
|
|
||||||
|
@enforce_types
def save_headers(link: Link, out_dir: Optional[str]=None, timeout: int=TIMEOUT) -> ArchiveResult:
    """Download site headers and save them to headers.json in the link dir.

    The actual fetch happens in-process via get_headers(); the curl command
    list below is only recorded in the ArchiveResult for provenance, it is
    never executed here.
    """
    out_dir = Path(out_dir or link.link_dir)
    output_folder = out_dir.absolute()
    output: ArchiveOutput = 'headers.json'

    status = 'succeeded'
    timer = TimedProgress(timeout, prefix=' ')

    # Equivalent curl invocation, recorded for the index.
    # Fixed: the original included '-X' with no method argument (curl would
    # have consumed '-D' as the request method) and '-D' with no dump-file
    # argument; '-s -I' alone silently fetches and prints the headers.
    cmd = [
        CURL_BINARY,
        '-s',
        '-I',
        *(['--user-agent', '{}'.format(CURL_USER_AGENT)] if CURL_USER_AGENT else []),
        *([] if CHECK_SSL_VALIDITY else ['--insecure']),
        link.url,
    ]
    try:
        json_headers = get_headers(link.url)

        output_folder.mkdir(exist_ok=True)
        atomic_write(str(output_folder / "headers.json"), json_headers)
    except Exception as err:
        # Fixed: the original caught (Exception, OSError), but OSError is
        # already a subclass of Exception, so the tuple was redundant.
        status = 'failed'
        output = err
    finally:
        # Always stop the progress timer, even on failure.
        timer.end()

    return ArchiveResult(
        cmd=cmd,
        pwd=str(out_dir),
        cmd_version=CURL_VERSION,
        output=output,
        status=status,
        **timer.stats,
    )
|
|
@ -173,6 +173,20 @@ def download_url(url: str, timeout: int=None) -> str:
|
||||||
|
|
||||||
return response.text
|
return response.text
|
||||||
|
|
||||||
|
@enforce_types
def get_headers(url: str, timeout: int=None) -> str:
    """Fetch a remote url and return its response headers as a JSON string.

    Only the headers are needed, so the request is streamed: the response
    body is never downloaded, and the context manager releases the
    connection without consuming it.
    """
    from .config import TIMEOUT, CHECK_SSL_VALIDITY, WGET_USER_AGENT
    timeout = timeout or TIMEOUT

    # Fixed: the original plain requests.get() downloaded the entire
    # response body just to read the headers and never closed the
    # response; stream=True defers the body and `with` closes it.
    with requests.get(
        url,
        headers={'User-Agent': WGET_USER_AGENT},
        verify=CHECK_SSL_VALIDITY,
        timeout=timeout,
        stream=True,
    ) as response:
        return pyjson.dumps(dict(response.headers))
|
||||||
|
|
||||||
|
|
||||||
@enforce_types
|
@enforce_types
|
||||||
def chrome_args(**options) -> List[str]:
|
def chrome_args(**options) -> List[str]:
|
||||||
|
|
Loading…
Reference in a new issue