mirror of https://github.com/ArchiveBox/ArchiveBox
synced 2024-11-21 19:53:06 +00:00
Add htmltotext extractor
Saves HTML text nodes and selected element attributes in `htmltotext.txt` for each Snapshot. Primarily intended to be used for search indexing.
parent 6555719489
commit 310b4d1242
9 changed files with 203 additions and 104 deletions
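
As a rough illustration of what the new extractor writes to `htmltotext.txt` (this snippet is not part of the commit, it just exercises the HTMLTextExtractor class added below): text nodes come out separated by spaces, whitelisted attribute values are wrapped in parentheses, and script/style/template content is skipped.

# Illustration only, using the HTMLTextExtractor class added in this commit.
from archivebox.extractors.htmltotext import HTMLTextExtractor

html = '<p title="Greeting">Hello <a href="https://example.com">world</a></p><script>skip()</script>'
extractor = HTMLTextExtractor()
extractor.feed(html)
extractor.close()
print(str(extractor))   # roughly: "(Greeting) Hello (https://example.com) world"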

@@ -134,6 +134,7 @@ CONFIG_SCHEMA: Dict[str, ConfigDefaultDict] = {
     'SAVE_SINGLEFILE': {'type': bool, 'default': True, 'aliases': ('FETCH_SINGLEFILE',)},
     'SAVE_READABILITY': {'type': bool, 'default': True, 'aliases': ('FETCH_READABILITY',)},
     'SAVE_MERCURY': {'type': bool, 'default': True, 'aliases': ('FETCH_MERCURY',)},
+    'SAVE_HTMLTOTEXT': {'type': bool, 'default': True, 'aliases': ('FETCH_HTMLTOTEXT',)},
     'SAVE_PDF': {'type': bool, 'default': True, 'aliases': ('FETCH_PDF',)},
     'SAVE_SCREENSHOT': {'type': bool, 'default': True, 'aliases': ('FETCH_SCREENSHOT',)},
     'SAVE_DOM': {'type': bool, 'default': True, 'aliases': ('FETCH_DOM',)},
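
The new `SAVE_HTMLTOTEXT` flag follows the same pattern as the other `SAVE_*` options, so it should be toggleable per run through the environment, the same way the test fixture further down disables it. A hypothetical sketch (the URL and env-passing style are illustrative, not from this commit):

# Hypothetical sketch: disable the htmltotext extractor for a single archivebox run.
import os
import subprocess

subprocess.run(
    ['archivebox', 'add', 'https://example.com'],
    env={**os.environ, 'SAVE_HTMLTOTEXT': 'false'},
    capture_output=True,
)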

archivebox/core/migrations/0022_auto_20231023_2008.py (new file, 18 lines)
@@ -0,0 +1,18 @@
+# Generated by Django 3.1.14 on 2023-10-23 20:08
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('core', '0021_auto_20220914_0934'),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='archiveresult',
+            name='extractor',
+            field=models.CharField(choices=[('favicon', 'favicon'), ('headers', 'headers'), ('singlefile', 'singlefile'), ('pdf', 'pdf'), ('screenshot', 'screenshot'), ('dom', 'dom'), ('wget', 'wget'), ('title', 'title'), ('readability', 'readability'), ('mercury', 'mercury'), ('htmltotext', 'htmltotext'), ('git', 'git'), ('media', 'media'), ('archive_org', 'archive_org')], max_length=32),
+        ),
+    ]

@@ -33,6 +33,7 @@ from .wget import should_save_wget, save_wget
 from .singlefile import should_save_singlefile, save_singlefile
 from .readability import should_save_readability, save_readability
 from .mercury import should_save_mercury, save_mercury
+from .htmltotext import should_save_htmltotext, save_htmltotext
 from .pdf import should_save_pdf, save_pdf
 from .screenshot import should_save_screenshot, save_screenshot
 from .dom import should_save_dom, save_dom

@@ -51,15 +52,24 @@ def get_default_archive_methods():
         ('screenshot', should_save_screenshot, save_screenshot),
         ('dom', should_save_dom, save_dom),
         ('wget', should_save_wget, save_wget),
-        ('title', should_save_title, save_title), # keep title and readability below wget and singlefile, as it depends on them
+        # keep title, readability, and htmltotext below wget and singlefile, as they depend on them
+        ('title', should_save_title, save_title),
         ('readability', should_save_readability, save_readability),
         ('mercury', should_save_mercury, save_mercury),
+        ('htmltotext', should_save_htmltotext, save_htmltotext),
         ('git', should_save_git, save_git),
         ('media', should_save_media, save_media),
         ('archive_org', should_save_archive_dot_org, save_archive_dot_org),
     ]
 
-ARCHIVE_METHODS_INDEXING_PRECEDENCE = [('readability', 1), ('singlefile', 2), ('dom', 3), ('wget', 4)]
+ARCHIVE_METHODS_INDEXING_PRECEDENCE = [
+    ('readability', 1),
+    ('mercury', 2),
+    ('htmltotext', 3),
+    ('singlefile', 4),
+    ('dom', 5),
+    ('wget', 6)
+]
 
 @enforce_types
 def ignore_methods(to_ignore: List[str]):
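
The indexing precedence list now ranks htmltotext just below readability and mercury. A minimal sketch of how such a precedence list could be used to pick which extractor output feeds the search index (assumed logic for illustration, not code from this commit):

# Assumed illustration: choose the highest-precedence extractor that produced output.
ARCHIVE_METHODS_INDEXING_PRECEDENCE = [
    ('readability', 1), ('mercury', 2), ('htmltotext', 3),
    ('singlefile', 4), ('dom', 5), ('wget', 6),
]

def pick_indexable_method(available_outputs: dict) -> str:
    """available_outputs maps extractor name -> extracted text (may be empty)."""
    for name, _rank in sorted(ARCHIVE_METHODS_INDEXING_PRECEDENCE, key=lambda pair: pair[1]):
        if available_outputs.get(name):
            return name
    return ''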

archivebox/extractors/htmltotext.py (new file, 154 lines)
@@ -0,0 +1,154 @@
+__package__ = 'archivebox.extractors'
+
+from html.parser import HTMLParser
+import io
+from pathlib import Path
+from typing import Optional
+
+from ..config import (
+    SAVE_HTMLTOTEXT,
+    TIMEOUT,
+    VERSION,
+)
+from ..index.schema import Link, ArchiveResult, ArchiveError
+from ..logging_util import TimedProgress
+from ..system import atomic_write
+from ..util import (
+    enforce_types,
+    is_static_file,
+)
+from .title import get_html
+
+class HTMLTextExtractor(HTMLParser):
+    TEXT_ATTRS = [
+        "alt", "cite", "href", "label",
+        "list", "placeholder", "title", "value"
+    ]
+    NOTEXT_TAGS = ["script", "style", "template"]
+    NOTEXT_HREF = ["data:", "javascript:", "#"]
+
+    def __init__(self):
+        super().__init__()
+
+        self.output = io.StringIO()
+        self._tag_stack = []
+
+    def _is_text_attr(self, name, value):
+        if not isinstance(value, str):
+            return False
+        if name == "href" and any(map(lambda p: value.startswith(p), self.NOTEXT_HREF)):
+            return False
+
+        if name in self.TEXT_ATTRS:
+            return True
+
+        return False
+
+    def _parent_tag(self):
+        try:
+            return self._tag_stack[-1]
+        except IndexError:
+            return None
+
+    def _in_notext_tag(self):
+        return any([t in self._tag_stack for t in self.NOTEXT_TAGS])
+
+    def handle_starttag(self, tag, attrs):
+        self._tag_stack.append(tag)
+
+        # Don't write out attribute values if any ancestor
+        # is in NOTEXT_TAGS
+        if self._in_notext_tag():
+            return
+
+        for name, value in attrs:
+            if self._is_text_attr(name, value):
+                self.output.write(f"({value.strip()}) ")
+
+    def handle_endtag(self, tag):
+        orig_stack = self._tag_stack.copy()
+        try:
+            # Keep popping tags until we find the nearest
+            # ancestor matching this end tag
+            while tag != self._tag_stack.pop():
+                pass
+            # Write a space after every tag, to ensure that tokens
+            # in tag text aren't concatenated. This may result in
+            # excess spaces, which should be ignored by search tokenizers.
+            if not self._in_notext_tag() and tag not in self.NOTEXT_TAGS:
+                self.output.write(" ")
+        except IndexError:
+            # Got to the top of the stack, but somehow missed
+            # this end tag -- maybe malformed markup -- restore the
+            # stack
+            self._tag_stack = orig_stack
+
+    def handle_data(self, data):
+        # Don't output text data if any ancestor is in NOTEXT_TAGS
+        if self._in_notext_tag():
+            return
+
+        data = data.lstrip()
+        len_before_rstrip = len(data)
+        data = data.rstrip()
+        spaces_rstripped = len_before_rstrip - len(data)
+        if data:
+            self.output.write(data)
+            if spaces_rstripped:
+                # Add back a single space if 1 or more
+                # whitespace characters were stripped
+                self.output.write(' ')
+
+    def __str__(self):
+        return self.output.getvalue()
+
+
+@enforce_types
+def should_save_htmltotext(link: Link, out_dir: Optional[Path]=None, overwrite: Optional[bool]=False) -> bool:
+    if is_static_file(link.url):
+        return False
+
+    out_dir = out_dir or Path(link.link_dir)
+    if not overwrite and (out_dir / 'htmltotext.txt').exists():
+        return False
+
+    return SAVE_HTMLTOTEXT
+
+
+@enforce_types
+def save_htmltotext(link: Link, out_dir: Optional[Path]=None, timeout: int=TIMEOUT) -> ArchiveResult:
+    """extract search-indexing-friendly text from an HTML document"""
+
+    out_dir = Path(out_dir or link.link_dir)
+    output = "htmltotext.txt"
+
+    timer = TimedProgress(timeout, prefix='      ')
+    extracted_text = None
+    try:
+        extractor = HTMLTextExtractor()
+        document = get_html(link, out_dir)
+
+        if not document:
+            raise ArchiveError('htmltotext could not find HTML to parse for article text')
+
+        extractor.feed(document)
+        extractor.close()
+        extracted_text = str(extractor)
+
+        atomic_write(str(out_dir / output), extracted_text)
+    except (Exception, OSError) as err:
+        status = 'failed'
+        output = err
+        cmd = ['(internal) archivebox.extractors.htmltotext', './{singlefile,dom}.html']
+    finally:
+        timer.end()
+
+    return ArchiveResult(
+        cmd=cmd,
+        pwd=str(out_dir),
+        cmd_version=VERSION,
+        output=output,
+        status=status,
+        index_texts=[extracted_text] if extracted_text else [],
+        **timer.stats,
+    )

@@ -143,7 +143,7 @@ def snapshot_icons(snapshot) -> str:
         "mercury": "🅼",
         "warc": "📦"
     }
-    exclude = ["favicon", "title", "headers", "archive_org"]
+    exclude = ["favicon", "title", "headers", "htmltotext", "archive_org"]
     # Missing specific entry for WARC
 
     extractor_outputs = defaultdict(lambda: None)

@@ -429,6 +429,7 @@ class Link:
             'singlefile_path': 'singlefile.html',
             'readability_path': 'readability/content.html',
             'mercury_path': 'mercury/content.html',
+            'htmltotext_path': 'htmltotext.txt',
             'pdf_path': 'output.pdf',
             'screenshot_path': 'screenshot.png',
             'dom_path': 'output.html',

@@ -452,6 +453,7 @@ class Link:
             'singlefile_path': static_path,
             'readability_path': static_path,
             'mercury_path': static_path,
+            'htmltotext_path': static_path,
         })
         return canonical
 

@@ -1,117 +1,23 @@
-from html.parser import HTMLParser
-import io
-
 from django.db.models import QuerySet
 
 from archivebox.util import enforce_types
-from archivebox.config import ANSI, SEARCH_PROCESS_HTML
-
-BLOCK_SIZE = 32768
+from archivebox.config import ANSI
 
 def log_index_started(url):
     print('{green}[*] Indexing url: {} in the search index {reset}'.format(url, **ANSI))
     print( )
 
-class HTMLTextExtractor(HTMLParser):
-
-    TEXT_ATTRS = ["alt", "cite", "href", "label", "list", "placeholder", "title", "value"]
-    NOTEXT_TAGS = ["script", "style", "template"]
-    NOTEXT_HREF = ["data:", "javascript:", "#"]
-
-    def __init__(self):
-        super().__init__()
-
-        self.output = io.StringIO()
-        self._tag_stack = []
-
-    def _is_text_attr(self, name, value):
-        if not isinstance(value, str):
-            return False
-        if name == "href" and any(map(lambda p: value.startswith(p), self.NOTEXT_HREF)):
-            return False
-
-        if name in self.TEXT_ATTRS:
-            return True
-
-        return False
-
-    def _parent_tag(self):
-        try:
-            return self._tag_stack[-1]
-        except IndexError:
-            return None
-
-    def _in_notext_tag(self):
-        return any([t in self._tag_stack for t in self.NOTEXT_TAGS])
-
-    def handle_starttag(self, tag, attrs):
-        self._tag_stack.append(tag)
-
-        # Don't write out attribute values if any ancestor
-        # is in NOTEXT_TAGS
-        if self._in_notext_tag():
-            return
-
-        for name, value in attrs:
-            if self._is_text_attr(name, value):
-                self.output.write(value.strip())
-                self.output.write(" ")
-
-    def handle_endtag(self, tag):
-        orig_stack = self._tag_stack.copy()
-        try:
-            # Keep popping tags until we find the nearest
-            # ancestor matching this end tag
-            while tag != self._tag_stack.pop():
-                pass
-            # Write a space after every tag, to ensure that tokens
-            # in tag text aren't concatenated. This may result in
-            # excess spaces, which should be ignored by search tokenizers.
-            if not self._in_notext_tag() and tag not in self.NOTEXT_TAGS:
-                self.output.write(" ")
-        except IndexError:
-            # Got to the top of the stack, but somehow missed
-            # this end tag -- maybe malformed markup -- restore the
-            # stack
-            self._tag_stack = orig_stack
-
-    def handle_data(self, data):
-        # Don't output text data if any ancestor is in NOTEXT_TAGS
-        if self._in_notext_tag():
-            return
-
-        self.output.write(data)
-
-    def __str__(self):
-        return self.output.getvalue()
-
-
-def _read_all(file: io.TextIOBase) -> str:
-    return file.read()
-
-
-def _extract_html_text(file: io.TextIOBase) -> str:
-    extractor = HTMLTextExtractor()
-    while (block := file.read(BLOCK_SIZE)):
-        extractor.feed(block)
-    else:
-        extractor.close()
-
-    return str(extractor)
-
-
-def get_file_result_content(res, extra_path, use_pwd=False, *, filter=_read_all):
+def get_file_result_content(res, extra_path, use_pwd=False):
     if use_pwd:
         fpath = f'{res.pwd}/{res.output}'
     else:
         fpath = f'{res.output}'
 
     if extra_path:
         fpath = f'{fpath}/{extra_path}'
 
-    with open(fpath, 'r', encoding='utf-8', errors='replace') as file:
-        data = filter(file)
+    with open(fpath, 'r', encoding='utf-8') as file:
+        data = file.read()
     if data:
         return [data]
     return []

@@ -132,8 +38,7 @@ def get_indexable_content(results: QuerySet):
     if method == 'readability':
         return get_file_result_content(res, 'content.txt', use_pwd=True)
     elif method == 'singlefile':
-        filter = _extract_html_text if SEARCH_PROCESS_HTML else _read_all
-        return get_file_result_content(res, '', use_pwd=True, filter=filter)
+        return get_file_result_content(res, '', use_pwd=True)
     elif method == 'dom':
         return get_file_result_content(res, '', use_pwd=True)
     elif method == 'wget':
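
With the `SEARCH_PROCESS_HTML` filter path removed above, HTML is no longer stripped at index time here; the plain-text representation now comes from the htmltotext extractor's output instead. A hypothetical helper (not part of this commit, assuming the usual `archive/<timestamp>/` snapshot layout) that reads that artifact directly:

# Hypothetical sketch: read the htmltotext.txt artifact produced for a snapshot.
from pathlib import Path

def read_htmltotext(snapshot_dir: str) -> str:
    path = Path(snapshot_dir) / 'htmltotext.txt'
    return path.read_text(encoding='utf-8') if path.exists() else ''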

@@ -17,6 +17,7 @@ def disable_extractors_dict():
         "USE_SINGLEFILE": "false",
         "USE_READABILITY": "false",
         "USE_MERCURY": "false",
+        "SAVE_HTMLTOTEXT": "false",
         "SAVE_PDF": "false",
         "SAVE_SCREENSHOT": "false",
         "SAVE_DOM": "false",

@@ -39,6 +39,14 @@ def test_mercury_works(tmp_path, process, disable_extractors_dict):
     output_file = archived_item_path / "mercury" / "content.html"
     assert output_file.exists()
 
+def test_htmltotext_works(tmp_path, process, disable_extractors_dict):
+    disable_extractors_dict.update({"SAVE_HTMLTOTEXT": "true"})
+    add_process = subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'],
+                                 capture_output=True, env=disable_extractors_dict)
+    archived_item_path = list(tmp_path.glob("archive/**/*"))[0]
+    output_file = archived_item_path / "htmltotext.txt"
+    assert output_file.exists()
+
 def test_readability_works_with_wget(tmp_path, process, disable_extractors_dict):
     disable_extractors_dict.update({"USE_READABILITY": "true", "USE_WGET": "true"})
     add_process = subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/example.com.html'],