ArchiveBox/archivebox/search/__init__.py

__package__ = 'archivebox.search'

from pathlib import Path
from typing import List, Union

from django.db.models import QuerySet
from django.conf import settings

import abx
import archivebox

from archivebox.index.schema import Link
from archivebox.misc.util import enforce_types
from archivebox.misc.logging import stderr
from archivebox.config.common import SEARCH_BACKEND_CONFIG

def log_index_started(url):
    print('[green][*] Indexing url: {} in the search index[/]'.format(url))
    print()

def get_file_result_content(res, extra_path, use_pwd=False):
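    """Read the text of an ArchiveResult's output file and return it as a one-item list, or [] if the file is empty."""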
    if use_pwd:
        fpath = f'{res.pwd}/{res.output}'
    else:
        fpath = f'{res.output}'

    if extra_path:
        fpath = f'{fpath}/{extra_path}'

    with open(fpath, 'r', encoding='utf-8') as file:
        data = file.read()

    if data:
        return [data]
    return []

# This should be abstracted by a plugin interface for extractors
@enforce_types
def get_indexable_content(results: QuerySet):
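    """Collect the plain-text content to index from a Snapshot's ArchiveResults, using only the first available extractor method."""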
    if not results:
        return []
    # Only use the first method available
    res = results.first()
    method = res.extractor
    if method not in ('readability', 'singlefile', 'dom', 'wget'):
        return []
    # This should come from a plugin interface
    # TODO: banish this duplication and get these from the extractor file
    if method == 'readability':
        return get_file_result_content(res, 'content.txt', use_pwd=True)
    elif method == 'singlefile':
        return get_file_result_content(res, '', use_pwd=True)
    elif method == 'dom':
        return get_file_result_content(res, '', use_pwd=True)
    elif method == 'wget':
        return get_file_result_content(res, '', use_pwd=True)

def import_backend():
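    """Return the search backend plugin that matches SEARCH_BACKEND_CONFIG.SEARCH_BACKEND_ENGINE, or raise if none is found."""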
    for backend in abx.as_dict(archivebox.pm.hook.get_SEARCHBACKENDS()).values():
        if backend.name == SEARCH_BACKEND_CONFIG.SEARCH_BACKEND_ENGINE:
            return backend
    raise Exception(f'Could not load {SEARCH_BACKEND_CONFIG.SEARCH_BACKEND_ENGINE} as search backend')

@enforce_types
def write_search_index(link: Link, texts: Union[List[str], None]=None, out_dir: Path=settings.DATA_DIR, skip_text_index: bool=False) -> None:
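    """Send the given texts for a Link's Snapshot to the configured indexing backend (no-op if indexing is disabled)."""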
    if not SEARCH_BACKEND_CONFIG.USE_INDEXING_BACKEND:
        return

    if not skip_text_index and texts:
        from core.models import Snapshot

        snap = Snapshot.objects.filter(url=link.url).first()
        backend = import_backend()

        if snap:
            try:
                backend.index(snapshot_id=str(snap.pk), texts=texts)
            except Exception as err:
                stderr()
                stderr(
                    f'[X] The search backend threw an exception={err}:',
                    color='red',
                )

@enforce_types
def query_search_index(query: str, out_dir: Path=settings.DATA_DIR) -> QuerySet:
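    """Search the configured backend for the query and return the matching Snapshots as a QuerySet (empty if searching is disabled)."""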
    from core.models import Snapshot

    if SEARCH_BACKEND_CONFIG.USE_SEARCHING_BACKEND:
        backend = import_backend()
        try:
            snapshot_pks = backend.search(query)
        except Exception as err:
            stderr()
            stderr(
                f'[X] The search backend threw an exception={err}:',
                color='red',
            )
            raise
        else:
            # TODO: preserve ordering from backend
            qsearch = Snapshot.objects.filter(pk__in=snapshot_pks)
            return qsearch

    return Snapshot.objects.none()

@enforce_types
def flush_search_index(snapshots: QuerySet):
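    """Remove the given Snapshots from the search backend's index (no-op if indexing is disabled or snapshots is empty)."""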
    if not SEARCH_BACKEND_CONFIG.USE_INDEXING_BACKEND or not snapshots:
        return

    backend = import_backend()
    snapshot_pks = (str(pk) for pk in snapshots.values_list('pk', flat=True))
    try:
        backend.flush(snapshot_pks)
    except Exception as err:
        stderr()
        stderr(
            f'[X] The search backend threw an exception={err}:',
            color='red',
        )

@enforce_types
def index_links(links: Union[List[Link],None], out_dir: Path=settings.DATA_DIR):
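    """Extract the indexable content for each Link's Snapshot and write it to the search index."""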
    if not links:
        return

    from core.models import Snapshot, ArchiveResult

    for link in links:
        snap = Snapshot.objects.filter(url=link.url).first()
        if snap:
            results = ArchiveResult.objects.indexable().filter(snapshot=snap)
            log_index_started(link.url)
            try:
                texts = get_indexable_content(results)
            except Exception as err:
                stderr()
                stderr(
                    f'[X] An exception occurred while reading the indexable content={err}:',
                    color='red',
                )
            else:
                write_search_index(link, texts, out_dir=out_dir)