From 2291f021479b6b6d64fab0da9cd1c74dff6a59f2 Mon Sep 17 00:00:00 2001
From: Nick Sweeting
Date: Sat, 16 Nov 2024 02:48:17 -0800
Subject: [PATCH] setup seed model

---
 archivebox/seeds/models.py | 66 +++++++++++++++++++++++++++++++++-----
 1 file changed, 58 insertions(+), 8 deletions(-)

diff --git a/archivebox/seeds/models.py b/archivebox/seeds/models.py
index 7fe49c83..ce96c913 100644
--- a/archivebox/seeds/models.py
+++ b/archivebox/seeds/models.py
@@ -1,11 +1,19 @@
 __package__ = 'archivebox.seeds'
 
+from typing import TYPE_CHECKING
+from pathlib import Path
 
 from django.db import models
+from django.db.models import QuerySet
 from django.conf import settings
+from django.urls import reverse_lazy
 
+from archivebox.config import CONSTANTS
+from abid_utils.models import ABIDModel, ABIDField, AutoDateTimeField, ModelWithHealthStats, get_or_create_system_user_pk
 
-from abid_utils.models import ABIDModel, ABIDField, AutoDateTimeField, ModelWithHealthStats
+if TYPE_CHECKING:
+    from crawls.models import Crawl, CrawlSchedule
+    from core.models import Snapshot
 
 
 class Seed(ABIDModel, ModelWithHealthStats):
@@ -40,15 +48,42 @@ class Seed(ABIDModel, ModelWithHealthStats):
     abid = ABIDField(prefix=abid_prefix)
 
     uri = models.URLField(max_length=2000, blank=False, null=False)                   # unique source location where URLs will be loaded from
+    label = models.CharField(max_length=255, null=False, blank=True, default='', help_text='A human-readable label for this seed')
+    notes = models.TextField(blank=True, null=False, default='', help_text='Any extra notes this seed should have')
 
-    extractor = models.CharField(default='auto', max_length=32)                       # suggested extractor to use to load this URL source
-    tags_str = models.CharField(max_length=255, null=False, blank=True, default='')   # tags to attach to any URLs that come from this source
-    config = models.JSONField(default=dict)                                           # extra config to put in scope when loading URLs from this source
+    extractor = models.CharField(default='auto', max_length=32, help_text='The parser / extractor to use to load URLs from this source (default: auto)')
+    tags_str = models.CharField(max_length=255, null=False, blank=True, default='', help_text='An optional comma-separated list of tags to attach to any URLs that come from this source')
+    config = models.JSONField(default=dict, help_text='An optional JSON object containing extra config to put in scope when loading URLs from this source')
 
     created_at = AutoDateTimeField(default=None, null=False, db_index=True)
     modified_at = models.DateTimeField(auto_now=True)
     created_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, default=None, null=False)
+
+    crawl_set: models.Manager['Crawl']
+
+    class Meta:
+        verbose_name = 'Seed'
+        verbose_name_plural = 'Seeds'
+
+        unique_together = (('created_by', 'uri', 'extractor'),)
+
+
+    @classmethod
+    def from_file(cls, source_file: Path, label: str='', parser: str='auto', tag: str='', created_by: int|None=None, config: dict|None=None):
+        source_path = str(source_file.resolve()).replace(str(CONSTANTS.DATA_DIR), '/data')
+
+        seed, _ = cls.objects.get_or_create(
+            label=label or source_file.name,
+            uri=f'file://{source_path}',
+            created_by_id=getattr(created_by, 'pk', created_by) or get_or_create_system_user_pk(),
+            extractor=parser,
+            tags_str=tag,
+            config=config or {},
+        )
+        seed.save()
+        return seed
+
 
     @property
     def source_type(self):
         # e.g. http/https://
@@ -58,8 +93,23 @@ class Seed(ABIDModel, ModelWithHealthStats):
         #      etc..
         return self.uri.split('://', 1)[0].lower()
 
-    class Meta:
-        verbose_name = 'Seed'
-        verbose_name_plural = 'Seeds'
+    @property
+    def api_url(self) -> str:
+        # /api/v1/core/seed/{uulid}
+        return reverse_lazy('api-1:get_seed', args=[self.abid])  # + f'?api_key={get_or_create_api_token(request.user)}'
+
+    @property
+    def api_docs_url(self) -> str:
+        return '/api/v1/docs#/Core%20Models/api_v1_core_get_seed'
+
+    @property
+    def scheduled_crawl_set(self) -> QuerySet['CrawlSchedule']:
+        from crawls.models import CrawlSchedule
+        return CrawlSchedule.objects.filter(template__seed_id=self.pk)
+
+    @property
+    def snapshot_set(self) -> QuerySet['Snapshot']:
+        from core.models import Snapshot
 
-        unique_together = (('created_by', 'uri', 'extractor'),)
+        crawl_ids = self.crawl_set.values_list('pk', flat=True)
+        return Snapshot.objects.filter(crawl_id__in=crawl_ids)
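
Reviewer note (not part of the patch): a minimal usage sketch of the new Seed API added above, assuming a configured Django environment with the seeds app migrated. The source file path, the tag/label values, and the DEPTH config key are hypothetical, shown only for illustration; the import path follows ArchiveBox's flat app layout as used in the patch's own TYPE_CHECKING imports.

from pathlib import Path

from seeds.models import Seed

# from_file() is effectively idempotent: get_or_create() plus the
# (created_by, uri, extractor) unique_together constraint means calling
# it twice with the same file returns the same Seed row.
seed = Seed.from_file(
    Path('/data/sources/2024-11-16__cli_add.txt'),  # hypothetical sources file
    label='cli add',
    parser='auto',
    tag='imported',
    created_by=None,        # falls back to get_or_create_system_user_pk()
    config={'DEPTH': 1},    # hypothetical config key, for illustration
)

assert seed.source_type == 'file'   # derived from the uri scheme
print(seed.api_url)                 # /api/v1/core/seed/{abid}
print(seed.snapshot_set.count())    # Snapshots reached via this seed's crawls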