__package__ = 'archivebox.parsers'


from typing import IO, Iterable
from datetime import datetime

from ..index.schema import Link
from ..util import (
    htmldecode,
    enforce_types,
    str_between,
)

@enforce_types
def parse_shaarli_rss_export(rss_file: IO[str], **_kwargs) -> Iterable[Link]:
"""Parse Shaarli-specific RSS XML-format files into links"""
rss_file.seek(0)
entries = rss_file.read().split('')[1:]
for entry in entries:
# example entry:
#
# Aktuelle Trojaner-Welle: Emotet lauert in gefälschten Rechnungsmails | heise online
#
# https://demo.shaarli.org/?cEV4vw
# 2019-01-30T06:06:01+00:00
# 2019-01-30T06:06:01+00:00
# — Permalink
]]>
#
trailing_removed = entry.split('', 1)[0]
leading_removed = trailing_removed.strip()
rows = leading_removed.split('\n')
def get_row(key):
return [r.strip() for r in rows if r.strip().startswith('<{}'.format(key))][0]
title = str_between(get_row('title'), '
', '').strip()
url = str_between(get_row('link'), '')
ts_str = str_between(get_row('published'), '', '')
time = datetime.strptime(ts_str, "%Y-%m-%dT%H:%M:%S%z")
        yield Link(
            url=htmldecode(url),
            timestamp=str(time.timestamp()),
            title=htmldecode(title) or None,
            tags=None,
            sources=[rss_file.name],
        )

KEY = 'shaarli_rss'
NAME = 'Shaarli RSS'
PARSER = parse_shaarli_rss_export
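
# A minimal usage sketch (an illustration only, not ArchiveBox's actual entry
# point; the export filename below is hypothetical). In a context where the
# archivebox package is importable, the parser can be exercised directly:
#
#   from archivebox.parsers.shaarli_rss import parse_shaarli_rss_export
#
#   with open('shaarli-export.atom', 'r', encoding='utf-8') as export_file:
#       for link in parse_shaarli_rss_export(export_file):
#           print(link.timestamp, link.url)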