mirror of https://github.com/ArchiveBox/ArchiveBox
synced 2024-11-23 12:43:10 +00:00

Merge pull request #507 from ehainry/master
Add parser for Wallabag Atom feeds

This commit is contained in commit 494af5f2e1

2 changed files with 59 additions and 0 deletions
archivebox/parsers/__init__.py

@@ -33,6 +33,7 @@ from ..logging_util import TimedProgress, log_source_saved

 from .pocket_html import parse_pocket_html_export
 from .pinboard_rss import parse_pinboard_rss_export
+from .wallabag_atom import parse_wallabag_atom_export
 from .shaarli_rss import parse_shaarli_rss_export
 from .medium_rss import parse_medium_rss_export
 from .netscape_html import parse_netscape_html_export
@@ -43,6 +44,7 @@ from .generic_txt import parse_generic_txt_export

 PARSERS = (
     # Specialized parsers
+    ('Wallabag ATOM', parse_wallabag_atom_export),
     ('Pocket HTML', parse_pocket_html_export),
     ('Pinboard RSS', parse_pinboard_rss_export),
     ('Shaarli RSS', parse_shaarli_rss_export),
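The PARSERS tuple above is a registry of (label, parser function) pairs that are tried in order when a source file is imported; because ('Wallabag ATOM', ...) is listed first here, it is attempted before the other specialized parsers. As a rough illustration only, a hypothetical run_parsers helper (not the actual dispatch code in archivebox/parsers/__init__.py) could consume such a registry like this:

# Hypothetical sketch of how a (name, parser) registry can be tried in order.
# run_parsers is illustrative only; the real dispatch in ArchiveBox differs in detail.
from typing import Callable, IO, List, Tuple

def run_parsers(source_file: IO[str],
                parsers: Tuple[Tuple[str, Callable], ...]) -> List:
    """Try each registered parser in order; return links from the first one that succeeds."""
    for name, parser in parsers:
        try:
            links = list(parser(source_file))   # each parser yields Link objects
            if links:
                print(f'Parsed as {name}')
                return links
        except Exception:
            continue   # this parser could not handle the format; try the next one
    return []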
archivebox/parsers/wallabag_atom.py (new file, 57 lines)

@@ -0,0 +1,57 @@
__package__ = 'archivebox.parsers'


from typing import IO, Iterable
from datetime import datetime

from ..index.schema import Link
from ..util import (
    htmldecode,
    enforce_types,
    str_between,
)


@enforce_types
def parse_wallabag_atom_export(rss_file: IO[str], **_kwargs) -> Iterable[Link]:
    """Parse Wallabag Atom files into links"""

    rss_file.seek(0)
    entries = rss_file.read().split('<entry>')[1:]
    for entry in entries:
        # example entry:
        # <entry>
        # <title><![CDATA[Orient Ray vs Mako: Is There Much Difference? - iknowwatches.com]]></title>
        # <link rel="alternate" type="text/html"
        # href="http://wallabag.drycat.fr/view/14041"/>
        # <link rel="via">https://iknowwatches.com/orient-ray-vs-mako/</link>
        # <id>wallabag:wallabag.drycat.fr:milosh:entry:14041</id>
        # <updated>2020-10-18T09:14:02+02:00</updated>
        # <published>2020-10-18T09:13:56+02:00</published>
        # <category term="montres" label="montres" />
        # <content type="html" xml:lang="en">
        # </entry>

        trailing_removed = entry.split('</entry>', 1)[0]
        leading_removed = trailing_removed.strip()
        rows = leading_removed.split('\n')

        def get_row(key):
            return [r.strip() for r in rows if r.strip().startswith('<{}'.format(key))][0]

        title = str_between(get_row('title'), '<title><![CDATA[', ']]></title>').strip()
        url = str_between(get_row('link rel="via"'), '<link rel="via">', '</link>')
        ts_str = str_between(get_row('published'), '<published>', '</published>')
        time = datetime.strptime(ts_str, "%Y-%m-%dT%H:%M:%S%z")
        try:
            tags = str_between(get_row('category'), 'label="', '" />')
        except:
            tags = None

        yield Link(
            url=htmldecode(url),
            timestamp=str(time.timestamp()),
            title=htmldecode(title) or None,
            tags=tags or '',
            sources=[rss_file.name],
        )
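To see what the parser extracts, here is a self-contained sketch that replays the same string-slicing steps on the example entry quoted in the code comments above. The local str_between helper is a stand-in assumed to behave like archivebox.util.str_between; it is not the library function itself.

# Self-contained sketch of the extraction steps above.
# Assumptions: the local str_between mimics archivebox.util.str_between,
# and the sample entry matches the one in the code comments.
# Requires Python 3.7+ so %z accepts a "+02:00" style offset.
from datetime import datetime

def str_between(string, start, end=None):
    # minimal stand-in: text between the first `start` and the last `end`
    content = string.split(start, 1)[-1]
    if end is not None:
        content = content.rsplit(end, 1)[0]
    return content

entry = '''
<title><![CDATA[Orient Ray vs Mako: Is There Much Difference? - iknowwatches.com]]></title>
<link rel="via">https://iknowwatches.com/orient-ray-vs-mako/</link>
<published>2020-10-18T09:13:56+02:00</published>
<category term="montres" label="montres" />
'''

rows = [r.strip() for r in entry.strip().split('\n')]

def get_row(key):
    return [r for r in rows if r.startswith('<{}'.format(key))][0]

title = str_between(get_row('title'), '<title><![CDATA[', ']]></title>').strip()
url = str_between(get_row('link rel="via"'), '<link rel="via">', '</link>')
ts = datetime.strptime(str_between(get_row('published'), '<published>', '</published>'),
                       "%Y-%m-%dT%H:%M:%S%z")
tags = str_between(get_row('category'), 'label="', '" />')

print(title)           # Orient Ray vs Mako: Is There Much Difference? - iknowwatches.com
print(url)             # https://iknowwatches.com/orient-ray-vs-mako/
print(ts.timestamp())  # 1603005236.0
print(tags)            # montres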