Add generic_jsonl parser (#1370)

Nick Sweeting 2024-03-14 20:19:21 -07:00 committed by GitHub
commit 0872c84ba7
6 changed files with 168 additions and 53 deletions
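The new parser ingests newline-delimited JSON (JSONL): each input line is a standalone JSON object in the same shape the existing generic_json parser already accepts (href/url, description, time, tags). A minimal usage sketch based on the mock data and tests added in this commit; the file name, URLs, and tag values below are illustrative only:

# bookmarks.jsonl -- one JSON object per line (hypothetical example file)
{"href":"https://example.com/a","description":"Example A","time":"2014-06-14T15:51:42Z","tags":"Tag1 Tag2"}
{"href":"https://example.com/b","description":"Example B","time":"2014-06-14T15:51:43Z","tags":["Tag3","Tag4 with Space"]}

# pipe it to ArchiveBox on stdin and force the new parser
archivebox add --index-only --parser=jsonl < bookmarks.jsonl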


@@ -44,6 +44,7 @@ from . import medium_rss
from . import netscape_html
from . import generic_rss
from . import generic_json
from . import generic_jsonl
from . import generic_html
from . import generic_txt
from . import url_list
@@ -63,6 +64,7 @@ PARSERS = {
netscape_html.KEY: (netscape_html.NAME, netscape_html.PARSER),
generic_rss.KEY: (generic_rss.NAME, generic_rss.PARSER),
generic_json.KEY: (generic_json.NAME, generic_json.PARSER),
generic_jsonl.KEY: (generic_jsonl.NAME, generic_jsonl.PARSER),
generic_html.KEY: (generic_html.NAME, generic_html.PARSER),
# Catchall fallback parser


@@ -11,29 +11,12 @@ from ..util import (
enforce_types,
)
@enforce_types
def parse_generic_json_export(json_file: IO[str], **_kwargs) -> Iterable[Link]:
"""Parse JSON-format bookmarks export files (produced by pinboard.in/export/, or wallabag)"""
json_file.seek(0)
try:
links = json.load(json_file)
except json.decoder.JSONDecodeError:
# sometimes the first line is a comment or other junk, so try without
json_file.seek(0)
first_line = json_file.readline()
#print(' > Trying JSON parser without first line: "', first_line.strip(), '"', sep= '')
links = json.load(json_file)
# we may fail again, which means we really don't know what to do
# This gets used by generic_jsonl, too
def jsonObjectToLink(link: dict, source: str):
json_date = lambda s: datetime.strptime(s, '%Y-%m-%dT%H:%M:%S%z')
for link in links:
# example line
# {"href":"http:\/\/www.reddit.com\/r\/example","description":"title here","extended":"","meta":"18a973f09c9cc0608c116967b64e0419","hash":"910293f019c2f4bb1a749fb937ba58e3","time":"2014-06-14T15:51:42Z","shared":"no","toread":"no","tags":"reddit android"}]
if link:
# Parse URL
url = link.get('href') or link.get('url') or link.get('URL')
if not url:
@@ -75,14 +58,35 @@ def parse_generic_json_export(json_file: IO[str], **_kwargs) -> Iterable[Link]:
if ',' not in tags:
tags = tags.replace(' ', ',')
yield Link(
return Link(
url=htmldecode(url),
timestamp=ts_str,
title=htmldecode(title) or None,
tags=htmldecode(tags),
sources=[json_file.name],
sources=[source],
)
@enforce_types
def parse_generic_json_export(json_file: IO[str], **_kwargs) -> Iterable[Link]:
"""Parse JSON-format bookmarks export files (produced by pinboard.in/export/, or wallabag)"""
json_file.seek(0)
try:
links = json.load(json_file)
if not isinstance(links, list):
raise Exception('JSON parser expects list of objects, maybe this is JSONL?')
except json.decoder.JSONDecodeError:
# sometimes the first line is a comment or other junk, so try without
json_file.seek(0)
first_line = json_file.readline()
#print(' > Trying JSON parser without first line: "', first_line.strip(), '"', sep= '')
links = json.load(json_file)
# we may fail again, which means we really don't know what to do
for link in links:
if link:
yield jsonObjectToLink(link, json_file.name)
KEY = 'json'
NAME = 'Generic JSON'


@@ -0,0 +1,34 @@
__package__ = 'archivebox.parsers'
import json
from typing import IO, Iterable
from datetime import datetime, timezone
from ..index.schema import Link
from ..util import (
htmldecode,
enforce_types,
)
from .generic_json import jsonObjectToLink
def parse_line(line: str):
if line.strip() != "":
return json.loads(line)
@enforce_types
def parse_generic_jsonl_export(json_file: IO[str], **_kwargs) -> Iterable[Link]:
"""Parse JSONL format bookmarks export files"""
json_file.seek(0)
links = [parse_line(line) for line in json_file]
for link in links:
if link:
yield jsonObjectToLink(link, json_file.name)
KEY = 'jsonl'
NAME = 'Generic JSONL'
PARSER = parse_generic_jsonl_export
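For direct use outside the CLI, the parser is simply a generator over a text file object. A minimal sketch, assuming a local bookmarks.jsonl file (the path is illustrative):

from archivebox.parsers.generic_jsonl import parse_generic_jsonl_export

with open('bookmarks.jsonl', 'r', encoding='utf-8') as f:
    # yields one Link per non-blank JSONL line
    for link in parse_generic_jsonl_export(f):
        print(link.url, link.tags)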


@@ -0,0 +1 @@
{"href":"http://127.0.0.1:8080/static/example.com.html","description":"Example","extended":"","meta":"18a973f09c9cc0608c116967b64e0419","hash":"910293f019c2f4bb1a749fb937ba58e3","time":"2014-06-14T15:51:42Z","shared":"no","toread":"no","tags":"Tag1 Tag2","trap":"http://www.example.com/should-not-exist"}


@@ -0,0 +1,4 @@
{"href":"http://127.0.0.1:8080/static/example.com.html","description":"Example","extended":"","meta":"18a973f09c9cc0608c116967b64e0419","hash":"910293f019c2f4bb1a749fb937ba58e3","time":"2014-06-14T15:51:42Z","shared":"no","toread":"no","tags":"Tag1 Tag2","trap":"http://www.example.com/should-not-exist"}
{"href":"http://127.0.0.1:8080/static/iana.org.html","description":"Example 2","extended":"","meta":"18a973f09c9cc0608c116967b64e0419","hash":"910293f019c2f4bb1a749fb937ba58e3","time":"2014-06-14T15:51:43Z","shared":"no","toread":"no","tags":"Tag3,Tag4 with Space"}
{"href":"http://127.0.0.1:8080/static/shift_jis.html","description":"Example 2","extended":"","meta":"18a973f09c9cc0608c116967b64e0419","hash":"910293f019c2f4bb1a749fb937ba58e3","time":"2014-06-14T15:51:44Z","shared":"no","toread":"no","tags":["Tag5","Tag6 with Space"]}
{"href":"http://127.0.0.1:8080/static/title_og_with_html","description":"Example 2","extended":"","meta":"18a973f09c9cc0608c116967b64e0419","hash":"910293f019c2f4bb1a749fb937ba58e3","time":"2014-06-14T15:51:45Z","shared":"no","toread":"no"}


@@ -216,3 +216,73 @@ def test_atom(tmp_path, process, disable_extractors_dict):
tags = list(map(lambda x: x[0], tags))
assert "Tag1" in tags
assert "Tag2" in tags
def test_jsonl(tmp_path, process, disable_extractors_dict):
with open('../../mock_server/templates/example.jsonl', 'r', encoding='utf-8') as f:
arg_process = subprocess.run(
["archivebox", "add", "--index-only", "--parser=jsonl"],
stdin=f,
capture_output=True,
env=disable_extractors_dict,
)
conn = sqlite3.connect("index.sqlite3")
c = conn.cursor()
urls = c.execute("SELECT url from core_snapshot").fetchall()
tags = c.execute("SELECT name from core_tag").fetchall()
conn.commit()
conn.close()
urls = list(map(lambda x: x[0], urls))
assert "http://127.0.0.1:8080/static/example.com.html" in urls
assert "http://127.0.0.1:8080/static/iana.org.html" in urls
assert "http://127.0.0.1:8080/static/shift_jis.html" in urls
assert "http://127.0.0.1:8080/static/title_og_with_html" in urls
# if the following URL appears, we must have fallen back to another parser
assert not "http://www.example.com/should-not-exist" in urls
tags = list(map(lambda x: x[0], tags))
assert "Tag1" in tags
assert "Tag2" in tags
assert "Tag3" in tags
assert "Tag4 with Space" in tags
assert "Tag5" in tags
assert "Tag6 with Space" in tags
def test_jsonl_single(tmp_path, process, disable_extractors_dict):
with open('../../mock_server/templates/example-single.jsonl', 'r', encoding='utf-8') as f:
arg_process = subprocess.run(
["archivebox", "add", "--index-only", "--parser=jsonl"],
stdin=f,
capture_output=True,
env=disable_extractors_dict,
)
conn = sqlite3.connect("index.sqlite3")
c = conn.cursor()
urls = c.execute("SELECT url from core_snapshot").fetchall()
tags = c.execute("SELECT name from core_tag").fetchall()
conn.commit()
conn.close()
urls = list(map(lambda x: x[0], urls))
assert "http://127.0.0.1:8080/static/example.com.html" in urls
# if the following URL appears, we must have fallen back to another parser
assert not "http://www.example.com/should-not-exist" in urls
tags = list(map(lambda x: x[0], tags))
assert "Tag1" in tags
assert "Tag2" in tags
# make sure the JSON parser rejects a single line of JSONL, which is valid
# JSON but not the expected list-of-objects format
def test_json_single(tmp_path, process, disable_extractors_dict):
with open('../../mock_server/templates/example-single.jsonl', 'r', encoding='utf-8') as f:
arg_process = subprocess.run(
["archivebox", "add", "--index-only", "--parser=json"],
stdin=f,
capture_output=True,
env=disable_extractors_dict,
)
assert 'expects list of objects' in arg_process.stderr.decode("utf-8")