# SPDX-License-Identifier: AGPL-3.0-or-later
"""Library of Congress: query Photo, Print and Drawing from API endpoint_
``photos``.

.. _endpoint: https://www.loc.gov/apis/json-and-yaml/requests/endpoints/

.. note::

   Beside the ``photos`` endpoint_ there are more endpoints available / we are
   looking forward for contributions implementing more endpoints.

"""

from urllib.parse import urlencode

from searx.network import raise_for_httperror
# Engine metadata displayed by searxng and used for engine bookkeeping.
about = {
    "website": 'https://www.loc.gov/pictures/',
    "wikidata_id": 'Q131454',
    "official_api_documentation": 'https://www.loc.gov/api',
    "use_official_api": True,
    "require_api_key": False,
    "results": 'JSON',
}

categories = ['images']
paging = True

# The LOC API exposes several endpoints; this engine queries ``photos``.
endpoint = 'photos'
base_url = 'https://www.loc.gov'
# ``fo=json`` asks the endpoint for a JSON response; ``sp`` is the page number.
search_string = "/{endpoint}/?sp={page}&{query}&fo=json"
def request(query, params):
    """Assemble the LOC request URL for *query* and page ``params['pageno']``.

    Mutates and returns *params* with the full request ``url`` set.
    """
    search_path = search_string.format(
        endpoint=endpoint,
        query=urlencode({'q': query}),
        page=params['pageno'],
    )
    params['url'] = base_url + search_path
    # LOC answers "no results" with an HTTP 404 that still carries a JSON
    # body; the status is inspected in response() instead of raising here.
    params['raise_for_httperror'] = False
    return params
def response(resp):
    """Parse the LOC JSON response into searxng image results.

    Returns an empty list when LOC reports "no results" (a JSON body whose
    ``status`` element is 404); raises via ``raise_for_httperror`` for any
    other HTTP error.
    """
    results = []

    json_data = resp.json()

    json_results = json_data.get('results')
    if not json_results:
        # When a search term has no results, LOC sends JSON in a HTTP 404
        # response and the HTTP status code is set in the 'status' element.
        if json_data.get('status') == 404:
            return results
        raise_for_httperror(resp)
        # raise_for_httperror may return on a non-error status; without
        # results there is nothing to iterate.
        return results

    for result in json_results:
        url = result['item'].get('link')
        if not url:
            continue

        img_list = result.get('image_url')
        if not img_list:
            continue

        title = result['title']
        # Some LOC titles are wrapped in square brackets; strip them.
        if title.startswith('['):
            title = title.strip('[]')

        # ``or [None]`` also covers keys that are present but empty/None,
        # which ``.get(key, [None])[0]`` would not.
        content_items = [
            result['item'].get('created_published_date'),
            (result['item'].get('summary') or [None])[0],
            (result['item'].get('notes') or [None])[0],
            (result['item'].get('part_of') or [None])[0],
        ]

        author = None
        if result['item'].get('creators'):
            author = result['item']['creators'][0]['title']

        results.append(
            {
                'template': 'images.html',
                'url': url,
                'title': title,
                'content': ' / '.join([i for i in content_items if i]),
                # LOC lists image renditions smallest first: use the largest
                # as the image and the smallest as the thumbnail.
                'img_src': img_list[-1],
                'thumbnail_src': img_list[0],
                'author': author,
            }
        )

    return results