# SPDX-License-Identifier: AGPL-3.0-or-later
"""
DuckDuckGo Instant Answer API
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The `DDG-API <https://duckduckgo.com/api>`__ is no longer documented, but from
reverse engineering we can see that some services (e.g. instant answers) are
still in use by the DDG search engine.

As far as we can tell, the *instant answers* API does not support languages, or
at least we could not find out how language support is supposed to work.  It
seems that most of the features are based on English terms.

"""

from typing import TYPE_CHECKING

from urllib.parse import urlencode, urlparse, urljoin
from lxml import html

from searx.data import WIKIDATA_UNITS
from searx.utils import extract_text, html_to_text, get_string_replaces_function
from searx.external_urls import get_external_url, get_earth_coordinates_url, area_to_osm_zoom
from searx.result_types import EngineResults

if TYPE_CHECKING:
    import logging

    logger: logging.Logger

# about
about = {
    "website": 'https://duckduckgo.com/',
    "wikidata_id": 'Q12805',
    "official_api_documentation": 'https://duckduckgo.com/api',
    "use_official_api": True,
    "require_api_key": False,
    "results": 'JSON',
}

send_accept_language_header = True

URL = 'https://api.duckduckgo.com/' + '?{query}&format=json&pretty=0&no_redirect=1&d=1'

WIKIDATA_PREFIX = ['http://www.wikidata.org/entity/', 'https://www.wikidata.org/entity/']

replace_http_by_https = get_string_replaces_function({'http:': 'https:'})
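
# replace_http_by_https is used below to normalize infobox ids, so an http:// id and its
# https:// counterpart can be merged with wikidata's infobox (see response()).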


def is_broken_text(text):
    """duckduckgo may return something like ``<a href="xxxx">http://somewhere Related website<a/>``

    The href URL is broken, the "Related website" may contain some HTML.

    The best solution seems to be to ignore these results.
    """
    return text.startswith('http') and ' ' in text
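
# Illustrative example (not from the API docs): is_broken_text('http://somewhere Related website')
# returns True, while a plain title or a bare URL without spaces is not flagged.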


def result_to_text(text, htmlResult):
    # TODO : remove result ending with "Meaning" or "Category"  # pylint: disable=fixme
    result = None
    dom = html.fromstring(htmlResult)
    a = dom.xpath('//a')
    if len(a) >= 1:
        result = extract_text(a[0])
    else:
        result = text
    if not is_broken_text(result):
        return result
    return None
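
# Illustrative example: result_to_text('fallback', '<a href="https://example.org">Example</a>')
# extracts 'Example' from the first <a> element; without an <a> element the plain text is used,
# and texts flagged by is_broken_text() yield None.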


def request(query, params):
    params['url'] = URL.format(query=urlencode({'q': query}))
    return params
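
# Illustrative example: for the query 'paris' the request URL becomes
# https://api.duckduckgo.com/?q=paris&format=json&pretty=0&no_redirect=1&d=1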


def response(resp) -> EngineResults:
    # pylint: disable=too-many-locals, too-many-branches, too-many-statements
    results = EngineResults()

    search_res = resp.json()
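
    # a heavily trimmed, purely illustrative instant-answer payload:
    #   {"Heading": "...", "Abstract": "...", "AbstractURL": "...", "Answer": "...",
    #    "AnswerType": "...", "Image": "...", "Results": [...], "RelatedTopics": [...],
    #    "Infobox": {"content": [...]}}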

    # search_res.get('Entity') possible values (not exhaustive):
    # * continent / country / department / location / waterfall
    # * actor / musician / artist
    # * book / performing art / film / television / media franchise / concert tour / playwright
    # * prepared food
    # * website / software / os / programming language / file format / software engineer
    # * company

    content = ''
    heading = search_res.get('Heading', '')
    attributes = []
    urls = []
    infobox_id = None
    relatedTopics = []

    # add answer if there is one
    answer = search_res.get('Answer', '')
    if answer:
        answer_type = search_res.get('AnswerType')
        logger.debug('AnswerType="%s" Answer="%s"', answer_type, answer)
        if isinstance(answer, str) and answer_type not in ['calc', 'ip']:
            results.add(
                results.types.Answer(
                    answer=html_to_text(answer),
                    url=search_res.get('AbstractURL', ''),
                )
            )

    # add infobox
    if 'Definition' in search_res:
        content = content + search_res.get('Definition', '')

    if 'Abstract' in search_res:
        content = content + search_res.get('Abstract', '')

    # image
    image = search_res.get('Image')
    image = None if image == '' else image
    if image is not None and urlparse(image).netloc == '':
        image = urljoin('https://duckduckgo.com', image)
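    # 'Image' may be a relative path (illustrative value: '/i/xxxx.jpg'), hence the
    # urljoin against https://duckduckgo.com above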

    # urls
    # Official website, Wikipedia page
    for ddg_result in search_res.get('Results', []):
        firstURL = ddg_result.get('FirstURL')
        text = ddg_result.get('Text')
        if firstURL is not None and text is not None:
            urls.append({'title': text, 'url': firstURL})
            results.append({'title': heading, 'url': firstURL})

    # related topics
    for ddg_result in search_res.get('RelatedTopics', []):
        if 'FirstURL' in ddg_result:
            firstURL = ddg_result.get('FirstURL')
            text = ddg_result.get('Text')
            if not is_broken_text(text):
                suggestion = result_to_text(text, ddg_result.get('Result'))
                if suggestion != heading and suggestion is not None:
                    results.append({'suggestion': suggestion})
        elif 'Topics' in ddg_result:
            suggestions = []
            relatedTopics.append({'name': ddg_result.get('Name', ''), 'suggestions': suggestions})
            for topic_result in ddg_result.get('Topics', []):
                suggestion = result_to_text(topic_result.get('Text'), topic_result.get('Result'))
                if suggestion != heading and suggestion is not None:
                    suggestions.append(suggestion)
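
    # RelatedTopics entries come in two shapes (illustrative): plain entries such as
    #   {'FirstURL': ..., 'Text': ..., 'Result': '<a href="...">...</a>'}
    # and grouped entries such as {'Name': ..., 'Topics': [...]}; both are handled above.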

    # abstract
    abstractURL = search_res.get('AbstractURL', '')
    if abstractURL != '':
        # add as result ? problem always in english
        infobox_id = abstractURL
        urls.append({'title': search_res.get('AbstractSource'), 'url': abstractURL, 'official': True})
        results.append({'url': abstractURL, 'title': heading})

    # definition
    definitionURL = search_res.get('DefinitionURL', '')
    if definitionURL != '':
        # add as result ? as answer ? problem always in english
        infobox_id = definitionURL
        urls.append({'title': search_res.get('DefinitionSource'), 'url': definitionURL})

    # to merge with wikidata's infobox
    if infobox_id:
        infobox_id = replace_http_by_https(infobox_id)

    # attributes
    # some will be converted to urls
    if 'Infobox' in search_res:
        infobox = search_res.get('Infobox')
        if 'content' in infobox:
            osm_zoom = 17
            coordinates = None
            for info in infobox.get('content'):
                data_type = info.get('data_type')
                data_label = info.get('label')
                data_value = info.get('value')
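
                # an infobox content entry looks roughly like (illustrative):
                #   {'data_type': 'string', 'label': 'Born', 'value': '...'}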

                # Workaround: ddg may return a double quote
                if data_value == '""':
                    continue

                # Is it an external URL ?
                # * imdb_id / facebook_profile / youtube_channel / youtube_video / twitter_profile
                # * instagram_profile / rotten_tomatoes / spotify_artist_id / itunes_artist_id / soundcloud_id
                # * netflix_id
                external_url = get_external_url(data_type, data_value)
                if external_url is not None:
                    urls.append({'title': data_label, 'url': external_url})
                elif data_type in ['instance', 'wiki_maps_trigger', 'google_play_artist_id']:
                    # ignore instance: Wikidata value from "Instance Of" (Qxxxx)
                    # ignore wiki_maps_trigger: reference to a javascript
                    # ignore google_play_artist_id: service shutdown
                    pass
                elif data_type == 'string' and data_label == 'Website':
                    # There is already an URL for the website
                    pass
                elif data_type == 'area':
                    attributes.append({'label': data_label, 'value': area_to_str(data_value), 'entity': 'P2046'})
                    osm_zoom = area_to_osm_zoom(data_value.get('amount'))
                elif data_type == 'coordinates':
                    if data_value.get('globe') == 'http://www.wikidata.org/entity/Q2':
                        # coordinate on Earth
                        # get the zoom information from the area
                        coordinates = info
                    else:
                        # coordinate NOT on Earth
                        attributes.append({'label': data_label, 'value': data_value, 'entity': 'P625'})
                elif data_type == 'string':
                    attributes.append({'label': data_label, 'value': data_value})

            if coordinates:
                data_label = coordinates.get('label')
                data_value = coordinates.get('value')
                latitude = data_value.get('latitude')
                longitude = data_value.get('longitude')
                url = get_earth_coordinates_url(latitude, longitude, osm_zoom)
                urls.append({'title': 'OpenStreetMap', 'url': url, 'entity': 'P625'})
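                # the OpenStreetMap link is built from the Earth coordinates, using the zoom
                # level derived from the 'area' attribute above (default zoom: 17)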

    if len(heading) > 0:
        # TODO get infobox.meta.value where .label='article_title'  # pylint: disable=fixme
        if image is None and len(attributes) == 0 and len(urls) == 1 and len(relatedTopics) == 0 and len(content) == 0:
            results.append({'url': urls[0]['url'], 'title': heading, 'content': content})
        else:
            results.append(
                {
                    'infobox': heading,
                    'id': infobox_id,
                    'content': content,
                    'img_src': image,
                    'attributes': attributes,
                    'urls': urls,
                    'relatedTopics': relatedTopics,
                }
            )

    return results


def unit_to_str(unit):
    for prefix in WIKIDATA_PREFIX:
        if unit.startswith(prefix):
            wikidata_entity = unit[len(prefix) :]
            real_unit = WIKIDATA_UNITS.get(wikidata_entity)
            if real_unit is None:
                return unit
            return real_unit['symbol']
    return unit
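
# Illustrative example: unit_to_str('http://www.wikidata.org/entity/Q11573') strips the Wikidata
# prefix, looks up Q11573 (metre) in WIKIDATA_UNITS and returns its symbol (presumably 'm');
# unknown units and non-Wikidata strings are returned unchanged.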


def area_to_str(area):
    """parse ``{'unit': 'https://www.wikidata.org/entity/Q712226', 'amount': '+20.99'}``"""
    unit = unit_to_str(area.get('unit'))
    if unit is not None:
        try:
            amount = float(area.get('amount'))
            return '{} {}'.format(amount, unit)
        except ValueError:
            pass
    return '{} {}'.format(area.get('amount', ''), area.get('unit', ''))
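
# Illustrative example: with the dict from the docstring above, area_to_str() may yield something
# like '20.99 km²' (Q712226 is the square kilometre); if the unit or amount cannot be resolved,
# the raw amount and unit strings are returned instead.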