searxng/searx/engines/yahoo.py
Markus Heiser 86b4d2f2d0 [mod] activate pyright checks (in CI)
We have been using a static type checker (pyright) for a long time, but passing
its check was not yet a prerequisite for the quality gate.  The check ran in
the CI, but its error messages were only logged.

As is always the case with checks that you have to run but that have no
consequences: you neglect them :-)

We didn't activate the checks back then because we (even today) have too much
monkey patching in our code (not only in the engines; httpx and other objects
are also affected).

We have wanted to replace monkey patching with clear interfaces for a long
time.  The basis for this is stronger typing, and we can only achieve that if
we make type checking an integral part of the quality gate.

  This PR activates the type check; in order to pass the check, a few type
  annotations were corrected in the code, but most type inconsistencies were
  silenced via inline comments.

This was particularly necessary in places where the code uses attributes that
are attached to objects at runtime (monkey patching).  Attaching such
attributes happens in only a few places, but access to them is spread over the
entire code base, which is why there are many `# type: ignore` markers in the
code ... which we will hopefully be able to remove again, step by step, in the
future.
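
A simplified sketch of such a spot (taken from the yahoo engine below,
shortened); here the `ok` attribute is presumably attached to the response
object at runtime, so pyright cannot see it:

    resp = network.get('https://search.yahoo.com/preferences/languages')
    if not resp.ok:  # type: ignore
        print("ERROR: response from yahoo is not OK.")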

Signed-off-by: Markus Heiser <markus.heiser@darmarit.de>
2024-04-27 18:31:52 +02:00


# SPDX-License-Identifier: AGPL-3.0-or-later
"""Yahoo Search (Web)

Languages are supported by mapping the language to a domain.  If a domain is
not found in :py:obj:`lang2domain`, the URL ``<lang>.search.yahoo.com`` is
used.

"""

from urllib.parse import (
    unquote,
    urlencode,
)

from lxml import html

from searx.utils import (
    eval_xpath_getindex,
    eval_xpath_list,
    extract_text,
)
from searx.enginelib.traits import EngineTraits

# assigned by the engine loader at runtime (monkey patching); the annotation
# makes the attribute visible to the type checker
traits: EngineTraits

# about
about = {
    "website": 'https://search.yahoo.com/',
    "wikidata_id": None,
    "official_api_documentation": 'https://developer.yahoo.com/api/',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['general', 'web']
paging = True
time_range_support = True
# send_accept_language_header = True

time_range_dict = {
    'day': ('1d', 'd'),
    'week': ('1w', 'w'),
    'month': ('1m', 'm'),
}

lang2domain = {
    'zh_chs': 'hk.search.yahoo.com',
    'zh_cht': 'tw.search.yahoo.com',
    'any': 'search.yahoo.com',
    'en': 'search.yahoo.com',
    'bg': 'search.yahoo.com',
    'cs': 'search.yahoo.com',
    'da': 'search.yahoo.com',
    'el': 'search.yahoo.com',
    'et': 'search.yahoo.com',
    'he': 'search.yahoo.com',
    'hr': 'search.yahoo.com',
    'ja': 'search.yahoo.com',
    'ko': 'search.yahoo.com',
    'sk': 'search.yahoo.com',
    'sl': 'search.yahoo.com',
}
"""Map language to domain"""

locale_aliases = {
    'zh': 'zh_Hans',
    'zh-HK': 'zh_Hans',
    'zh-CN': 'zh_Hans',  # dead since 2015 / routed to hk.search.yahoo.com
    'zh-TW': 'zh_Hant',
}


def request(query, params):
    """build request"""

    lang = locale_aliases.get(params['language'], None)
    if not lang:
        lang = params['language'].split('-')[0]
    lang = traits.get_language(lang, traits.all_locale)

    offset = (params['pageno'] - 1) * 7 + 1
    age, btf = time_range_dict.get(params['time_range'], ('', ''))

    args = urlencode(
        {
            'p': query,
            'ei': 'UTF-8',
            'fl': 1,
            'vl': 'lang_' + lang,  # type: ignore
            'btf': btf,
            'fr2': 'time',
            'age': age,
            'b': offset,
            'xargs': 0,
        }
    )

    domain = lang2domain.get(lang, '%s.search.yahoo.com' % lang)  # type: ignore
    params['url'] = 'https://%s/search?%s' % (domain, args)
    return params
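
# Rough sketch of what request() produces; the concrete values are only
# illustrative and assume that traits.get_language('de', 'any') returns 'de':
# with query='test' and params={'language': 'de-DE', 'pageno': 2,
# 'time_range': 'week'} the engine would fetch something like
#   https://de.search.yahoo.com/search?p=test&ei=UTF-8&fl=1&vl=lang_de&btf=w&fr2=time&age=1w&b=8&xargs=0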


def parse_url(url_string):
    """remove yahoo-specific tracking-url"""

    endings = ['/RS', '/RK']
    endpositions = []
    start = url_string.find('http', url_string.find('/RU=') + 1)

    for ending in endings:
        endpos = url_string.rfind(ending)
        if endpos > -1:
            endpositions.append(endpos)

    if start == 0 or len(endpositions) == 0:
        return url_string

    end = min(endpositions)
    return unquote(url_string[start:end])
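
# Sketch of the redirect format parse_url() unwraps; the structure is inferred
# from the code above and the concrete tokens are illustrative only:
#   https://r.search.yahoo.com/_ylt=.../RU=https%3a%2f%2fexample.org%2f/RK=2/RS=...
# The target URL is percent-encoded after '/RU=' and terminated by the first
# '/RK' or '/RS' marker, so the call would return 'https://example.org/'.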


def response(resp):
    """parse response"""

    results = []
    dom = html.fromstring(resp.text)

    # parse results
    for result in eval_xpath_list(dom, '//div[contains(@class,"algo-sr")]'):

        url = eval_xpath_getindex(result, './/h3/a/@href', 0, default=None)
        if url is None:
            continue
        url = parse_url(url)

        title = eval_xpath_getindex(result, './/h3//a/@aria-label', 0, default='')
        title = extract_text(title)
        content = eval_xpath_getindex(result, './/div[contains(@class, "compText")]', 0, default='')
        content = extract_text(content, allow_none=True)

        # append result
        results.append({'url': url, 'title': title, 'content': content})

    for suggestion in eval_xpath_list(dom, '//div[contains(@class, "AlsoTry")]//table//a'):
        # append suggestion
        results.append({'suggestion': extract_text(suggestion)})

    return results
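
# Two result shapes are produced above: regular results as
# {'url': ..., 'title': ..., 'content': ...} and query suggestions as
# {'suggestion': ...}.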


def fetch_traits(engine_traits: EngineTraits):
    """Fetch languages from yahoo"""

    # pylint: disable=import-outside-toplevel
    import babel
    from searx import network
    from searx.locales import language_tag

    engine_traits.all_locale = 'any'

    resp = network.get('https://search.yahoo.com/preferences/languages')
    if not resp.ok:  # type: ignore
        print("ERROR: response from yahoo is not OK.")

    dom = html.fromstring(resp.text)
    offset = len('lang_')

    eng2sxng = {'zh_chs': 'zh_Hans', 'zh_cht': 'zh_Hant'}

    for val in eval_xpath_list(dom, '//div[contains(@class, "lang-item")]/input/@value'):
        eng_tag = val[offset:]

        try:
            sxng_tag = language_tag(babel.Locale.parse(eng2sxng.get(eng_tag, eng_tag)))
        except babel.UnknownLocaleError:
            print('ERROR: unknown language --> %s' % eng_tag)
            continue

        conflict = engine_traits.languages.get(sxng_tag)
        if conflict:
            if conflict != eng_tag:
                print("CONFLICT: babel %s --> %s, %s" % (sxng_tag, conflict, eng_tag))
            continue
        engine_traits.languages[sxng_tag] = eng_tag
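
# Illustration of the mapping built above (the concrete tags depend on what
# Yahoo currently lists): a scraped value 'lang_zh_cht' yields eng_tag
# 'zh_cht', which eng2sxng maps to the SearXNG tag 'zh_Hant', so afterwards
# engine_traits.languages['zh_Hant'] == 'zh_cht'.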