Mirror of https://github.com/searxng/searxng.git (synced 2024-05-11 08:02:38 +00:00)

Compare commits: 3 commits, 84b38b6983 ... efde0643f2

  efde0643f2
  ecee56533c
  86b4d2f2d0

@@ -1,2 +1,2 @@
-python 3.12.0
+python 3.8.18
 shellcheck 0.9.0

Makefile (2 changed lines)
@@ -69,7 +69,7 @@ test.shell:
 	utils/searx.sh \
 	utils/filtron.sh \
 	utils/morty.sh
-	$(Q)$(MTOOLS) build_msg TEST "$@ OK"
+	$(Q)$(MTOOLS) build_msg TEST "[shellcheck] $@ OK"

 # wrap ./manage script

@@ -1,7 +1,7 @@
 {
   "dependencies": {
     "eslint": "^9.0.0",
-    "pyright": "^1.1.329"
+    "pyright": "^1.1.353"
   },
   "scripts": {
     "clean": "rm -Rf node_modules package-lock.json"

@@ -1,10 +0,0 @@
-{
-  "venvPath": "local",
-  "venv": "py3",
-  "include": [
-    "searx",
-    "searxng_extra",
-    "tests"
-  ],
-  "typeCheckingMode": "off"
-}

@@ -1,9 +1,13 @@
 {
   "venvPath": "local",
   "venv": "py3",
+  "stubPath": "searx/engines/__builtins__.pyi",
   "include": [
     "searx",
     "searxng_extra",
     "tests"
-  ]
+  ],
+  "reportPossiblyUnboundVariable": false,
+  "reportArgumentType": false,
+  "reportOptionalMemberAccess": false
 }

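For context on the three `report*` switches added above: they disable specific pyright diagnostics project-wide (the rule names match the pyright 1.1.353 line pinned in package.json). A hedged sketch of the kind of code each rule would otherwise flag — the function and names below are illustrative, not from the repository:

    from typing import Optional

    def example(flag: bool, value: Optional[str]):
        if flag:
            count = 1
        print(count)          # reportPossiblyUnboundVariable: `count` may be unbound
        print(value.upper())  # reportOptionalMemberAccess: `value` may be None

    example("yes", None)      # reportArgumentType: "yes" is not a bool
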
@@ -47,8 +47,8 @@ def brave(query, _lang):
 
     results = []
 
-    if resp.ok:
-        data = resp.json()
+    if resp.ok:  # type: ignore
+        data = resp.json()  # type: ignore
         for item in data[1]:
             results.append(item)
     return results
@@ -62,8 +62,8 @@ def dbpedia(query, _lang):
 
     results = []
 
-    if response.ok:
-        dom = lxml.etree.fromstring(response.content)
+    if response.ok:  # type: ignore
+        dom = lxml.etree.fromstring(response.content)  # type: ignore
         results = dom.xpath('//Result/Label//text()')
 
     return results
@@ -82,8 +82,8 @@ def duckduckgo(query, sxng_locale):
     resp = get(url)
 
     ret_val = []
-    if resp.ok:
-        j = resp.json()
+    if resp.ok:  # type: ignore
+        j = resp.json()  # type: ignore
         if len(j) > 1:
             ret_val = j[1]
     return ret_val
@@ -110,11 +110,11 @@ def google_complete(query, sxng_locale):
     )
     results = []
     resp = get(url.format(subdomain=google_info['subdomain'], args=args))
-    if resp.ok:
-        json_txt = resp.text[resp.text.find('[') : resp.text.find(']', -3) + 1]
+    if resp.ok:  # type: ignore
+        json_txt = resp.text[resp.text.find('[') : resp.text.find(']', -3) + 1]  # type: ignore
         data = json.loads(json_txt)
         for item in data[0]:
-            results.append(lxml.html.fromstring(item[0]).text_content())
+            results.append(lxml.html.fromstring(item[0]).text_content())  # type: ignore
     return results
 
 
@@ -124,7 +124,7 @@ def mwmbl(query, _lang):
     # mwmbl autocompleter
     url = 'https://api.mwmbl.org/search/complete?{query}'
 
-    results = get(url.format(query=urlencode({'q': query}))).json()[1]
+    results = get(url.format(query=urlencode({'q': query}))).json()[1]  # type: ignore
 
     # results starting with `go:` are direct urls and not useful for auto completion
     return [result for result in results if not result.startswith("go: ") and not result.startswith("search: ")]
@@ -142,10 +142,10 @@ def seznam(query, _lang):
         )
     )
 
-    if not resp.ok:
+    if not resp.ok:  # type: ignore
         return []
 
-    data = resp.json()
+    data = resp.json()  # type: ignore
     return [
         ''.join([part.get('text', '') for part in item.get('text', [])])
         for item in data.get('result', [])
@@ -159,10 +159,10 @@ def stract(query, _lang):
 
     resp = post(url)
 
-    if not resp.ok:
+    if not resp.ok:  # type: ignore
         return []
 
-    return [suggestion['raw'] for suggestion in resp.json()]
+    return [suggestion['raw'] for suggestion in resp.json()]  # type: ignore
 
 
 def startpage(query, sxng_locale):
@@ -170,7 +170,7 @@ def startpage(query, sxng_locale):
     lui = engines['startpage'].traits.get_language(sxng_locale, 'english')
     url = 'https://startpage.com/suggestions?{query}'
     resp = get(url.format(query=urlencode({'q': query, 'segment': 'startpage.udog', 'lui': lui})))
-    data = resp.json()
+    data = resp.json()  # type: ignore
     return [e['text'] for e in data.get('suggestions', []) if 'text' in e]
 
 
@@ -178,7 +178,7 @@ def swisscows(query, _lang):
     # swisscows autocompleter
     url = 'https://swisscows.ch/api/suggest?{query}&itemsCount=5'
 
-    resp = json.loads(get(url.format(query=urlencode({'query': query}))).text)
+    resp = json.loads(get(url.format(query=urlencode({'query': query}))).text)  # type: ignore
     return resp
 
 
@@ -190,8 +190,8 @@ def qwant(query, sxng_locale):
     url = 'https://api.qwant.com/v3/suggest?{query}'
     resp = get(url.format(query=urlencode({'q': query, 'locale': locale, 'version': '2'})))
 
-    if resp.ok:
-        data = resp.json()
+    if resp.ok:  # type: ignore
+        data = resp.json()  # type: ignore
         if data['status'] == 'success':
             for item in data['data']['items']:
                 results.append(item['value'])
@@ -204,7 +204,7 @@ def wikipedia(query, sxng_locale):
     results = []
     eng_traits = engines['wikipedia'].traits
     wiki_lang = eng_traits.get_language(sxng_locale, 'en')
-    wiki_netloc = eng_traits.custom['wiki_netloc'].get(wiki_lang, 'en.wikipedia.org')
+    wiki_netloc = eng_traits.custom['wiki_netloc'].get(wiki_lang, 'en.wikipedia.org')  # type: ignore
 
     url = 'https://{wiki_netloc}/w/api.php?{args}'
     args = urlencode(
@@ -218,8 +218,8 @@ def wikipedia(query, sxng_locale):
         }
     )
     resp = get(url.format(args=args, wiki_netloc=wiki_netloc))
-    if resp.ok:
-        data = resp.json()
+    if resp.ok:  # type: ignore
+        data = resp.json()  # type: ignore
         if len(data) > 1:
             results = data[1]
 
@@ -230,7 +230,7 @@ def yandex(query, _lang):
    # yandex autocompleter
    url = "https://suggest.yandex.com/suggest-ff.cgi?{0}"
 
-    resp = json.loads(get(url.format(urlencode(dict(part=query)))).text)
+    resp = json.loads(get(url.format(urlencode(dict(part=query)))).text)  # type: ignore
    if len(resp) > 1:
        return resp[1]
    return []

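A note on the pattern repeated through these autocompleter hunks: `searx.network.get()` returns an `httpx.Response`, and httpx declares no `ok` attribute — SearXNG patches one on at runtime (see the searx/network hunk near the end of this compare). Statically, pyright therefore flags every `resp.ok` access, which is what the blanket `# type: ignore` comments silence. A minimal, runnable illustration:

    # Hedged sketch: why `resp.ok` is flagged. httpx.Response defines
    # `is_error` and `is_success`, but no `ok`; that attribute only appears
    # after SearXNG's runtime patch.
    import httpx

    resp = httpx.Response(status_code=200)
    print(resp.is_success)   # fine: declared on httpx.Response
    # print(resp.ok)         # pyright error: "ok" is not a known attribute
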
@@ -147,7 +147,7 @@ def get_token() -> str:
         return '12345678'
     token = redis_client.get(TOKEN_KEY)
     if token:
-        token = token.decode('UTF-8')
+        token = token.decode('UTF-8')  # type: ignore
     else:
         token = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(16))
         redis_client.set(TOKEN_KEY, token, ex=TOKEN_LIVE_TIME)

@@ -13,7 +13,7 @@
 
 
 from __future__ import annotations
-from typing import List, Callable, TYPE_CHECKING
+from typing import List, TYPE_CHECKING, Callable
 
 if TYPE_CHECKING:
     from searx.enginelib import traits
@@ -76,7 +76,7 @@ class Engine:  # pylint: disable=too-few-public-methods
 
     # settings.yml
 
-    categories: List[str]
+    categories: list[str]
     """Specifies to which :ref:`engine categories` the engine should be added."""
 
     name: str
@@ -139,6 +139,6 @@ class Engine:  # pylint: disable=too-few-public-methods
     the user is used to build and send a ``Accept-Language`` header in the
     request to the origin search engine."""
 
-    tokens: List[str]
+    tokens: list[str]
     """A list of secret tokens to make this engine *private*, more details see
     :ref:`private engines`."""

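The `List[str]` → `list[str]` rewrites above rely on the `from __future__ import annotations` import that is kept in place in the same hunk: with the Python 3.8.18 pin earlier in this compare, subscripting built-in types in annotations is only legal when annotation evaluation is postponed (PEP 563). A hedged sketch:

    # On plain Python 3.8, `list[str]` in a class-level annotation raises
    # "TypeError: 'type' object is not subscriptable" at import time unless
    # annotations are postponed.
    from __future__ import annotations


    class Engine:  # simplified stand-in, not searx.enginelib.Engine itself
        categories: list[str]
        tokens: list[str]
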
@@ -10,10 +10,12 @@ used.
 """
 
+from __future__ import annotations
+from collections.abc import Callable
+
 import json
 import dataclasses
 import types
-from typing import Dict, Literal, Iterable, Union, Callable, Optional, TYPE_CHECKING
+from typing import Dict, Literal, Iterable, Union, Optional, TYPE_CHECKING
 
 from searx import locales
 from searx.data import data_dir, ENGINE_TRAITS

@@ -41,7 +41,7 @@ def response(resp):
         seed = extract_text(eval_xpath(result, './/td[contains(@class, "seeds")]'))
         leech = extract_text(eval_xpath(result, './/td[contains(@class, "leeches")]'))
         filesize_info = extract_text(eval_xpath(result, './/td[contains(@class, "size")]/text()'))
-        filesize, filesize_multiplier = filesize_info.split()
+        filesize, filesize_multiplier = filesize_info.split()  # type: ignore
         filesize = get_torrent_size(filesize, filesize_multiplier)
 
         results.append(

searx/engines/__builtins__.pyi (new file, 18 lines)
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: AGPL-3.0-or-later
+# pylint: disable=missing-module-docstring
+
+# Ugly hack to avoid errors from pyright when checking the engines / sadly
+# these *builtins* are now available in all modules !?!
+#
+# see https://github.com/microsoft/pyright/blob/main/docs/builtins.md
+
+import searx
+import searx.enginelib.traits
+
+logger = searx.logger
+traits = searx.enginelib.traits.EngineTraits()
+supported_languages = None
+language_aliases = None
+categories = []
+
+del searx

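The linked pyright document explains the mechanism: names declared in a `__builtins__.pyi` stub are treated as built-ins, i.e. as defined in every checked module. That matches how SearXNG engines behave at runtime — the loader injects module-level names such as `logger` and `traits` into each engine module, which is also why the many `if TYPE_CHECKING: logger: logging.Logger` blocks in the hunks below can simply be deleted. A hedged simulation of that injection (names here stand in for the real loader):

    # Sketch of the runtime situation the stub papers over.
    import logging
    import types

    engine = types.ModuleType("example_engine")          # hypothetical engine module
    engine.logger = logging.getLogger("example_engine")  # injected at runtime

    # Inside a real engine file, `logger.debug(...)` is used without any
    # import — exactly what pyright rejects unless __builtins__.pyi declares it.
    engine.logger.debug("engine loaded")
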
@@ -14,16 +14,14 @@ import sys
 import copy
 from os.path import realpath, dirname
 
-from typing import TYPE_CHECKING, Dict
+from typing import Dict
 import types
 import inspect
 
 from searx import logger, settings
+from searx.enginelib import Engine
 from searx.utils import load_module
 
-if TYPE_CHECKING:
-    from searx.enginelib import Engine
-
 logger = logger.getChild('engines')
 ENGINE_DIR = dirname(realpath(__file__))
 ENGINE_DEFAULT_ARGS = {

@@ -51,7 +51,7 @@ def response(resp):
 
         link = eval_xpath_getindex(result, './/h5/a', 0)
 
-        url = base_url + link.attrib.get('href') + '#downloads'
+        url = base_url + link.attrib.get('href') + '#downloads'  # type: ignore
         title = extract_text(link)
         img_src = base_url + eval_xpath_getindex(result, './/img/@src', 0)
         res = {'url': url, 'title': title, 'img_src': img_src}

@@ -8,7 +8,6 @@ Arch Wiki blocks access to it.
 
 """
 
-from typing import TYPE_CHECKING
 from urllib.parse import urlencode, urljoin, urlparse
 import lxml
 import babel
@@ -17,13 +16,6 @@ from searx.utils import extract_text, eval_xpath_list, eval_xpath_getindex
 from searx.enginelib.traits import EngineTraits
 from searx.locales import language_tag
 
-if TYPE_CHECKING:
-    import logging
-
-    logger: logging.Logger
-
-traits: EngineTraits
-
 
 about = {
     "website": 'https://wiki.archlinux.org/',

@@ -5,7 +5,7 @@
 
 from datetime import datetime
 
-from lxml import etree
+from lxml import etree  # type: ignore
 from lxml.etree import XPath
 from searx.utils import eval_xpath, eval_xpath_list, eval_xpath_getindex
 

@@ -2,7 +2,7 @@
 """Ask.com"""
 
 from urllib.parse import urlencode
-import dateutil
+import dateutil.parser
 from lxml import html
 from searx import utils
 

@@ -6,7 +6,7 @@ from datetime import datetime
 import re
 
 from urllib.parse import urlencode
-from lxml import etree
+from lxml import etree  # type: ignore
 from searx.utils import searx_useragent
 
 # about

@@ -26,7 +26,6 @@ category for the Chinese market.
 """
 # pylint: disable=too-many-branches, invalid-name
 
-from typing import TYPE_CHECKING
 import base64
 import re
 import time
@@ -39,12 +38,6 @@ from searx.utils import eval_xpath, extract_text, eval_xpath_list, eval_xpath_ge
 from searx.locales import language_tag, region_tag
 from searx.enginelib.traits import EngineTraits
 
-if TYPE_CHECKING:
-    import logging
-
-    logger = logging.getLogger()
-
-traits: EngineTraits
-
 about = {
     "website": 'https://www.bing.com',

@@ -4,7 +4,6 @@
 # pylint: disable=invalid-name
 
 
-from typing import TYPE_CHECKING
 import json
 from urllib.parse import urlencode
 
@@ -15,11 +14,6 @@ from searx.engines.bing import set_bing_cookies
 from searx.engines.bing import fetch_traits  # pylint: disable=unused-import
 
 
-if TYPE_CHECKING:
-    import logging
-
-    logger = logging.getLogger()
-
-traits: EngineTraits
-
 # about

@@ -9,7 +9,6 @@
 
 # pylint: disable=invalid-name
 
-from typing import TYPE_CHECKING
 from urllib.parse import urlencode
 
 from lxml import html
@@ -18,13 +17,6 @@ from searx.utils import eval_xpath, extract_text, eval_xpath_list, eval_xpath_ge
 from searx.enginelib.traits import EngineTraits
 from searx.engines.bing import set_bing_cookies
 
-if TYPE_CHECKING:
-    import logging
-
-    logger: logging.Logger
-
-traits: EngineTraits
-
 
 # about
 about = {

@@ -3,24 +3,15 @@
 """Bing-Videos: description see :py:obj:`searx.engines.bing`.
 """
 
-from typing import TYPE_CHECKING
 import json
 from urllib.parse import urlencode
 
 from lxml import html
 
 from searx.enginelib.traits import EngineTraits
 from searx.engines.bing import set_bing_cookies
 from searx.engines.bing import fetch_traits  # pylint: disable=unused-import
 from searx.engines.bing_images import time_map
 
-if TYPE_CHECKING:
-    import logging
-
-    logger: logging.Logger
-
-traits: EngineTraits
-
-
 about = {
     "website": 'https://www.bing.com/videos',

@@ -118,7 +118,7 @@ Implementations
 
 """
 
-from typing import Any, TYPE_CHECKING
+from typing import Any
 
 from urllib.parse import (
     urlencode,
@@ -139,13 +139,6 @@ from searx.utils import (
 )
 from searx.enginelib.traits import EngineTraits
 
-if TYPE_CHECKING:
-    import logging
-
-    logger: logging.Logger
-
-traits: EngineTraits
-
 about = {
     "website": 'https://search.brave.com/',
     "wikidata_id": 'Q22906900',
@@ -228,10 +221,10 @@ def request(query, params):
     params['cookies']['useLocation'] = '0'
     params['cookies']['summarizer'] = '0'
 
-    engine_region = traits.get_region(params['searxng_locale'], 'all')
+    engine_region = traits.get_region(params['searxng_locale'], 'all')  # type: ignore
     params['cookies']['country'] = engine_region.split('-')[-1].lower()  # type: ignore
 
-    ui_lang = locales.get_engine_locale(params['searxng_locale'], traits.custom["ui_lang"], 'en-us')
+    ui_lang = locales.get_engine_locale(params['searxng_locale'], traits.custom["ui_lang"], 'en-us')  # type: ignore
     params['cookies']['ui_lang'] = ui_lang
 
     logger.debug("cookies %s", params['cookies'])

|
@ -40,7 +40,7 @@ import re
|
|||
from datetime import datetime
|
||||
from urllib.parse import quote
|
||||
|
||||
from lxml import etree
|
||||
from lxml import etree # type: ignore
|
||||
|
||||
from searx.utils import get_torrent_size
|
||||
|
||||
|
|
|
@@ -56,7 +56,7 @@ def response(resp):
         content = html.tostring(excerpt, encoding='unicode', method='text', with_tail=False)
         # it is better to emit <br/> instead of |, but html tags are verboten
         content = content.strip().replace('\n', ' | ')
-        content = ' '.join(content.split())
+        content = ' '.join(content.split())  # type: ignore
 
         filesize = result.xpath('.//span[@class="torrent_size"]/text()')[0].split()[0]
         filesize_multiplier = result.xpath('.//span[@class="torrent_size"]/text()')[0].split()[1]

@@ -10,8 +10,6 @@ Dailymotion (Videos)
 
 """
 
-from typing import TYPE_CHECKING
-
 from datetime import datetime, timedelta
 from urllib.parse import urlencode
 import time
@@ -23,14 +21,7 @@ from searx.exceptions import SearxEngineAPIException
 from searx.locales import region_tag, language_tag
 from searx.enginelib.traits import EngineTraits
 
-if TYPE_CHECKING:
-    import logging
-
-    logger: logging.Logger
-
-traits: EngineTraits
-
 # about
 about = {
     "website": 'https://www.dailymotion.com',
     "wikidata_id": 'Q769222',

@@ -57,10 +57,10 @@ def response(resp):
 
         results.append(
             {
-                'url': base_url + "/" + extract_text(eval_xpath(result, url_xpath)),
+                'url': base_url + "/" + extract_text(eval_xpath(result, url_xpath)),  # type: ignore
                 'title': extract_text(eval_xpath(result, title_xpath)),
                 'content': extract_text(eval_xpath(result, content_xpath)),
-                'metadata': ', '.join(metadata),
+                'metadata': ', '.join(metadata),  # type: ignore
             }
         )
 

@@ -4,7 +4,6 @@ DuckDuckGo Lite
 ~~~~~~~~~~~~~~~
 """
 
-from typing import TYPE_CHECKING
 import re
 from urllib.parse import urlencode
 import json
@@ -25,13 +24,6 @@ from searx.network import get  # see https://github.com/searxng/searxng/issues/7
 from searx import redisdb
 from searx.enginelib.traits import EngineTraits
 
-if TYPE_CHECKING:
-    import logging
-
-    logger: logging.Logger
-
-traits: EngineTraits
-
 about = {
     "website": 'https://lite.duckduckgo.com/lite/',
     "wikidata_id": 'Q12805',
@@ -110,7 +102,7 @@ def get_vqd(query):
         key = 'SearXNG_ddg_web_vqd' + redislib.secret_hash(query)
         value = c.get(key)
         if value or value == b'':
-            value = value.decode('utf-8')
+            value = value.decode('utf-8')  # type: ignore
             logger.debug("re-use cached vqd value: %s", value)
             return value
 
@@ -129,7 +121,7 @@ def get_vqd(query):
     return value
 
 
-def get_ddg_lang(eng_traits: EngineTraits, sxng_locale, default='en_US'):
+def get_ddg_lang(eng_traits: EngineTraits, sxng_locale, default: str = 'en_US') -> str:
     """Get DuckDuckGo's language identifier from SearXNG's locale.
 
     DuckDuckGo defines its languages by region codes (see

@@ -13,8 +13,6 @@ most of the features are based on English terms.
 
 """
 
-from typing import TYPE_CHECKING
-
 from urllib.parse import urlencode, urlparse, urljoin
 from lxml import html
 
@@ -22,12 +20,7 @@ from searx.data import WIKIDATA_UNITS
 from searx.utils import extract_text, html_to_text, get_string_replaces_function
 from searx.external_urls import get_external_url, get_earth_coordinates_url, area_to_osm_zoom
 
-if TYPE_CHECKING:
-    import logging
-
-    logger: logging.Logger
-
 # about
 about = {
     "website": 'https://duckduckgo.com/',
     "wikidata_id": 'Q12805',

@@ -5,7 +5,6 @@ DuckDuckGo Extra (images, videos, news)
 """
 
 from datetime import datetime
-from typing import TYPE_CHECKING
 from urllib.parse import urlencode
 
 from searx.engines.duckduckgo import fetch_traits  # pylint: disable=unused-import
@@ -13,16 +12,8 @@ from searx.engines.duckduckgo import (
     get_ddg_lang,
     get_vqd,
 )
 from searx.enginelib.traits import EngineTraits
 
-if TYPE_CHECKING:
-    import logging
-
-    logger: logging.Logger
-
-traits: EngineTraits
-
 # about
 about = {
     "website": 'https://duckduckgo.com/',
     "wikidata_id": 'Q12805',

@@ -4,7 +4,6 @@ DuckDuckGo Weather
 ~~~~~~~~~~~~~~~~~~
 """
 
-from typing import TYPE_CHECKING
 from json import loads
 from urllib.parse import quote
 
@@ -13,15 +12,6 @@ from flask_babel import gettext
 
 from searx.engines.duckduckgo import fetch_traits  # pylint: disable=unused-import
 from searx.engines.duckduckgo import get_ddg_lang
 from searx.enginelib.traits import EngineTraits
 
-if TYPE_CHECKING:
-    import logging
-
-    logger: logging.Logger
-
-traits: EngineTraits
-
 
 about = {
     "website": 'https://duckduckgo.com/',

@@ -3,20 +3,13 @@
 
 """
 
-from typing import TYPE_CHECKING
-
 import json
 from time import time
 import re
 from urllib.parse import urlencode
 from searx.utils import ecma_unescape, html_to_text
 
-if TYPE_CHECKING:
-    import logging
-
-    logger: logging.Logger
-
 # about
 about = {
     "website": 'https://www.flickr.com',
     "wikidata_id": 'Q103204',

@@ -46,7 +46,7 @@ def response(resp):
     for result in eval_xpath_list(dom, results_xpath):
         results.append(
             {
-                'url': base_url + extract_text(eval_xpath(result, url_xpath)),
+                'url': base_url + extract_text(eval_xpath(result, url_xpath)),  # type: ignore
                 'title': extract_text(eval_xpath(result, title_xpath)),
                 'img_src': extract_text(eval_xpath(result, thumbnail_xpath)),
                 'content': extract_text(eval_xpath(result, info_text_xpath)),

@@ -11,8 +11,6 @@ engines:
 
 """
 
-from typing import TYPE_CHECKING
-
 import re
 from urllib.parse import urlencode
 from lxml import html
@@ -26,14 +24,6 @@ from searx.network import get  # see https://github.com/searxng/searxng/issues/7
 from searx.exceptions import SearxEngineCaptchaException
 from searx.enginelib.traits import EngineTraits
 
-if TYPE_CHECKING:
-    import logging
-
-    logger: logging.Logger
-
-traits: EngineTraits
-
 
 # about
 about = {
     "website": 'https://www.google.com',

@@ -13,8 +13,6 @@ This internal API offer results in
 .. _Protobuf: https://en.wikipedia.org/wiki/Protocol_Buffers
 """
 
-from typing import TYPE_CHECKING
-
 from urllib.parse import urlencode
 from json import loads
 
@@ -25,15 +23,7 @@ from searx.engines.google import (
     detect_google_sorry,
 )
 
-if TYPE_CHECKING:
-    import logging
-    from searx.enginelib.traits import EngineTraits
-
-    logger: logging.Logger
-    traits: EngineTraits
-
-
 # about
 about = {
     "website": 'https://images.google.com',
     "wikidata_id": 'Q521550',

@@ -24,8 +24,6 @@ The google news API ignores some parameters from the common :ref:`google API`:
 .. _save: https://developers.google.com/custom-search/docs/xml_results#safesp
 """
 
-from typing import TYPE_CHECKING
-
 from urllib.parse import urlencode
 import base64
 from lxml import html
@@ -46,13 +44,6 @@ from searx.engines.google import (
 )
 from searx.enginelib.traits import EngineTraits
 
-if TYPE_CHECKING:
-    import logging
-
-    logger: logging.Logger
-
-traits: EngineTraits
-
 # about
 about = {
     "website": 'https://news.google.com',
@@ -301,4 +292,4 @@ def fetch_traits(engine_traits: EngineTraits):
             print("ERROR: %s -> %s is unknown by babel" % (ceid, sxng_locale))
             continue
 
-        engine_traits.custom['ceid'][locales.region_tag(locale)] = ceid
+        engine_traits.custom['ceid'][locales.region_tag(locale)] = ceid  # type: ignore

@@ -7,7 +7,6 @@ can make use of the :ref:`google API` to assemble the arguments of the GET
 request.
 """
 
-from typing import TYPE_CHECKING
 from typing import Optional
 
 from urllib.parse import urlencode
@@ -28,14 +27,6 @@ from searx.engines.google import (
     get_google_info,
     time_range_dict,
 )
 from searx.enginelib.traits import EngineTraits
 
-if TYPE_CHECKING:
-    import logging
-
-    logger: logging.Logger
-
-traits: EngineTraits
-
 # about
 about = {

@@ -13,8 +13,6 @@
 
 """
 
-from typing import TYPE_CHECKING
-
 from urllib.parse import urlencode
 from lxml import html
 
@@ -33,14 +31,6 @@ from searx.engines.google import (
     suggestion_xpath,
     detect_google_sorry,
 )
 from searx.enginelib.traits import EngineTraits
 
-if TYPE_CHECKING:
-    import logging
-
-    logger: logging.Logger
-
-traits: EngineTraits
-
 # about
 about = {

@@ -55,7 +55,7 @@ def response(resp):
         results.append(
             {
                 'template': 'images.html',
-                'url': base_url + extract_text(eval_xpath(result, url_xpath)),
+                'url': base_url + extract_text(eval_xpath(result, url_xpath)),  # type: ignore
                 'title': extract_text(eval_xpath(result, title_xpath)),
                 'img_src': img_src,
                 'thumbnail_src': thumbnail_src,

@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-"""
-INA (Videos)
+"""INA (Videos)
 
 """
 
 from html import unescape

@@ -58,7 +58,7 @@ def response(resp):
         thumbnail = extract_text(eval_xpath(result, thumbnail_xpath))
         content = extract_text(eval_xpath(result, publishedDate_xpath)) + extract_text(
             eval_xpath(result, content_xpath)
-        )
+        )  # type: ignore
 
         # append result
         results.append(

@@ -17,7 +17,7 @@ from urllib.parse import urlencode
 from searx.utils import to_string, html_to_text
 
 
-search_url = None
+search_url: str = ''
 url_query = None
 url_prefix = ""
 content_query = None

@@ -33,22 +33,13 @@ Implementations
 
 """
 from __future__ import annotations
-from typing import TYPE_CHECKING
 
 from datetime import datetime
 from urllib.parse import urlencode, quote
 
 from searx.utils import html_to_text
 from searx.enginelib.traits import EngineTraits
 
-if TYPE_CHECKING:
-    import logging
-
-    logger: logging.Logger
-
-traits: EngineTraits
-
 # about
 about = {
     "website": None,
     "wikidata_id": None,

@@ -20,7 +20,6 @@ Otherwise, follow instructions provided by Mullvad for enabling the VPN on Linux
 update of SearXNG!
 """
 
-from typing import TYPE_CHECKING
 from httpx import Response
 from lxml import html
 from searx.enginelib.traits import EngineTraits
@@ -28,18 +27,10 @@ from searx.locales import region_tag, get_official_locales
 from searx.utils import eval_xpath, extract_text, eval_xpath_list
 from searx.exceptions import SearxEngineResponseException
 
-if TYPE_CHECKING:
-    import logging
-
-    logger = logging.getLogger()
-
-traits: EngineTraits
-
 use_cache: bool = True  # non-cache use only has 100 searches per day!
 
 search_url = "https://leta.mullvad.net"
 
 # about
 about = {
     "website": search_url,
     "wikidata_id": 'Q47008412',  # the Mullvad id - not leta, but related
@@ -145,7 +136,7 @@ def fetch_traits(engine_traits: EngineTraits):
     if not isinstance(resp, Response):
         print("ERROR: failed to get response from mullvad-leta. Are you connected to the VPN?")
         return
-    if not resp.ok:
+    if not resp.ok:  # type: ignore
         print("ERROR: response from mullvad-leta is not OK. Are you connected to the VPN?")
         return
     dom = html.fromstring(resp.text)

@@ -14,8 +14,6 @@ from searx.network import get
 from searx.locales import language_tag
 from searx.enginelib.traits import EngineTraits
 
-traits: EngineTraits
-
 # Engine metadata
 about = {
     "website": "https://odysee.com/",
@@ -122,11 +120,11 @@ def fetch_traits(engine_traits: EngineTraits):
         timeout=60,
     )
 
-    if not resp.ok:
+    if not resp.ok:  # type: ignore
         print("ERROR: can't determine languages from Odysee")
         return
 
-    for line in resp.text.split("\n")[1:-4]:
+    for line in resp.text.split("\n")[1:-4]:  # type: ignore
         lang_tag = line.strip().split(": ")[0].replace("'", "")
 
         try:

|
@ -17,8 +17,6 @@ from searx.locales import language_tag
|
|||
from searx.utils import html_to_text
|
||||
from searx.enginelib.traits import EngineTraits
|
||||
|
||||
traits: EngineTraits
|
||||
|
||||
about = {
|
||||
# pylint: disable=line-too-long
|
||||
"website": 'https://joinpeertube.org',
|
||||
|
|
|
@@ -5,7 +5,7 @@ import re
 from urllib.parse import urlencode
 from dateutil import parser
 
-import babel
+import babel.numbers
 import flask_babel
 from lxml import html
 from searx.utils import eval_xpath, eval_xpath_list, extract_text
@@ -69,14 +69,18 @@ def response(resp):
         results.append(
             {
                 'template': 'packages.html',
-                'url': base_url + extract_text(eval_xpath(result, url_xpath)),
+                'url': base_url + extract_text(eval_xpath(result, url_xpath)),  # type: ignore
                 'title': extract_text(eval_xpath(result, title_xpath)),
                 'content': extract_text(eval_xpath(result, content_xpath)),
-                'package_name': re.sub(r"\(|\)", "", extract_text(eval_xpath(result, package_name_xpath))),
+                'package_name': re.sub(
+                    r"\(|\)",
+                    "",
+                    extract_text(eval_xpath(result, package_name_xpath)),
+                ),  # type: ignore
                 'version': extract_text(eval_xpath(result, version_xpath)),
                 'popularity': popularity,
                 'license_name': extract_text(eval_xpath(result, license_name_xpath)),
-                'license_url': base_url + extract_text(eval_xpath(result, license_url_xpath)),
+                'license_url': base_url + extract_text(eval_xpath(result, license_url_xpath)),  # type: ignore
                 'publishedDate': publishedDate,
             }
         )

@@ -63,7 +63,7 @@ def search(query, params):
     query_params = {'query': query}
     query_to_run = query_str + ' LIMIT {0} OFFSET {1}'.format(limit, (params['pageno'] - 1) * limit)
 
-    with _connection:
+    with _connection:  # type: ignore
         with _connection.cursor() as cur:
             cur.execute(query_to_run, query_params)
             return _fetch_results(cur)

@@ -128,7 +128,7 @@ def _get_request_id(query, params):
     # performs an IP-based geolocation of the user, we don't want that in
     # SearXNG ;-)
 
-    if l.territory:
+    if l and l.territory:
         headers['Accept-Language'] = f"{l.language}-{l.territory},{l.language};" "q=0.9,*;" "q=0.5"
 
     resp_text = get(url, headers=headers).text  # type: ignore

@@ -6,7 +6,7 @@
 from datetime import datetime
 from urllib.parse import urlencode
 
-from lxml import etree
+from lxml import etree  # type: ignore
 from searx.network import get
 from searx.utils import (
     eval_xpath_getindex,
@@ -77,8 +77,8 @@ def response(resp):  # pylint: disable=too-many-locals
     for entry in eval_xpath_list(search_results, '//PubmedArticle'):
         medline = eval_xpath_getindex(entry, './MedlineCitation', 0)
 
-        title = eval_xpath_getindex(medline, './/Article/ArticleTitle', 0).text
-        pmid = eval_xpath_getindex(medline, './/PMID', 0).text
+        title = eval_xpath_getindex(medline, './/Article/ArticleTitle', 0).text  # type: ignore
+        pmid = eval_xpath_getindex(medline, './/PMID', 0).text  # type: ignore
         url = pubmed_url + pmid
         content = extract_text(
             eval_xpath_getindex(medline, './/Abstract/AbstractText//text()', 0, default=None), allow_none=True
@@ -120,7 +120,7 @@ def response(resp):  # pylint: disable=too-many-locals
                 day = eval_xpath_getindex(accepted_date, './Day', 0)
                 try:
                     publishedDate = datetime.strptime(
-                        year.text + '-' + month.text + '-' + day.text,
+                        year.text + '-' + month.text + '-' + day.text,  # type: ignore
                         '%Y-%m-%d',
                     )
                     res_dict['publishedDate'] = publishedDate

@@ -47,7 +47,7 @@ from json import loads
 from urllib.parse import urlencode
 from flask_babel import gettext
 import babel
-import lxml
+import lxml.html
 
 from searx.exceptions import SearxEngineAPIException, SearxEngineTooManyRequestsException
 from searx.network import raise_for_httperror
@@ -59,8 +59,6 @@ from searx.utils import (
     extract_text,
 )
 
-traits: EngineTraits
-
 # about
 about = {
     "website": 'https://www.qwant.com/',

@@ -66,9 +66,9 @@ paging = True
 time_range_support = True
 
 # parameters from settings.yml
-base_url = None
+base_url: str = ''
 search_dir = ''
-mount_prefix = None
+mount_prefix: str = ''
 dl_prefix = None
 
 # embedded

@@ -69,7 +69,7 @@ def search(query, _params):
 
     ret = _redis_client.hgetall(query)
     if ret:
-        ret['template'] = result_template
+        ret['template'] = result_template  # type: ignore
         return [ret]
 
     if ' ' in query:
@@ -98,7 +98,7 @@ def search_keys(query):
         res = dict(enumerate(_redis_client.lrange(key, 0, -1)))
 
         if res:
-            res['template'] = result_template
-            res['redis_key'] = key
+            res['template'] = result_template  # type: ignore
+            res['redis_key'] = key  # type: ignore
             ret.append(res)
     return ret

@@ -55,7 +55,7 @@ def response(resp):
         return []
 
     for result_dom in results_dom:
-        url = base_url + extract_text(result_dom.xpath(url_xpath))
+        url = base_url + extract_text(result_dom.xpath(url_xpath))  # type: ignore
         thumbnail = extract_text(result_dom.xpath(thumbnail_xpath))
         title = extract_text(result_dom.xpath(title_xpath))
         p_date = extract_text(result_dom.xpath(published_date))

@@ -5,8 +5,6 @@ peertube engines.
 
 """
 
-from typing import TYPE_CHECKING
-
 from urllib.parse import urlencode
 from datetime import datetime
 
@@ -17,14 +15,6 @@ from searx.engines.peertube import (
     safesearch_table,
     time_range_table,
 )
 from searx.enginelib.traits import EngineTraits
 
-if TYPE_CHECKING:
-    import logging
-
-    logger: logging.Logger
-
-traits: EngineTraits
-
 about = {
     # pylint: disable=line-too-long

@@ -44,7 +44,7 @@ guest_client_id = ''
 def get_client_id():
     resp = http_get("https://soundcloud.com")
 
-    if resp.ok:
+    if resp.ok:  # type: ignore
         tree = html.fromstring(resp.content)
         # script_tags has been moved from /assets/app/ to /assets/ path.  I
         # found client_id in https://a-v2.sndcdn.com/assets/49-a0c01933-3.js
@@ -55,7 +55,7 @@ def get_client_id():
         for app_js_url in app_js_urls[::-1]:
             # gets app_js and searches for the clientid
             resp = http_get(app_js_url)
-            if resp.ok:
+            if resp.ok:  # type: ignore
                 cids = cid_re.search(resp.content.decode())
                 if cids is not None and len(cids.groups()):
                     return cids.groups()[0]

@@ -79,7 +79,6 @@ Startpage's category (for Web-search, News, Videos, ..) is set by
 
 """
 
-from typing import TYPE_CHECKING
 from collections import OrderedDict
 import re
 from unicodedata import normalize, combining
@@ -96,14 +95,7 @@ from searx.exceptions import SearxEngineCaptchaException
 from searx.locales import region_tag
 from searx.enginelib.traits import EngineTraits
 
-if TYPE_CHECKING:
-    import logging
-
-    logger: logging.Logger
-
-traits: EngineTraits
-
 # about
 about = {
     "website": 'https://startpage.com',
     "wikidata_id": 'Q2333295',

@@ -36,7 +36,7 @@ def response(resp):
         results.append(
             {
                 'template': 'images.html',
-                'url': base_url + extract_text(eval_xpath(result, url_xpath)),
+                'url': base_url + extract_text(eval_xpath(result, url_xpath)),  # type: ignore
                 'title': extract_text(eval_xpath(result, title_xpath)).replace(" SVG File", "").replace("Show ", ""),
                 'img_src': extract_text(eval_xpath(result, img_src_xpath)),
             }

@@ -15,16 +15,11 @@ This SearXNG engine uses the `/api2u/search`_ API.
 .. _OpenAPI: https://swagger.io/specification/
 
 """
-from typing import TYPE_CHECKING
-
 from datetime import datetime
 from urllib.parse import urlencode
 import re
 
-if TYPE_CHECKING:
-    import logging
-
-    logger: logging.Logger
-
 about = {
     'website': "https://tagesschau.de",

@@ -166,7 +166,7 @@ def response(resp):
 
     message = 'HTTP status: %s' % resp.status_code
     error = json_data.get('error')
-    s_key = json_data.get('suggestions', {}).get('key', '')
+    s_key = json_data.get('suggestions', {}).get('key', '')  # type: ignore
 
     if error and s_key:
         message = "%s (%s)" % (error, s_key)

@@ -48,21 +48,16 @@ Implementations
 
 """
 from __future__ import annotations
-from typing import TYPE_CHECKING
 
 from typing import List, Dict, Any
 from datetime import datetime
 from urllib.parse import quote
 
 from lxml import etree  # type: ignore
+import httpx
 
 from searx.exceptions import SearxEngineAPIException
 
-if TYPE_CHECKING:
-    import httpx
-    import logging
-
-    logger: logging.Logger
-
 # engine settings
 about: Dict[str, Any] = {
     "website": None,

@@ -5,7 +5,6 @@ from :ref:`wikipedia engine`.
 """
 # pylint: disable=missing-class-docstring
 
-from typing import TYPE_CHECKING
 from hashlib import md5
 from urllib.parse import urlencode, unquote
 from json import loads
@@ -23,14 +22,6 @@ from searx.engines.wikipedia import (
 )
 from searx.enginelib.traits import EngineTraits
 
-if TYPE_CHECKING:
-    import logging
-
-    logger: logging.Logger
-
-traits: EngineTraits
-
 # about
 about = {
     "website": 'https://wikidata.org/',
     "wikidata_id": 'Q2013',
@@ -142,7 +133,7 @@ def get_headers():
     return {'Accept': 'application/sparql-results+json', 'User-Agent': searx_useragent()}
 
 
-def get_label_for_entity(entity_id, language):
+def get_label_for_entity(entity_id, language):  # type: ignore
     name = WIKIDATA_PROPERTIES.get(entity_id)
     if name is None:
         name = WIKIDATA_PROPERTIES.get((entity_id, language))
@@ -497,7 +488,7 @@ class WDAttribute:
     def __init__(self, name):
         self.name = name
 
-    def get_select(self):
+    def get_select(self) -> str:
         return '(group_concat(distinct ?{name};separator=", ") as ?{name}s)'.replace('{name}', self.name)
 
     def get_label(self, language):
@@ -506,10 +497,10 @@ class WDAttribute:
     def get_where(self):
         return "OPTIONAL { ?item wdt:{name} ?{name} . }".replace('{name}', self.name)
 
-    def get_wikibase_label(self):
+    def get_wikibase_label(self) -> str:
         return ""
 
-    def get_group_by(self):
+    def get_group_by(self) -> str:
         return ""
 
     def get_str(self, result, language):  # pylint: disable=unused-argument
@@ -702,7 +693,7 @@ class WDDateAttribute(WDAttribute):
             # precision: minute
             return (
                 get_datetime_format(format, locale=locale)
-                .replace("'", "")
+                .replace("'", "")  # type: ignore
                 .replace('{0}', format_time(timestamp, 'full', tzinfo=None, locale=locale))
                 .replace('{1}', format_date(timestamp, 'short', locale=locale))
             )

@@ -64,8 +64,6 @@ from searx import network as _network
 from searx import locales
 from searx.enginelib.traits import EngineTraits
 
-traits: EngineTraits
-
 # about
 about = {
     "website": 'https://www.wikipedia.org/',
@@ -277,7 +275,7 @@ def fetch_wikimedia_traits(engine_traits: EngineTraits):
     engine_traits.regions[sxng_tag] = eng_tag
 
     resp = _network.get(list_of_wikipedias)
-    if not resp.ok:
+    if not resp.ok:  # type: ignore
         print("ERROR: response from Wikipedia is not OK.")
 
     dom = html.fromstring(resp.text)

@@ -5,7 +5,7 @@
 
 from urllib.parse import urlencode
 
-from lxml import etree
+from lxml import etree  # type: ignore
 
 # about
 about = {

@@ -4,7 +4,7 @@
 """
 
 from urllib.parse import urlencode, urljoin
-from lxml import html, etree
+from lxml import html, etree  # type: ignore
 
 from searx.utils import extract_text, eval_xpath_list, eval_xpath_getindex
 

@@ -73,7 +73,7 @@ from lxml import html
 from searx.utils import extract_text, extract_url, eval_xpath, eval_xpath_list
 from searx.network import raise_for_httperror
 
-search_url = None
+search_url = ''
 """
 Search URL of the engine.  Example::
 
@@ -270,7 +270,9 @@ def response(resp):  # pylint: disable=too-many-branches
 
             # add alternative cached url if available
             if cached_xpath:
-                tmp_result['cached_url'] = cached_url + extract_text(eval_xpath_list(result, cached_xpath, min_len=1))
+                tmp_result['cached_url'] = cached_url + extract_text(
+                    eval_xpath_list(result, cached_xpath, min_len=1)
+                )  # type: ignore
 
             if is_onion:
                 tmp_result['is_onion'] = True
@@ -290,7 +292,7 @@ def response(resp):  # pylint: disable=too-many-branches
                     'url': url,
                     'title': title,
                     'content': content,
-                    'cached_url': cached_url + cached,
+                    'cached_url': cached_url + cached,  # type: ignore
                     'is_onion': is_onion,
                 }
             )

@@ -19,8 +19,6 @@ from searx.utils import (
 )
 from searx.enginelib.traits import EngineTraits
 
-traits: EngineTraits
-
 # about
 about = {
     "website": 'https://search.yahoo.com/',
@@ -86,7 +84,7 @@ def request(query, params):
             'p': query,
             'ei': 'UTF-8',
             'fl': 1,
-            'vl': 'lang_' + lang,
+            'vl': 'lang_' + lang,  # type: ignore
             'btf': btf,
             'fr2': 'time',
             'age': age,
@@ -95,7 +93,7 @@ def request(query, params):
         }
     )
 
-    domain = lang2domain.get(lang, '%s.search.yahoo.com' % lang)
+    domain = lang2domain.get(lang, '%s.search.yahoo.com' % lang)  # type: ignore
     params['url'] = 'https://%s/search?%s' % (domain, args)
     return params
 
@@ -158,7 +156,7 @@ def fetch_traits(engine_traits: EngineTraits):
     engine_traits.all_locale = 'any'
 
     resp = network.get('https://search.yahoo.com/preferences/languages')
-    if not resp.ok:
+    if not resp.ok:  # type: ignore
         print("ERROR: response from yahoo is not OK.")
 
     dom = html.fromstring(resp.text)

@@ -82,7 +82,7 @@ def response(resp):
         item = {'url': url, 'title': title, 'content': content, 'img_src': img_src}
 
         pub_date = extract_text(result.xpath('.//span[contains(@class,"s-time")]'))
-        ago = AGO_RE.search(pub_date)
+        ago = AGO_RE.search(pub_date)  # type: ignore
         if ago:
             number = int(ago.group(1))
             delta = AGO_TIMEDELTA[ago.group(2)]

@@ -32,11 +32,13 @@ Implementations
 ===============
 
 """
+from __future__ import annotations
 
-from typing import TYPE_CHECKING
 from typing import List, Dict, Any, Optional
 from datetime import datetime
 from urllib.parse import quote
 
+import httpx
 from lxml import html
 from flask_babel import gettext
 
@@ -44,13 +46,7 @@ from searx.utils import extract_text, eval_xpath, eval_xpath_list
 from searx.enginelib.traits import EngineTraits
 from searx.data import ENGINE_TRAITS
 
-if TYPE_CHECKING:
-    import httpx
-    import logging
-
-    logger: logging.Logger
-
 # about
 about: Dict[str, Any] = {
     "website": "https://zlibrary-global.se",
     "wikidata_id": "Q104863992",

@@ -61,7 +61,7 @@ class SearxEngineAccessDeniedException(SearxEngineResponseException):
     """This settings contains the default suspended time (default 86400 sec / 1
     day)."""
 
-    def __init__(self, suspended_time: int = None, message: str = 'Access denied'):
+    def __init__(self, suspended_time: int = 0, message: str = 'Access denied'):
         """Generic exception to raise when an engine denies access to the results.
 
         :param suspended_time: How long the engine is going to be suspended in
@@ -75,10 +75,10 @@ class SearxEngineAccessDeniedException(SearxEngineResponseException):
         self.suspended_time = suspended_time
         self.message = message
 
-    def _get_default_suspended_time(self):
+    def _get_default_suspended_time(self) -> int:
         from searx import get_setting  # pylint: disable=C0415
 
-        return get_setting(self.SUSPEND_TIME_SETTING)
+        return get_setting(self.SUSPEND_TIME_SETTING)  # type: ignore
 
 
 class SearxEngineCaptchaException(SearxEngineAccessDeniedException):

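Aside on the `suspended_time` change above: a parameter annotated `int` with a default of `None` is a type error under pyright, and there are two standard fixes. This diff chooses a sentinel integer; the alternative would widen the annotation. A hedged, runnable sketch of both:

    from typing import Optional

    def old_style(suspended_time: int = None):            # flagged: None is not an int
        ...

    def fixed(suspended_time: int = 0):                   # the variant chosen in this diff
        ...

    def alternative(suspended_time: Optional[int] = None):  # the other possible fix
        ...
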
@@ -56,7 +56,7 @@ def get_external_url(url_id, item_id, alternative="default"):
 def get_earth_coordinates_url(latitude, longitude, osm_zoom, alternative='default'):
     url = (
         get_external_url('map', None, alternative)
-        .replace('${latitude}', str(latitude))
+        .replace('${latitude}', str(latitude))  # type: ignore
         .replace('${longitude}', str(longitude))
         .replace('${zoom}', str(osm_zoom))
     )

@@ -85,7 +85,7 @@ Kong."""
 def localeselector():
     locale = 'en'
     if has_request_context():
-        value = flask.request.preferences.get_value('locale')
+        value = flask.request.preferences.get_value('locale')  # type: ignore
         if value:
             locale = value
 

@@ -87,7 +87,7 @@ class ErrorContext:  # pylint: disable=missing-class-docstring
 def add_error_context(engine_name: str, error_context: ErrorContext) -> None:
     errors_for_engine = errors_per_engines.setdefault(engine_name, {})
     errors_for_engine[error_context] = errors_for_engine.get(error_context, 0) + 1
-    engines[engine_name].logger.warning('%s', str(error_context))
+    engines[engine_name].logger.warning('%s', str(error_context))  # type: ignore
 
 
 def get_trace(traces):
@@ -102,9 +102,9 @@ def get_trace(traces):
 
 def get_hostname(exc: HTTPError) -> typing.Optional[None]:
     url = exc.request.url
-    if url is None and exc.response is not None:
-        url = exc.response.url
-    return urlparse(url).netloc
+    if url is None and exc.response is not None:  # type: ignore
+        url = exc.response.url  # type: ignore
+    return urlparse(url).netloc  # type: ignore
 
 
 def get_request_exception_messages(
@@ -118,8 +118,8 @@ def get_request_exception_messages(
     # exc.request is property that raise an RuntimeException
     # if exc._request is not defined.
     url = exc.request.url
-    if url is None and hasattr(exc, 'response') and exc.response is not None:
-        url = exc.response.url
+    if url is None and hasattr(exc, 'response') and exc.response is not None:  # type: ignore
+        url = exc.response.url  # type: ignore
     if url is not None:
         hostname = url.host
     if isinstance(exc, HTTPStatusError):

@@ -70,7 +70,7 @@ class Histogram:  # pylint: disable=missing-class-docstring
         # use Decimal to avoid rounding errors
         x = decimal.Decimal(0)
         width = decimal.Decimal(self._width)
-        width_exponent = -width.as_tuple().exponent
+        width_exponent = -width.as_tuple().exponent  # type: ignore
         with self._lock:
             if self._count > 0:
                 for y in self._quartiles:

@@ -166,9 +166,8 @@ class Network:
         for transport in client._mounts.values():  # pylint: disable=protected-access
             if isinstance(transport, AsyncHTTPTransportNoHttp):
                 continue
-            if getattr(transport, "_pool") and getattr(
-                transport._pool, "_rdns", False  # pylint: disable=protected-access
-            ):
+            # pylint: disable=protected-access
+            if getattr(transport, "_pool") and getattr(transport._pool, "_rdns", False):  # type: ignore
                 continue
             return False
         response = await client.get("https://check.torproject.org/api/ip", timeout=60)
@@ -238,7 +237,7 @@ class Network:
         if isinstance(response, httpx.Response):
             # requests compatibility (response is not streamed)
             # see also https://www.python-httpx.org/compatibility/#checking-for-4xx5xx-responses
-            response.ok = not response.is_error
+            response.ok = not response.is_error  # type: ignore
 
             # raise an exception
             if do_raise_for_httperror:

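This one-line shim is the root cause of most `# type: ignore` comments in this compare: httpx has no `Response.ok` property, so SearXNG attaches one after the fact to mirror requests' API. The assignment is invisible to a static checker, hence the ignore both here and at every later `resp.ok` access. A hedged, self-contained illustration:

    import httpx

    response = httpx.Response(status_code=503)
    response.ok = not response.is_error  # type: ignore  # dynamic attribute
    print(response.ok)  # False, like requests.Response.ok for a 5xx
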
@@ -128,10 +128,9 @@ def load_plugin(plugin_module_name, external):
         return None
 
     # difference with searx: use module name instead of the user name
-    plugin.id = plugin_module_name
+    plugin.id = plugin_module_name  # type: ignore
 
     #
-    plugin.logger = getLogger(plugin_module_name)
+    plugin.logger = getLogger(plugin_module_name)  # type: ignore
 
     for plugin_attr, plugin_attr_type in required_attrs:
         if not hasattr(plugin, plugin_attr):
@@ -152,7 +151,7 @@ def load_plugin(plugin_module_name, external):
         setattr(plugin, plugin_attr, plugin_attr_type())
 
     if not hasattr(plugin, "preference_section"):
-        plugin.preference_section = "general"
+        plugin.preference_section = "general"  # type: ignore
 
     # query plugin
     if plugin.preference_section == "query":
@@ -163,7 +162,9 @@ def load_plugin(plugin_module_name, external):
 
     if settings.get("enabled_plugins"):
         # searx compatibility: plugin.name in settings['enabled_plugins']
-        plugin.default_on = plugin.name in settings["enabled_plugins"] or plugin.id in settings["enabled_plugins"]
+        plugin.default_on = (  # type: ignore
+            plugin.name in settings["enabled_plugins"] or plugin.id in settings["enabled_plugins"]
+        )
 
     # copy resources if this is an external plugin
     if external:

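The ignores above exist because plugins are plain modules and these attributes are grafted on after import; statically, a module object has no `id`, `logger`, or `preference_section`. A small simulation (names here are hypothetical, not the real loader API):

    import types

    plugin = types.ModuleType("example_plugin")   # hypothetical plugin module
    plugin.id = "example_plugin"                  # type: ignore[attr-defined]
    plugin.preference_section = "general"         # type: ignore[attr-defined]
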
@@ -9,7 +9,7 @@ description = "Filter out onion results that appear in Ahmia's blacklist. (See h
 default_on = True
 preference_section = 'onions'
 
-ahmia_blacklist = None
+ahmia_blacklist = []
 
 
 def on_result(_request, _search, result):

@@ -324,7 +324,7 @@ class ClientPref:
 
     # hint: searx.webapp.get_client_settings should be moved into this class
 
-    locale: babel.Locale
+    locale: Optional[babel.Locale]
     """Locale prefered by the client."""
 
     def __init__(self, locale: Optional[babel.Locale] = None):
@@ -359,7 +359,7 @@ class ClientPref:
             try:
                 qvalue = float(qvalue.split('=')[-1])
                 locale = babel.Locale.parse(lang, sep='-')
-            except (ValueError, babel.core.UnknownLocaleError):
+            except (ValueError, babel.core.UnknownLocaleError):  # type: ignore
                 continue
             pairs.append((locale, qvalue))
 
@@ -548,7 +548,7 @@ class Preferences:
                 self.tokens.parse_form(user_setting)
             else:
                 self.unknown_params[user_setting_name] = user_setting
-        self.key_value_settings['categories'].parse_form(enabled_categories)
+        self.key_value_settings['categories'].parse_form(enabled_categories)  # type: ignore
         self.engines.parse_form(disabled_engines)
         self.plugins.parse_form(disabled_plugins)

@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
 # pylint: disable=invalid-name, missing-module-docstring, missing-class-docstring
 
+from typing import Any
 from abc import abstractmethod, ABC
 import re
 
@@ -18,7 +19,7 @@ class QueryPartParser(ABC):
 
     @staticmethod
     @abstractmethod
-    def check(raw_value):
+    def check(raw_value) -> Any:
         """Check if raw_value can be parsed"""
 
     def __init__(self, raw_text_query, enable_autocomplete):
@@ -26,7 +27,7 @@ class QueryPartParser(ABC):
         self.enable_autocomplete = enable_autocomplete
 
     @abstractmethod
-    def __call__(self, raw_value):
+    def __call__(self, raw_value) -> Any:
         """Try to parse raw_value: set the self.raw_text_query properties
 
         return True if raw_value has been parsed
@@ -309,7 +310,7 @@ class RawTextQuery:
         self.autocomplete_location = last_index_location
 
     def get_autocomplete_full_query(self, text):
-        qlist, position = self.autocomplete_location
+        qlist, position = self.autocomplete_location  # type: ignore
         qlist[position] = text
         return self.getFullQuery()
 

@@ -33,7 +33,7 @@ logger = logging.getLogger(__name__)
 
 
 def client() -> redis.Redis:
-    return _CLIENT
+    return _CLIENT  # type: ignore
 
 
 def initialize():
@@ -43,7 +43,7 @@ def initialize():
         return False
     try:
         # create a client, but no connection is done
-        _CLIENT = redis.Redis.from_url(redis_url)
+        _CLIENT = redis.Redis.from_url(redis_url)  # type: ignore
 
         # log the parameters as seen by the redis lib, without the password
         kwargs = _CLIENT.get_connection_kwargs().copy()
@@ -57,11 +57,11 @@ def initialize():
         # no error: the redis connection is working
         logger.info("connected to Redis")
         return True
-    except redis.exceptions.RedisError as e:
+    except redis.exceptions.RedisError as e:  # type: ignore
         _CLIENT = None
         _pw = pwd.getpwuid(os.getuid())
         logger.exception("[%s (%s)] can't connect redis DB ...", _pw.pw_name, _pw.pw_uid)
-        if redis_url == OLD_REDIS_URL_DEFAULT_URL and isinstance(e, redis.exceptions.ConnectionError):
+        if redis_url == OLD_REDIS_URL_DEFAULT_URL and isinstance(e, redis.exceptions.ConnectionError):  # type: ignore
             logger.info(
                 "You can safely ignore the above Redis error if you don't use Redis. "
                 "You can remove this error by setting redis.url to false in your settings.yml."
@@ -83,7 +83,7 @@ def secret_hash(name: str):
     :type name: str
     """
     m = hmac.new(bytes(name, encoding='utf-8'), digestmod='sha256')
-    m.update(bytes(get_setting('server.secret_key'), encoding='utf-8'))
+    m.update(bytes(get_setting('server.secret_key'), encoding='utf-8'))  # type: ignore
     return m.hexdigest()
 

@@ -1,11 +1,13 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
 # pylint: disable=missing-module-docstring

+from __future__ import annotations
+from typing import List, NamedTuple, Set, Callable, Any
+
 import re
 from collections import defaultdict
 from operator import itemgetter
 from threading import RLock
-from typing import List, NamedTuple, Set
 from urllib.parse import urlparse, unquote

 from searx import logger

@@ -61,11 +63,11 @@ def compare_urls(url_a, url_b):
 def merge_two_infoboxes(infobox1, infobox2):  # pylint: disable=too-many-branches, too-many-statements
     # get engines weights
     if hasattr(engines[infobox1['engine']], 'weight'):
-        weight1 = engines[infobox1['engine']].weight
+        weight1 = engines[infobox1['engine']].weight  # type: ignore
     else:
         weight1 = 1
     if hasattr(engines[infobox2['engine']], 'weight'):
-        weight2 = engines[infobox2['engine']].weight
+        weight2 = engines[infobox2['engine']].weight  # type: ignore
     else:
         weight2 = 1

@@ -135,7 +137,7 @@ def result_score(result):

     for result_engine in result['engines']:
         if hasattr(engines[result_engine], 'weight'):
-            weight *= float(engines[result_engine].weight)
+            weight *= float(engines[result_engine].weight)  # type: ignore

     occurrences = len(result['positions'])

@@ -187,8 +189,8 @@ class ResultContainer:
         self.paging = False
         self.unresponsive_engines: Set[UnresponsiveEngine] = set()
         self.timings: List[Timing] = []
-        self.redirect_url = None
-        self.on_result = lambda _: True
+        self.redirect_url: str | None = None
+        self.on_result: Callable[[dict], Any] = lambda _: True
         self._lock = RLock()

     def extend(self, engine_name, results):  # pylint: disable=too-many-branches

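The `from __future__ import annotations` line added above is what lets the new `str | None` annotation parse on Python 3.8: PEP 604 union syntax is otherwise 3.10+, but with lazy annotations it is never evaluated at runtime. A minimal sketch:

from __future__ import annotations

from typing import Any, Callable


class Container:
    def __init__(self) -> None:
        # lazy annotation: fine on 3.8 thanks to the __future__ import
        self.redirect_url: str | None = None
        self.on_result: Callable[[dict], Any] = lambda _: True


c = Container()
assert c.redirect_url is None
assert c.on_result({})
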
@@ -50,8 +50,8 @@ class Search:
         super().__init__()
         self.search_query = search_query
         self.result_container = ResultContainer()
-        self.start_time = None
-        self.actual_timeout = None
+        self.start_time: float = 0
+        self.actual_timeout: float = 0

     def search_external_bang(self):
         """

@@ -146,8 +146,8 @@ class Search:
                 args=(query, request_params, self.result_container, self.start_time, self.actual_timeout),
                 name=search_id,
             )
-            th._timeout = False
-            th._engine_name = engine_name
+            th._timeout = False  # type: ignore
+            th._engine_name = engine_name  # type: ignore
             th.start()

         for th in threading.enumerate():  # pylint: disable=invalid-name

@@ -155,9 +155,9 @@ class Search:
                 remaining_time = max(0.0, self.actual_timeout - (default_timer() - self.start_time))
                 th.join(remaining_time)
                 if th.is_alive():
-                    th._timeout = True
-                    self.result_container.add_unresponsive_engine(th._engine_name, 'timeout')
-                    PROCESSORS[th._engine_name].logger.error('engine timeout')
+                    th._timeout = True  # type: ignore
+                    self.result_container.add_unresponsive_engine(th._engine_name, 'timeout')  # type: ignore
+                    PROCESSORS[th._engine_name].logger.error('engine timeout')  # type: ignore

     def search_standard(self):
         """

@@ -197,7 +197,7 @@ class SearchWithPlugins(Search):
         # * https://github.com/pallets/werkzeug/blob/3c5d3c9bd0d9ce64590f0af8997a38f3823b368d/src/werkzeug/local.py#L548-L559
         # * https://werkzeug.palletsprojects.com/en/2.0.x/local/#werkzeug.local.LocalProxy._get_current_object
         # pylint: enable=line-too-long
-        self.request = request._get_current_object()
+        self.request = request._get_current_object()  # type: ignore[attr-defined]

     def _on_result(self, result):
         return plugins.call(self.ordered_plugin_list, 'on_result', self.request, self, result)

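The `th._timeout` / `th._engine_name` ignores are needed because `threading.Thread` declares no such attributes. One way to avoid per-line ignores would be a small subclass that declares them up front; a hypothetical sketch, not what this patch does:

import threading


class EngineThread(threading.Thread):
    # declared attributes keep pyright satisfied without ignores
    timeout_flag: bool
    engine_name: str

    def __init__(self, engine_name: str, **kwargs) -> None:
        super().__init__(**kwargs)
        self.timeout_flag = False
        self.engine_name = engine_name


th = EngineThread("example", target=lambda: None)
th.start()
th.join()
assert th.timeout_flag is False
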
@@ -81,7 +81,7 @@ def get_result() -> CheckerResult:
     if client is None:
         # without Redis, the checker is disabled
         return {'status': 'disabled'}
-    serialized_result: Optional[bytes] = client.get(REDIS_RESULT_KEY)
+    serialized_result: Optional[bytes] = client.get(REDIS_RESULT_KEY)  # type: ignore
     if serialized_result is None:
         # the Redis key does not exist
         return {'status': 'unknown'}

@@ -263,7 +263,7 @@ class ResultContainerTests:  # pylint: disable=missing-class-docstring
     def check_basic(self):
         if len(self.result_container.unresponsive_engines) > 0:
             for message in self.result_container.unresponsive_engines:
-                self._record_error(message[1] + ' ' + (message[2] or ''))
+                self._record_error(message.error_type + ' ' + (str(message.suspended) if message.suspended else ''))
             self.stop_test = True
             return

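The `check_basic()` rewrite above tracks `unresponsive_engines` moving from plain tuples to named tuples, so fields are read by name instead of index. A sketch using the field names the diff implies (the real class may carry more fields):

from typing import NamedTuple


class UnresponsiveEngine(NamedTuple):
    engine: str
    error_type: str
    suspended: bool


msg = UnresponsiveEngine('example', 'timeout', False)
error_line = msg.error_type + ' ' + (str(msg.suspended) if msg.suspended else '')
assert error_line.strip() == 'timeout'
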
@@ -11,10 +11,10 @@ This scheduler is not generic on purpose: if more feature are required, a dedica
 (= a better scheduler should not use the web workers)
 """

-from typing import Callable
 import logging
 import time
 from pathlib import Path
+from typing import Callable

 from searx.redisdb import client as get_redis_client
 from searx.redislib import lua_script_storage

@@ -2,7 +2,7 @@
 # pylint: disable=missing-module-docstring

 import typing
-import babel
+import babel.core


 class EngineRef:

@@ -77,6 +77,6 @@ def initialize(engine_list):
         processor = get_processor(engine, engine_name)
         initialize_processor(processor)
         if processor is None:
-            engine.logger.error('Error get processor for engine %s', engine_name)
+            engine.logger.error('Error get processor for engine %s', engine_name)  # type: ignore
         else:
             PROCESSORS[engine_name] = processor

@@ -63,7 +63,7 @@ class EngineProcessor(ABC):
     def __init__(self, engine, engine_name: str):
         self.engine = engine
         self.engine_name = engine_name
-        self.logger = engines[engine_name].logger
+        self.logger = engines[engine_name].logger  # type: ignore
         key = get_network(self.engine_name)
         key = id(key) if key else self.engine_name
         self.suspended_status = SUSPENDED_STATUS.setdefault(key, SuspendedStatus())

@@ -147,7 +147,7 @@ class OnlineProcessor(EngineProcessor):
         response = self._send_http_request(params)

         # parse the response
-        response.search_params = params
+        response.search_params = params  # type: ignore
         return self.engine.response(response)

     def search(self, query, params, result_container, start_time, timeout_limit):

@@ -3,6 +3,7 @@

 """

+from __future__ import annotations
 import typing
 import numbers
 import errno

@@ -49,7 +50,7 @@ class SettingsValue:
         self,
         type_definition: typing.Union[None, typing.Any, typing.Tuple[typing.Any]] = None,
         default: typing.Any = None,
-        environ_name: str = None,
+        environ_name: str | None = None,
     ):
         self.type_definition = (
             type_definition if type_definition is None or isinstance(type_definition, tuple) else (type_definition,)

@@ -59,13 +60,13 @@ class SettingsValue:

     @property
     def type_definition_repr(self):
-        types_str = [t.__name__ if isinstance(t, type) else repr(t) for t in self.type_definition]
+        types_str = [t.__name__ if isinstance(t, type) else repr(t) for t in self.type_definition]  # type: ignore
         return ', '.join(types_str)

     def check_type_definition(self, value: typing.Any) -> None:
         if value in self.type_definition:
             return
-        type_list = tuple(t for t in self.type_definition if isinstance(t, type))
+        type_list = tuple(t for t in self.type_definition if isinstance(t, type))  # type: ignore
         if not isinstance(value, type_list):
             raise ValueError('The value has to be one of these types/values: {}'.format(self.type_definition_repr))

@@ -89,7 +90,7 @@ class SettingSublistValue(SettingsValue):
         if not isinstance(value, list):
             raise ValueError('The value has to a list')
         for item in value:
-            if not item in self.type_definition[0]:
+            if not item in self.type_definition[0]:  # type: ignore
                 raise ValueError('{} not in {}'.format(item, self.type_definition))


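The `environ_name: str = None` fix above is the classic implicit-Optional case: PEP 484 checkers no longer treat a `None` default as implying `Optional`, so the union must be spelled out. A minimal sketch (valid on 3.8 thanks to the `__future__` import added in the same file):

from __future__ import annotations


def lookup(environ_name: str | None = None) -> str:
    # the explicit union documents that None really is an accepted value
    return environ_name if environ_name is not None else 'unset'


assert lookup() == 'unset'
assert lookup('SEARXNG_PORT') == 'SEARXNG_PORT'
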
@@ -63,7 +63,7 @@ or manually by executing the searx/webapp.py file? -->
     <input type="checkbox" id="step1">
     <label for="step1">{{ _('Start submiting a new issue on GitHub') }}</label>
     <div class="step1 step_content">
-        <p><a href="{{ get_setting('brand.issue_url') }}?q=is%3Aissue+Bug:%20{{ engine_name }}" target="_blank" rel="noreferrer noreferrer">{{ _('Please check for existing bugs about this engine on GitHub') }}</a></p>
+        <p><a href="{{ get_setting('brand.issue_url') }}?q=is%3Aissue+Bug:%20{{ engine_name }} {{ technical_report }}" target="_blank" rel="noreferrer noreferrer">{{ _('Please check for existing bugs about this engine on GitHub') }}</a></p>
     </div>
     <input class="step1 step1_delay" type="checkbox" id="step2">
     <label class="step1 step1_delay" for="step2" >{{ _('I confirm there is no existing bug about the issue I encounter') }}</label>

@@ -52,7 +52,7 @@ _STORAGE_UNIT_VALUE: Dict[str, int] = {
 }

 _XPATH_CACHE: Dict[str, XPath] = {}
-_LANG_TO_LC_CACHE: Dict[str, Dict[str, str]] = {}
+_LANG_TO_LC_CACHE: Dict[str, Dict[str, str]] = {}  # type: ignore

 _FASTTEXT_MODEL: Optional["fasttext.FastText._FastText"] = None  # type: ignore
 """fasttext model to predict laguage of a search term"""

@@ -214,7 +214,7 @@ def extract_text(xpath_results, allow_none: bool = False) -> Optional[str]:
         return result.strip()
     if isinstance(xpath_results, ElementBase):
         # it's a element
-        text: str = html.tostring(xpath_results, encoding='unicode', method='text', with_tail=False)
+        text: str = html.tostring(xpath_results, encoding='unicode', method='text', with_tail=False)  # type: ignore
         text = text.strip().replace('\n', ' ')
         return ' '.join(text.split())
     if isinstance(xpath_results, (str, Number, bool)):

@@ -564,7 +564,7 @@ def eval_xpath_list(element: ElementBase, xpath_spec: XPathSpecType, min_len: Op
     return result


-def eval_xpath_getindex(elements: ElementBase, xpath_spec: XPathSpecType, index: int, default=_NOTSET):
+def eval_xpath_getindex(elements: ElementBase, xpath_spec: XPathSpecType, index: int, default=_NOTSET) -> ElementBase:
     """Call eval_xpath_list then get one element using the index parameter.
     If the index does not exist, either raise an exception is default is not set,
     other return the default value (can be None).

@@ -599,7 +599,7 @@ def _get_fasttext_model() -> "fasttext.FastText._FastText":  # type: ignore
         import fasttext  # pylint: disable=import-outside-toplevel

         # Monkey patch: prevent fasttext from showing a (useless) warning when loading a model.
-        fasttext.FastText.eprint = lambda x: None
+        fasttext.FastText.eprint = lambda x: None  # type: ignore
         _FASTTEXT_MODEL = fasttext.load_model(str(data_dir / 'lid.176.ftz'))
     return _FASTTEXT_MODEL

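The `extract_text()` ignore above is presumably needed because the available lxml stubs give `tostring()` a union return type rather than `str`, even with `encoding='unicode'`. The call itself behaves as the code assumes; a runnable sketch:

from lxml import html

node = html.fromstring('<p>Hello\n  world</p>')
# method='text' drops markup; encoding='unicode' yields str at runtime
text: str = html.tostring(node, encoding='unicode', method='text', with_tail=False)
assert ' '.join(text.strip().split()) == 'Hello world'
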
@@ -55,7 +55,7 @@ def parse_pageno(form: Dict[str, str]) -> int:

 def parse_lang(preferences: Preferences, form: Dict[str, str], raw_text_query: RawTextQuery) -> str:
     if is_locked('language'):
-        return preferences.get_value('language')
+        return preferences.get_value('language')  # type: ignore
     # get language
     # set specific language if set on request, query or preferences
     # search with multiple languages is not supported (by most engines)

@@ -67,15 +67,15 @@ def parse_lang(preferences: Preferences, form: Dict[str, str], raw_text_query: R
         query_lang = preferences.get_value('language')

     # check language
-    if not VALID_LANGUAGE_CODE.match(query_lang) and query_lang != 'auto':
+    if not VALID_LANGUAGE_CODE.match(query_lang) and query_lang != 'auto':  # type: ignore
         raise SearxParameterException('language', query_lang)

-    return query_lang
+    return query_lang  # type: ignore


 def parse_safesearch(preferences: Preferences, form: Dict[str, str]) -> int:
     if is_locked('safesearch'):
-        return preferences.get_value('safesearch')
+        return preferences.get_value('safesearch')  # type: ignore

     if 'safesearch' in form:
         query_safesearch = form.get('safesearch')

@@ -87,10 +87,10 @@ def parse_safesearch(preferences: Preferences, form: Dict[str, str]) -> int:
         query_safesearch = preferences.get_value('safesearch')

     # safesearch : second check
-    if query_safesearch < 0 or query_safesearch > 2:
+    if query_safesearch < 0 or query_safesearch > 2:  # type: ignore
         raise SearxParameterException('safesearch', query_safesearch)

-    return query_safesearch
+    return query_safesearch  # type: ignore


 def parse_time_range(form: Dict[str, str]) -> Optional[str]:

@@ -145,7 +145,7 @@ def get_selected_categories(preferences: Preferences, form: Optional[Dict[str, s
     # (is stored in cookie)
     if not selected_categories:
         cookie_categories = preferences.get_value('categories')
-        for ccateg in cookie_categories:
+        for ccateg in cookie_categories:  # type: ignore
             selected_categories.append(ccateg)

     # if still no category is specified, using general

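The recurring `return preferences.get_value(...)  # type: ignore` lines exist because `get_value()` is typed to return a broad union while these functions declare `-> str` / `-> int`. `typing.cast` would be an equivalent alternative; a sketch with a made-up `PreferencesLike` stand-in:

from typing import Any, Dict, cast


class PreferencesLike:
    # stand-in for the real Preferences class (assumption)
    def __init__(self, values: Dict[str, Any]) -> None:
        self._values = values

    def get_value(self, name: str) -> Any:
        return self._values.get(name)


def parse_lang(prefs: PreferencesLike) -> str:
    # cast() narrows the Any without a per-line ignore
    return cast(str, prefs.get_value('language'))


assert parse_lang(PreferencesLike({'language': 'en'})) == 'en'
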
@@ -171,7 +171,7 @@ class ExtendedRequest(flask.Request):
     preferences: Preferences
     errors: List[str]
     user_plugins: List[Plugin]
-    form: Dict[str, str]
+    form: Dict[str, str]  # type: ignore
     start_time: float
     render_time: float
     timings: List[Timing]

@@ -1161,6 +1161,21 @@ def stats():
             reliability_order = 1 - reliability_order
         return (reliability_order, key, engine_stat['name'])

+    technical_report = []
+    for error in engine_reliabilities.get(selected_engine_name, {}).get('errors', []):
+        technical_report.append(
+            f"\
+            Error: {error['exception_classname'] or error['log_message']} \
+            Parameters: {error['log_parameters']} \
+            File name: {error['filename'] }:{ error['line_no'] } \
+            Error Function: {error['function']} \
+            Code: {error['code']} \
+            ".replace(
+                ' ' * 12, ''
+            ).strip()
+        )
+    technical_report = ' '.join(technical_report)
+
     engine_stats['time'] = sorted(engine_stats['time'], reverse=reverse, key=get_key)
     return render(
         # fmt: off

@@ -1170,6 +1185,7 @@ def stats():
         engine_reliabilities = engine_reliabilities,
         selected_engine_name = selected_engine_name,
         searx_git_branch = GIT_BRANCH,
+        technical_report = technical_report,
         # fmt: on
     )

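The `technical_report` block added above builds one line per recorded error, using backslash continuations inside an f-string and then stripping the 12 spaces of source indentation back out with `.replace(' ' * 12, '')`. The same trick in isolation, with made-up error values:

error = {'exception_classname': 'Timeout', 'log_parameters': ('q',)}
# each continued line carries 12 leading spaces into the string value;
# replace() removes them again, strip() cleans up the edges
line = f"\
            Error: {error['exception_classname']} \
            Parameters: {error['log_parameters']} \
            ".replace(' ' * 12, '').strip()
assert line == "Error: Timeout Parameters: ('q',)"
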
@@ -56,7 +56,7 @@ def get_search_query(
 ) -> searx.search.SearchQuery:
     """Get search results for the query"""
     if engine_categories is None:
-        engine_categories = list(searx.engines.categories.keys())
+        engine_categories = list(searx.engines.categories.keys())  # type: ignore
     try:
         category = args.category.decode('utf-8')
     except AttributeError:

@@ -68,7 +68,7 @@ def get_search_query(
         "language": args.lang,
         "time_range": args.timerange,
     }
-    preferences = searx.preferences.Preferences(['simple'], engine_categories, searx.engines.engines, [])
+    preferences = searx.preferences.Preferences(['simple'], engine_categories, searx.engines.engines, [])  # type: ignore
     preferences.key_value_settings['safesearch'].parse(args.safesearch)

     search_query = searx.webadapter.get_search_query_from_webapp(preferences, form)[0]

@@ -141,7 +141,7 @@ def parse_argument(
     Namespace(category='general', lang='all', pageno=1, query='rain', safesearch='0', timerange=None)
     """  # noqa: E501
     if not category_choices:
-        category_choices = list(searx.engines.categories.keys())
+        category_choices = list(searx.engines.categories.keys())  # type: ignore
     parser = argparse.ArgumentParser(description='Standalone searx.')
     parser.add_argument('query', type=str, help='Text query')
     parser.add_argument(

@@ -166,7 +166,7 @@ def parse_argument(
 if __name__ == '__main__':
     settings_engines = searx.settings['engines']
     searx.search.load_engines(settings_engines)
-    engine_cs = list(searx.engines.categories.keys())
+    engine_cs = list(searx.engines.categories.keys())  # type: ignore
     prog_args = parse_argument(category_choices=engine_cs)
     searx.search.initialize_network(settings_engines, searx.settings['outgoing'])
     searx.search.check_network_configuration()

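The `if engine_categories is None:` lines touched above are the usual None-default idiom for arguments whose fallback is computed at call time (the ignores themselves look pyright-specific, presumably because `searx.engines.categories` is populated at runtime). The idiom in isolation, with a local stand-in registry:

from typing import Dict, List, Optional

categories: Dict[str, list] = {'general': [], 'images': []}


def get_categories(engine_categories: Optional[List[str]] = None) -> List[str]:
    # compute a fresh list per call; a mutable default would be shared
    if engine_categories is None:
        engine_categories = list(categories.keys())
    return engine_categories


assert get_categories() == ['general', 'images']
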
@@ -178,7 +178,7 @@ def get_website_description(url, lang1, lang2=None):
 def initialize():
     global IDS, LANGUAGES_SPARQL
     searx.search.initialize()
-    wikipedia_engine = searx.engines.engines['wikipedia']
+    wikipedia_engine = searx.engines.engines['wikipedia']  # type: ignore

     locale2lang = {'nl-BE': 'nl'}
     for sxng_ui_lang in LOCALE_NAMES:

@@ -196,7 +196,7 @@ def initialize():
         WIKIPEDIA_LANGUAGES[sxng_ui_lang] = wiki_lang

     LANGUAGES_SPARQL = ', '.join(f"'{l}'" for l in set(WIKIPEDIA_LANGUAGES.values()))
-    for engine_name, engine in searx.engines.engines.items():
+    for engine_name, engine in searx.engines.engines.items():  # type: ignore
         descriptions[engine_name] = {}
         wikidata_id = getattr(engine, "about", {}).get('wikidata_id')
         if wikidata_id is not None:

@@ -209,7 +209,7 @@ def fetch_wikidata_descriptions():
     print('Fetching wikidata descriptions')
     searx.network.set_timeout_for_thread(60)
     result = wikidata.send_wikidata_query(
-        SPARQL_DESCRIPTION.replace('%IDS%', IDS).replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL)
+        SPARQL_DESCRIPTION.replace('%IDS%', IDS).replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL)  # type: ignore
     )
     if result is not None:
         for binding in result['results']['bindings']:

@@ -230,7 +230,7 @@ def fetch_wikidata_descriptions():
 def fetch_wikipedia_descriptions():
     print('Fetching wikipedia descriptions')
     result = wikidata.send_wikidata_query(
-        SPARQL_WIKIPEDIA_ARTICLE.replace('%IDS%', IDS).replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL)
+        SPARQL_WIKIPEDIA_ARTICLE.replace('%IDS%', IDS).replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL)  # type: ignore
     )
     if result is not None:
         for binding in result['results']['bindings']:

@@ -313,7 +313,7 @@ def fetch_website_description(engine_name, website):

 def fetch_website_descriptions():
     print('Fetching website descriptions')
-    for engine_name, engine in searx.engines.engines.items():
+    for engine_name, engine in searx.engines.engines.items():  # type: ignore
         website = getattr(engine, "about", {}).get('website')
         if website is None and hasattr(engine, "search_url"):
             website = normalize_url(getattr(engine, "search_url"))

@@ -44,7 +44,7 @@ class SearxRobotLayer:
             [exe, webapp], stdout=subprocess.PIPE, stderr=subprocess.STDOUT
         )
         if hasattr(self.server.stdout, 'read1'):
-            print(self.server.stdout.read1(1024).decode())
+            print(self.server.stdout.read1(1024).decode())  # type: ignore

     def tearDown(self):
         os.kill(self.server.pid, 9)

@@ -55,7 +55,11 @@ class SearxRobotLayer:
 def run_robot_tests(tests):
     print('Running {0} tests'.format(len(tests)))
     for test in tests:
-        with Browser('firefox', headless=True, profile_preferences={'intl.accept_languages': 'en'}) as browser:
+        with Browser(
+            'firefox',
+            headless=True,
+            profile_preferences={'intl.accept_languages': 'en'},
+        ) as browser:  # type: ignore
             test(browser)


@@ -168,8 +168,8 @@ commit '''
     git_log_engine.result_separator = '\n\ncommit '
     git_log_engine.delimiter = {}
     git_log_engine.parse_regex = {
-        'commit': '\w{40}',
-        'author': '[\w* ]* <\w*@?\w*\.?\w*>',
+        'commit': r'\w{40}',
+        'author': r'[\w* ]* <\w*@?\w*\.?\w*>',
        'date': 'Date: .*',
        'message': '\n\n.*$',
     }

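The raw-string change above is purely about escape warnings: `'\w{40}'` triggers an invalid-escape-sequence warning (W605) on newer Pythons because `\w` is not a recognised string escape, while `r'\w{40}'` is byte-for-byte the same regex. For instance:

import re

# the raw prefix keeps the pattern identical while silencing W605
commit_re = re.compile(r'\w{40}')
assert commit_re.fullmatch('a' * 40)
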
@@ -10,13 +10,13 @@ from tests import SearxTestCase
 class TestXpathEngine(SearxTestCase):
     def test_request(self):
         xpath.search_url = 'https://url.com/{query}'
-        xpath.categories = []
+        xpath.categories = []  # type: ignore
         xpath.paging = False
         query = 'test_query'
         dicto = defaultdict(dict)
         params = xpath.request(query, dicto)
         self.assertIn('url', params)
-        self.assertEquals('https://url.com/test_query', params['url'])
+        self.assertEquals('https://url.com/test_query', params['url'])  # type: ignore

         xpath.search_url = 'https://url.com/q={query}&p={pageno}'
         xpath.paging = True

@@ -25,7 +25,7 @@ class TestXpathEngine(SearxTestCase):
         dicto['pageno'] = 1
         params = xpath.request(query, dicto)
         self.assertIn('url', params)
-        self.assertEquals('https://url.com/q=test_query&p=1', params['url'])
+        self.assertEquals('https://url.com/q=test_query&p=1', params['url'])  # type: ignore

     def test_response(self):
         # without results_xpath

@@ -41,7 +41,7 @@ class TestXpathEngine(SearxTestCase):
         response = mock.Mock(text='<html></html>')
         self.assertEqual(xpath.response(response), [])

-        html = u"""
+        html = """
         <div>
             <div class="search_result">
                 <a class="result" href="https://result1.com">Result 1</a>

@@ -76,7 +76,7 @@ class TestXpathEngine(SearxTestCase):
         self.assertFalse(results[0].get('is_onion', False))

         # results are onion urls (no results_xpath)
-        xpath.categories = ['onions']
+        xpath.categories = ['onions']  # type: ignore
         results = xpath.response(response)
         self.assertTrue(results[0]['is_onion'])

@@ -86,7 +86,7 @@ class TestXpathEngine(SearxTestCase):
         xpath.title_xpath = './/a[@class="result"]'
         xpath.content_xpath = './/p[@class="content"]'
         xpath.cached_xpath = None
-        xpath.categories = []
+        xpath.categories = []  # type: ignore

         self.assertRaises(AttributeError, xpath.response, None)
         self.assertRaises(AttributeError, xpath.response, [])

@@ -117,6 +117,6 @@ class TestXpathEngine(SearxTestCase):
         self.assertFalse(results[0].get('is_onion', False))

         # results are onion urls (with results_xpath)
-        xpath.categories = ['onions']
+        xpath.categories = ['onions']  # type: ignore
         results = xpath.response(response)
         self.assertTrue(results[0]['is_onion'])

@@ -54,7 +54,7 @@ class TestEnginesInit(SearxTestCase):  # pylint: disable=missing-class-docstring
         self.assertIn('engine1', engines.engines)
         self.assertIn('engine2', engines.engines)
         self.assertIn('onions', engines.categories)
-        self.assertIn('http://engine1.onion', engines.engines['engine1'].search_url)
+        self.assertIn('http://engine1.onion', engines.engines['engine1'].search_url)  # type: ignore
         self.assertEqual(engines.engines['engine1'].timeout, 120.0)

     def test_missing_name_field(self):

@@ -42,10 +42,12 @@ class PluginStoreTest(SearxTestCase):  # pylint: disable=missing-class-docstring
         request = Mock()
         store.call([], 'asdf', request, Mock())

-        self.assertFalse(testplugin.asdf.called)  # pylint: disable=E1101
+        # pylint: disable=no-member
+        self.assertFalse(testplugin.asdf.called)  # type: ignore

         store.call([testplugin], 'asdf', request, Mock())
-        self.assertTrue(testplugin.asdf.called)  # pylint: disable=E1101
+        self.assertTrue(testplugin.asdf.called)  # type: ignore
+        # pylint: enable=no-member


 class SelfIPTest(SearxTestCase):  # pylint: disable=missing-class-docstring

Some files were not shown because too many files have changed in this diff.