# searxng/searx/engines/dictzone.py
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
Dictzone
"""
from urllib.parse import quote, urljoin

from lxml import html

from searx.utils import eval_xpath
# about — engine metadata shown by searx/searxng for this engine
about = {
    "website": 'https://dictzone.com/',
    "wikidata_id": None,
    "official_api_documentation": None,  # dictzone exposes no public API
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',  # results are scraped from the HTML pages
}

# NOTE(review): 'online_dictionnary' (sic) is presumably the exact string the
# engine framework dispatches on — confirm before "fixing" the typo here alone.
engine_type = 'online_dictionnary'
categories = ['general']
# URL template; {from_lang}/{to_lang} are 2-letter codes, {query} the term
url = 'https://dictzone.com/{from_lang}-{to_lang}-dictionary/{query}'

weight = 100
results_xpath = './/table[@id="r"]/tr'
https_support = True


def request(query, params):
    """Fill in ``params['url']`` for a dictzone lookup and return ``params``.

    ``params['from_lang']`` / ``params['to_lang']`` are indexed at ``[2]``
    for the language code used in the URL path (searx language tuples —
    confirm against the engine framework).  The query is URL-quoted so
    spaces and non-ASCII characters yield a valid path segment.
    """
    params['url'] = url.format(
        from_lang=params['from_lang'][2],
        to_lang=params['to_lang'][2],
        # quote(): a raw query with spaces/unicode would produce a broken URL
        query=quote(params['query']),
    )
    return params
def response(resp):
    """Parse the dictzone results table into a list of searx result dicts.

    Each qualifying table row yields one result: the first cell's text as
    ``title`` and the '; '-joined link texts of the second cell as
    ``content``.
    """
    results = []
    dom = html.fromstring(resp.text)

    # [1:] skips the first table row (header)
    for k, result in enumerate(eval_xpath(dom, results_xpath)[1:]):
        try:
            from_result, to_results_raw = eval_xpath(result, './td')
        except ValueError:
            # row does not contain exactly two <td> cells -> not a result row
            continue

        to_results = []
        for to_result in eval_xpath(to_results_raw, './p/a'):
            t = to_result.text_content()
            if t.strip():
                to_results.append(t)

        results.append({
            # the page has no per-entry anchor; append the row index as a
            # synthetic query suffix so each result URL is distinct
            'url': urljoin(resp.url, '?%d' % k),
            'title': from_result.text_content(),
            'content': '; '.join(to_results),
        })

    return results