From 6631f1130512f371332242a13a4d0672a70e803f Mon Sep 17 00:00:00 2001
From: Plague Doctor
Date: Thu, 8 Apr 2021 10:21:54 +1000
Subject: [PATCH 1/2] Add new engine: SJP

---
 searx/engines/sjp.py | 82 ++++++++++++++++++++++++++++++++++++++++++++
 searx/settings.yml   |  8 +++++
 2 files changed, 90 insertions(+)
 create mode 100644 searx/engines/sjp.py

diff --git a/searx/engines/sjp.py b/searx/engines/sjp.py
new file mode 100644
index 000000000..e5b4ad2f9
--- /dev/null
+++ b/searx/engines/sjp.py
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: AGPL-3.0-or-later
+"""Słownik Języka Polskiego (general)
+
+"""
+
+from lxml.html import fromstring
+from searx import logger
+from searx.utils import extract_text
+from searx.raise_for_httperror import raise_for_httperror
+
+logger = logger.getChild('sjp engine')
+
+# about
+about = {
+    "website": 'https://sjp.pwn.pl',
+    "wikidata_id": 'Q55117369',
+    "official_api_documentation": None,
+    "use_official_api": False,
+    "require_api_key": False,
+    "results": 'HTML',
+}
+
+categories = ['general']
+paging = False
+
+URL = 'https://sjp.pwn.pl'
+SEARCH_URL = URL + '/szukaj/{query}.html'
+
+
+def request(query, params):
+    params['url'] = SEARCH_URL.format(query=query)
+    logger.debug(f"query_url --> {params['url']}")
+    return params
+
+
+def response(resp):
+    results = []
+
+    raise_for_httperror(resp)
+    dom = fromstring(resp.text)
+    word = extract_text(dom.xpath('//*[@id="content"]/div/div[1]/div/div[1]/div[1]/div[2]/div/div/div[2]/div/div'))
+
+    definitions = []
+    for src in dom.xpath('//*[@id="content"]/div/div[1]/div/div[1]/div[1]/div[2]/div/div/div/div/div/div'):
+        src_text = extract_text(src.xpath('./h1/span[@class="entry-head-title"]/text()')).strip()
+
+        src_defs = []
+        for def_item in src.xpath('./div/div[contains(@class, "ribbon-element")]'):
+            if def_item.xpath('./div[@class="znacz"]'):
+                sub_defs = []
+                for def_sub_item in def_item.xpath('./div[@class="znacz"]'):
+                    def_sub_text = extract_text(def_sub_item).lstrip('0123456789. ')
+                    sub_defs.append(def_sub_text)
+                src_defs.append((word, sub_defs))
+            else:
+                def_text = extract_text(def_item).strip()
+                src_defs.append((def_text, ''))
+
+        definitions.append((src_text, src_defs))
+
+    if not definitions:
+        return results
+
+    infobox = ''
+    for src in definitions:
+        infobox += f"<div><small>{src[0]}</small>"
+        infobox += "<ul>"
+        for (def_text, sub_def) in src[1]:
+            infobox += f"<li>{def_text}</li>"
+            if sub_def:
+                infobox += "<ol>"
+                for sub_def_text in sub_def:
+                    infobox += f"<li>{sub_def_text}</li>"
+                infobox += "</ol>"
+        infobox += "</ul></div>"
" + + results.append({ + 'infobox': word, + 'content': infobox, + }) + + return results diff --git a/searx/settings.yml b/searx/settings.yml index 3428b2ec5..81ef737db 100644 --- a/searx/settings.yml +++ b/searx/settings.yml @@ -1271,6 +1271,14 @@ engines: categories: videos disabled : True + - name: słownik języka polskiego + engine: sjp + shortcut: sjp + base_url: https://sjp.pwn.pl/ + categories: general + timeout: 5.0 + disabled: True + # Doku engine lets you access to any Doku wiki instance: # A public one or a privete/corporate one. # - name : ubuntuwiki From d275d7a35e2fa2544304bb6ee1577a1421c64c2e Mon Sep 17 00:00:00 2001 From: Plague Doctor Date: Fri, 16 Apr 2021 12:23:27 +1000 Subject: [PATCH 2/2] Code refactoring. --- searx/engines/sjp.py | 40 +++++++++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/searx/engines/sjp.py b/searx/engines/sjp.py index e5b4ad2f9..eff7b7092 100644 --- a/searx/engines/sjp.py +++ b/searx/engines/sjp.py @@ -26,6 +26,11 @@ paging = False URL = 'https://sjp.pwn.pl' SEARCH_URL = URL + '/szukaj/{query}.html' +word_xpath = '//div[@class="query"]' +dict_xpath = ['//div[@class="wyniki sjp-so-wyniki sjp-so-anchor"]', + '//div[@class="wyniki sjp-wyniki sjp-anchor"]', + '//div[@class="wyniki sjp-doroszewski-wyniki sjp-doroszewski-anchor"]'] + def request(query, params): params['url'] = SEARCH_URL.format(query=query) @@ -38,25 +43,30 @@ def response(resp): raise_for_httperror(resp) dom = fromstring(resp.text) - word = extract_text(dom.xpath('//*[@id="content"]/div/div[1]/div/div[1]/div[1]/div[2]/div/div/div[2]/div/div')) + word = extract_text(dom.xpath(word_xpath)) definitions = [] - for src in dom.xpath('//*[@id="content"]/div/div[1]/div/div[1]/div[1]/div[2]/div/div/div/div/div/div'): - src_text = extract_text(src.xpath('./h1/span[@class="entry-head-title"]/text()')).strip() - src_defs = [] - for def_item in src.xpath('./div/div[contains(@class, "ribbon-element")]'): - if def_item.xpath('./div[@class="znacz"]'): - sub_defs = [] - for def_sub_item in def_item.xpath('./div[@class="znacz"]'): - def_sub_text = extract_text(def_sub_item).lstrip('0123456789. ') - sub_defs.append(def_sub_text) - src_defs.append((word, sub_defs)) - else: - def_text = extract_text(def_item).strip() - src_defs.append((def_text, '')) + for dict_src in dict_xpath: + for src in dom.xpath(dict_src): + src_text = extract_text(src.xpath('.//span[@class="entry-head-title"]/text()')).strip() - definitions.append((src_text, src_defs)) + src_defs = [] + for def_item in src.xpath('.//div[contains(@class, "ribbon-element")]'): + if def_item.xpath('./div[@class="znacz"]'): + sub_defs = [] + for def_sub_item in def_item.xpath('./div[@class="znacz"]'): + def_sub_text = extract_text(def_sub_item).lstrip('0123456789. ') + sub_defs.append(def_sub_text) + src_defs.append((word, sub_defs)) + else: + def_text = extract_text(def_item).strip() + def_link = def_item.xpath('./span/a/@href') + if 'doroszewski' in def_link[0]: + def_text = f"{def_text}" + src_defs.append((def_text, '')) + + definitions.append((src_text, src_defs)) if not definitions: return results