From 6631f1130512f371332242a13a4d0672a70e803f Mon Sep 17 00:00:00 2001 From: Plague Doctor Date: Thu, 8 Apr 2021 10:21:54 +1000 Subject: [PATCH 1/7] Add new engine: SJP --- searx/engines/sjp.py | 82 ++++++++++++++++++++++++++++++++++++++++++++ searx/settings.yml | 8 +++++ 2 files changed, 90 insertions(+) create mode 100644 searx/engines/sjp.py diff --git a/searx/engines/sjp.py b/searx/engines/sjp.py new file mode 100644 index 000000000..e5b4ad2f9 --- /dev/null +++ b/searx/engines/sjp.py @@ -0,0 +1,82 @@ +# SPDX-License-Identifier: AGPL-3.0-or-later +"""Słownik Języka Polskiego (general) + +""" + +from lxml.html import fromstring +from searx import logger +from searx.utils import extract_text +from searx.raise_for_httperror import raise_for_httperror + +logger = logger.getChild('sjp engine') + +# about +about = { + "website": 'https://sjp.pwn.pl', + "wikidata_id": 'Q55117369', + "official_api_documentation": None, + "use_official_api": False, + "require_api_key": False, + "results": 'HTML', +} + +categories = ['general'] +paging = False + +URL = 'https://sjp.pwn.pl' +SEARCH_URL = URL + '/szukaj/{query}.html' + + +def request(query, params): + params['url'] = SEARCH_URL.format(query=query) + logger.debug(f"query_url --> {params['url']}") + return params + + +def response(resp): + results = [] + + raise_for_httperror(resp) + dom = fromstring(resp.text) + word = extract_text(dom.xpath('//*[@id="content"]/div/div[1]/div/div[1]/div[1]/div[2]/div/div/div[2]/div/div')) + + definitions = [] + for src in dom.xpath('//*[@id="content"]/div/div[1]/div/div[1]/div[1]/div[2]/div/div/div/div/div/div'): + src_text = extract_text(src.xpath('./h1/span[@class="entry-head-title"]/text()')).strip() + + src_defs = [] + for def_item in src.xpath('./div/div[contains(@class, "ribbon-element")]'): + if def_item.xpath('./div[@class="znacz"]'): + sub_defs = [] + for def_sub_item in def_item.xpath('./div[@class="znacz"]'): + def_sub_text = extract_text(def_sub_item).lstrip('0123456789. ') + sub_defs.append(def_sub_text) + src_defs.append((word, sub_defs)) + else: + def_text = extract_text(def_item).strip() + src_defs.append((def_text, '')) + + definitions.append((src_text, src_defs)) + + if not definitions: + return results + + infobox = '' + for src in definitions: + infobox += f"
<div><small>{src[0]}</small>"
+        infobox += "<ul>"
+        for (def_text, sub_def) in src[1]:
+            infobox += f"<li>{def_text}</li>"
+
+            if sub_def:
+                infobox += "<ol>"
+                for sub_def_text in sub_def:
+                    infobox += f"<li>{sub_def_text}</li>"
+                infobox += "</ol>"
+
+        infobox += "</ul></div>
" + + results.append({ + 'infobox': word, + 'content': infobox, + }) + + return results diff --git a/searx/settings.yml b/searx/settings.yml index 3428b2ec5..81ef737db 100644 --- a/searx/settings.yml +++ b/searx/settings.yml @@ -1271,6 +1271,14 @@ engines: categories: videos disabled : True + - name: słownik języka polskiego + engine: sjp + shortcut: sjp + base_url: https://sjp.pwn.pl/ + categories: general + timeout: 5.0 + disabled: True + # Doku engine lets you access to any Doku wiki instance: # A public one or a privete/corporate one. # - name : ubuntuwiki From dfc66ff0f0643c49dc9a19b716bb795b5f347d3f Mon Sep 17 00:00:00 2001 From: Robin Schneider Date: Sun, 11 Apr 2021 22:12:53 +0200 Subject: [PATCH 2/7] Fix grammar mistake in debug log output --- searx/engines/google.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/searx/engines/google.py b/searx/engines/google.py index 8c20029a3..8f5b700eb 100644 --- a/searx/engines/google.py +++ b/searx/engines/google.py @@ -243,7 +243,7 @@ def response(resp): if answer: results.append({'answer': ' '.join(answer)}) else: - logger.debug("did not found 'answer'") + logger.debug("did not find 'answer'") # results --> number_of_results try: From 4652ef0f06235d18cd5ccae7735f5b61d21dba77 Mon Sep 17 00:00:00 2001 From: Mikayel Mardanyan Petrosyan <48054735+mikamp116@users.noreply.github.com> Date: Tue, 13 Apr 2021 22:26:45 +0200 Subject: [PATCH 3/7] Update standalone_searx.py Fix bug for 'FileNotFoundError: [Errno 2] No such file or directory: 'utils/standalone_searx.py' ' in example to run standalone_searx.py from python --- searx_extra/standalone_searx.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/searx_extra/standalone_searx.py b/searx_extra/standalone_searx.py index f52b7e80c..b30762d3f 100755 --- a/searx_extra/standalone_searx.py +++ b/searx_extra/standalone_searx.py @@ -31,7 +31,7 @@ Example to run it from python: ... engine_cs = list(searx.engines.categories.keys()) ... # load module ... spec = importlib.util.spec_from_file_location( -... 'utils.standalone_searx', 'utils/standalone_searx.py') +... 'utils.standalone_searx', 'searx_extra/standalone_searx.py') ... sas = importlib.util.module_from_spec(spec) ... spec.loader.exec_module(sas) ... 
# use function from module

From 4d3c399ee985385e888ba068d973e4653d9f50b9 Mon Sep 17 00:00:00 2001
From: Kyle Anthony Williams
Date: Sun, 27 Dec 2020 23:46:11 -0500
Subject: [PATCH 4/7] [feat] add bandcamp engine

---
 searx/engines/bandcamp.py | 72 +++++++++++++++++++++++++++++++++++++++
 searx/settings.yml        |  5 +++
 2 files changed, 77 insertions(+)
 create mode 100644 searx/engines/bandcamp.py

diff --git a/searx/engines/bandcamp.py b/searx/engines/bandcamp.py
new file mode 100644
index 000000000..b1b5214fe
--- /dev/null
+++ b/searx/engines/bandcamp.py
@@ -0,0 +1,72 @@
+"""
+Bandcamp (Music)
+
+@website     https://bandcamp.com/
+@provide-api no
+@results     HTML
+@parse       url, title, content, publishedDate, embedded, thumbnail
+"""
+
+from urllib.parse import urlencode, urlparse, parse_qs
+from dateutil.parser import parse as dateparse
+from lxml import html
+from searx.utils import extract_text
+
+categories = ['music']
+paging = True
+
+base_url = "https://bandcamp.com/"
+search_string = 'search?{query}&page={page}'
+embedded_url = '''<iframe width="100%" height="166" scrolling="no" frameborder="no"
+    src="https://bandcamp.com/EmbeddedPlayer/{type}={result_id}/size=large/tracklist=false/artwork=small/"
+></iframe>'''
+
+
+def request(query, params):
+    '''pre-request callback
+    params:
+        method : POST/GET
+        headers : {}
+        data : {} # if method == POST
+        url : ''
+        category: 'search category'
+        pageno : 1 # number of the requested page
+    '''
+
+    search_path = search_string.format(
+        query=urlencode({'q': query}),
+        page=params['pageno'])
+
+    params['url'] = base_url + search_path
+
+    return params
+
+
+def response(resp):
+    '''post-response callback
+    resp: requests response object
+    '''
+    results = []
+    tree = html.fromstring(resp.text)
+    search_results = tree.xpath('//li[contains(@class, "searchresult")]')
+    for result in search_results:
+        link = result.xpath('//div[@class="itemurl"]/a')[0]
+        result_id = parse_qs(urlparse(link.get('href')).query)["search_item_id"][0]
+        title = result.xpath('//div[@class="heading"]/a/text()')[0]
+        date = dateparse(result.xpath('//div[@class="released"]/text()')[0].replace("released ", ""))
+        content = result.xpath('//div[@class="subhead"]/text()')[0]
+        thumbnail = result.xpath('//div[@class="art"]/img/@src')[0]
+        new_result = {
+            "url": extract_text(link),
+            "title": title,
+            "content": content,
+            "publishedDate": date,
+            "thumbnail": thumbnail,
+        }
+        if "album" in result.classes:
+            new_result["embedded"] = embedded_url.format(type='album', result_id=result_id)
+        elif "track" in result.classes:
+            new_result["embedded"] = embedded_url.format(type='track', result_id=result_id)
+        results.append(new_result)
+    return results
diff --git a/searx/settings.yml b/searx/settings.yml
index c289cde5c..058867092 100644
--- a/searx/settings.yml
+++ b/searx/settings.yml
@@ -197,6 +197,11 @@ engines:
 #    engine : base
 #    shortcut : bs
 
+  - name: bandcamp
+    engine: bandcamp
+    shortcut: bc
+    categories: music
+
   - name : wikipedia
     engine : wikipedia
     shortcut : wp

From 062d589f865cf736620f4ff5d6a8476dfe980ba7 Mon Sep 17 00:00:00 2001
From: Markus Heiser <markus.heiser@darmarit.de>
Date: Tue, 29 Dec 2020 15:49:41 +0100
Subject: [PATCH 5/7] [fix] xpath expressions to grab all items from bandcamp's
 response

I also found some items missing a thumbnail and I used extract_text for
content and title, to remove unneeded whitespaces.

BTW: added bandcamp's favicon Signed-off-by: Markus Heiser --- searx/engines/bandcamp.py | 15 ++++++++------- searx/static/themes/oscar/img/icons/bandcamp.png | Bin 0 -> 919 bytes 2 files changed, 8 insertions(+), 7 deletions(-) create mode 100644 searx/static/themes/oscar/img/icons/bandcamp.png diff --git a/searx/engines/bandcamp.py b/searx/engines/bandcamp.py index b1b5214fe..dafb3ee16 100644 --- a/searx/engines/bandcamp.py +++ b/searx/engines/bandcamp.py @@ -51,19 +51,20 @@ def response(resp): tree = html.fromstring(resp.text) search_results = tree.xpath('//li[contains(@class, "searchresult")]') for result in search_results: - link = result.xpath('//div[@class="itemurl"]/a')[0] + link = result.xpath('.//div[@class="itemurl"]/a')[0] result_id = parse_qs(urlparse(link.get('href')).query)["search_item_id"][0] - title = result.xpath('//div[@class="heading"]/a/text()')[0] + title = result.xpath('.//div[@class="heading"]/a/text()') date = dateparse(result.xpath('//div[@class="released"]/text()')[0].replace("released ", "")) - content = result.xpath('//div[@class="subhead"]/text()')[0] - thumbnail = result.xpath('//div[@class="art"]/img/@src')[0] + content = result.xpath('.//div[@class="subhead"]/text()') new_result = { "url": extract_text(link), - "title": title, - "content": content, + "title": extract_text(title), + "content": extract_text(content), "publishedDate": date, - "thumbnail": thumbnail, } + thumbnail = result.xpath('.//div[@class="art"]/img/@src') + if thumbnail: + new_result['thumbnail'] = thumbnail[0] if "album" in result.classes: new_result["embedded"] = embedded_url.format(type='album', result_id=result_id) elif "track" in result.classes: diff --git a/searx/static/themes/oscar/img/icons/bandcamp.png b/searx/static/themes/oscar/img/icons/bandcamp.png new file mode 100644 index 0000000000000000000000000000000000000000..2de405afe839369f85250a04b21595bdf646aee5 GIT binary patch literal 919 zcmeAS@N?(olHy`uVBq!ia0vp^3LwnE3?yBabR7dyEa{HEjtmSN`?>!lvVtU&J%W50 z7^>757#dm_7=8hT8eT9klo~KFyh>nTu$sZZAYL$MSD+10;!J=~h%1nmU2s?qh!!4^ zTW~~n{vql4hvXI?m0x&7X8s|W1&8Do9g$rKlszi5;IRCnqY8_U$Spc5x9~7f7eNG6 zPf!iW_Jv2VD4Tx>iy+KWpkvTv|Ns9lH9gV;=pUz&AirQBjSiSu7(-JM5|g8S{!U^% z`SbU$Tm12V(z$M2WGfC6d-m@DW4kkx@~3sPjb<^57F{~QH95ZxXar-Dx4VmT`UT!3 zAcwug)7O>#F^eESm*kb7K?i}-;+`&!Ar`042Hgx|G8DLD*7acRNhT(4p*)wAiNPTy z|NbwYqk8&uT9&--Z^h7+FTX5Fx8nH6&yb<)u>7mALW&G>o$tvVBca36IAhucpB+cIUiYeOc*-Tbz~mk)cS^ZWK;xt0=dV9^^Jh%uh*`Vg zD{IoNH9udP3Y$zjG(lH4A&*baj=R`1;oH%7qQWN24)N>0t*mOdyzjSfsbasomd|$s zk?mL7_Z7FqO}Dyfy8Y$yP^)LFH6uD!9()$$wD;N5HBp+gfB$Jv z)-$#__NVTjUIPp@)e_f;l9a@fRIB8oR3OD*WMF8iYha>lXdYr{YGr6(Wn`*tU}R-r zuxGxXK8l9i{FKbJO57UG9{M&Js3E*6B%&lJv0R~`C_gPTCsm=OvLICRr~FT!@q8M@z^u%zm&`4!EbKj5gjra@rNQKI3bXR&5QWn>uADe>=7`J@ g_R|d>3%v9gUWp5qd~!0K3bca3)78&qol`;+0IuU@#sB~S literal 0 HcmV?d00001 From f637bfc63530c36ff7d92284807f5fff5ff58bf9 Mon Sep 17 00:00:00 2001 From: Markus Heiser Date: Tue, 29 Dec 2020 15:58:18 +0100 Subject: [PATCH 6/7] [mod] oscar's "default" template should make use of result.thumbnail Some engine do have set result.img_src, other return a result.thumbnail. If result.img_src is unset and a result.thumbnail is given, show it to the UI. 
Signed-off-by: Markus Heiser <markus.heiser@darmarit.de>
---
 searx/templates/oscar/result_templates/default.html | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/searx/templates/oscar/result_templates/default.html b/searx/templates/oscar/result_templates/default.html
index d743f928e..53cfee5cb 100644
--- a/searx/templates/oscar/result_templates/default.html
+++ b/searx/templates/oscar/result_templates/default.html
@@ -13,10 +13,10 @@
 {%- endif -%}
-{%- if result.img_src -%}
+{%- if result.img_src or result.thumbnail -%}
 <div class="container-fluid">{{- "" -}}
   <div class="row">{{- "" -}}
-    <img src="{{ image_proxify(result.img_src) }}">{{- "" -}}
+    <img src="{{ image_proxify(result.img_src or result.thumbnail) }}">{{- "" -}}
     {%- if result.content %}
       <p>{{ result.content|safe }}</p>
     {% endif -%}
   </div>{{- "" -}}
 </div>{{- "" -}}
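
A hedged aside before the last patch: the template change above boils down to
an "img_src or thumbnail" fallback. The sketch below shows that fallback in
isolation; the result dict, the cover URL and the pass-through image_proxify
stand-in are invented for illustration and are not searx code.

    from jinja2 import Template

    def image_proxify(url):
        # stand-in: searx's real helper rewrites the URL through its image proxy
        return url

    tpl = Template(
        '{%- if result.img_src or result.thumbnail -%}'
        '<img src="{{ image_proxify(result.img_src or result.thumbnail) }}">'
        '{%- endif -%}'
    )

    # a bandcamp-style result: thumbnail set, img_src absent
    result = {'thumbnail': 'https://example.org/cover.jpg'}
    print(tpl.render(result=result, image_proxify=image_proxify))
    # prints: <img src="https://example.org/cover.jpg">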
From d275d7a35e2fa2544304bb6ee1577a1421c64c2e Mon Sep 17 00:00:00 2001
From: Plague Doctor
Date: Fri, 16 Apr 2021 12:23:27 +1000
Subject: [PATCH 7/7] Code refactoring.

---
 searx/engines/sjp.py | 40 +++++++++++++++++++++++++---------------
 1 file changed, 25 insertions(+), 15 deletions(-)

diff --git a/searx/engines/sjp.py b/searx/engines/sjp.py
index e5b4ad2f9..eff7b7092 100644
--- a/searx/engines/sjp.py
+++ b/searx/engines/sjp.py
@@ -26,6 +26,11 @@ paging = False
 URL = 'https://sjp.pwn.pl'
 SEARCH_URL = URL + '/szukaj/{query}.html'
 
+word_xpath = '//div[@class="query"]'
+dict_xpath = ['//div[@class="wyniki sjp-so-wyniki sjp-so-anchor"]',
+              '//div[@class="wyniki sjp-wyniki sjp-anchor"]',
+              '//div[@class="wyniki sjp-doroszewski-wyniki sjp-doroszewski-anchor"]']
+
 
 def request(query, params):
     params['url'] = SEARCH_URL.format(query=query)
@@ -38,25 +43,30 @@ def response(resp):
 
     raise_for_httperror(resp)
     dom = fromstring(resp.text)
-    word = extract_text(dom.xpath('//*[@id="content"]/div/div[1]/div/div[1]/div[1]/div[2]/div/div/div[2]/div/div'))
+    word = extract_text(dom.xpath(word_xpath))
 
     definitions = []
-    for src in dom.xpath('//*[@id="content"]/div/div[1]/div/div[1]/div[1]/div[2]/div/div/div/div/div/div'):
-        src_text = extract_text(src.xpath('./h1/span[@class="entry-head-title"]/text()')).strip()
+
+    for dict_src in dict_xpath:
+        for src in dom.xpath(dict_src):
+            src_text = extract_text(src.xpath('.//span[@class="entry-head-title"]/text()')).strip()
 
-        src_defs = []
-        for def_item in src.xpath('./div/div[contains(@class, "ribbon-element")]'):
-            if def_item.xpath('./div[@class="znacz"]'):
-                sub_defs = []
-                for def_sub_item in def_item.xpath('./div[@class="znacz"]'):
-                    def_sub_text = extract_text(def_sub_item).lstrip('0123456789. ')
-                    sub_defs.append(def_sub_text)
-                src_defs.append((word, sub_defs))
-            else:
-                def_text = extract_text(def_item).strip()
-                src_defs.append((def_text, ''))
+            src_defs = []
+            for def_item in src.xpath('.//div[contains(@class, "ribbon-element")]'):
+                if def_item.xpath('./div[@class="znacz"]'):
+                    sub_defs = []
+                    for def_sub_item in def_item.xpath('./div[@class="znacz"]'):
+                        def_sub_text = extract_text(def_sub_item).lstrip('0123456789. ')
+                        sub_defs.append(def_sub_text)
+                    src_defs.append((word, sub_defs))
+                else:
+                    def_text = extract_text(def_item).strip()
+                    def_link = def_item.xpath('./span/a/@href')
+                    if 'doroszewski' in def_link[0]:
+                        def_text = f"<a href='{def_link[0]}'>{def_text}</a>"
+                    src_defs.append((def_text, ''))
 
-        definitions.append((src_text, src_defs))
+            definitions.append((src_text, src_defs))
 
     if not definitions:
         return results
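
A hedged note on the lstrip() call kept by this refactoring: str.lstrip treats
its argument as a set of characters and strips any leading run of them, not a
literal prefix, which is what removes the "1. ", "2. ", ... enumeration from
the numbered sub-definitions. A tiny sketch with invented inputs:

    # str.lstrip(chars) strips characters from the given set, not a prefix string
    for raw in ('1. zwierzę domowe', '12. samiec kota domowego', 'pies'):
        print(raw.lstrip('0123456789. '))
    # zwierzę domowe
    # samiec kota domowego
    # pies

The trade-off of this choice: a sub-definition that genuinely began with a
digit or a dot would be trimmed as well.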