mirror of https://github.com/searxng/searxng.git
synced 2025-03-12 23:42:40 +00:00

[feat] add 360search engine for searxng

Co-authored-by: Bnyro <bnyro@tutanota.com>

parent 80f5fad16e
commit 71d1504e57

5 changed files with 159 additions and 1 deletion
@@ -33,6 +33,7 @@
 ``autocomplete``:
   Existing autocomplete backends, leave blank to turn it off.
 
+  - ``360search``
   - ``baidu``
   - ``brave``
   - ``dbpedia``
@@ -148,6 +148,21 @@ def mwmbl(query, _lang):
     return [result for result in results if not result.startswith("go: ") and not result.startswith("search: ")]
 
 
+def qihu360search(query, _lang):
+    # 360Search search autocompleter
+    url = f"https://sug.so.360.cn/suggest?{urlencode({'format': 'json', 'word': query})}"
+    response = get(url)
+
+    results = []
+
+    if response.ok:
+        data = response.json()
+        if 'result' in data:
+            for item in data['result']:
+                results.append(item['word'])
+    return results
+
+
 def seznam(query, _lang):
     # seznam search autocompleter
     url = 'https://suggest.seznam.cz/fulltext/cs?{query}'
@@ -246,6 +261,7 @@ def yandex(query, _lang):
 
 
 backends = {
+    '360search': qihu360search,
     'baidu': baidu,
     'brave': brave,
     'dbpedia': dbpedia,
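For reference, a minimal standalone sketch of the same suggest call, assuming only the JSON shape that qihu360search implies ({'result': [{'word': ...}, ...]}); the requests usage and the suggest_360 name are illustrative and not part of the commit:

# Illustrative sketch, not part of the commit: the same suggest call with plain
# requests, assuming the response shape {'result': [{'word': ...}, ...]}.
from urllib.parse import urlencode

import requests


def suggest_360(query: str) -> list[str]:
    url = f"https://sug.so.360.cn/suggest?{urlencode({'format': 'json', 'word': query})}"
    resp = requests.get(url, timeout=5)
    resp.raise_for_status()
    # Same defensive access as the backend above.
    return [item['word'] for item in resp.json().get('result', [])]


print(suggest_360('firefox'))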
searx/engines/360search.py (new file, 67 lines)
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: AGPL-3.0-or-later
+# pylint: disable=invalid-name
+"""360Search search engine for searxng"""
+
+from urllib.parse import urlencode
+from lxml import html
+
+from searx.utils import extract_text
+
+# Metadata
+about = {
+    "website": "https://www.so.com/",
+    "wikidata_id": "Q10846064",
+    "use_official_api": False,
+    "require_api_key": False,
+    "results": "HTML",
+}
+
+# Engine Configuration
+categories = ["general"]
+paging = True
+time_range_support = True
+
+time_range_dict = {'day': 'd', 'week': 'w', 'month': 'm', 'year': 'y'}
+
+# Base URL
+base_url = "https://www.so.com"
+
+
+def request(query, params):
+    query_params = {
+        "pn": params["pageno"],
+        "q": query,
+    }
+
+    if time_range_dict.get(params['time_range']):
+        query_params["adv_t"] = time_range_dict.get(params['time_range'])
+
+    params["url"] = f"{base_url}/s?{urlencode(query_params)}"
+    return params
+
+
+def response(resp):
+    dom = html.fromstring(resp.text)
+    results = []
+
+    for item in dom.xpath('//li[contains(@class, "res-list")]'):
+        title = extract_text(item.xpath('.//h3[contains(@class, "res-title")]/a'))
+
+        url = extract_text(item.xpath('.//h3[contains(@class, "res-title")]/a/@data-mdurl'))
+        if not url:
+            url = extract_text(item.xpath('.//h3[contains(@class, "res-title")]/a/@href'))
+
+        content = extract_text(item.xpath('.//p[@class="res-desc"]'))
+        if not content:
+            content = extract_text(item.xpath('.//span[@class="res-list-summary"]'))
+
+        if title and url:
+            results.append(
+                {
+                    "title": title,
+                    "url": url,
+                    "content": content,
+                }
+            )
+
+    return results
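A rough offline check of the engine's two entry points; FakeResponse and the HTML snippet below are invented to match the XPaths above, so a real so.com result page may well differ:

# Illustrative sketch, not part of the commit. The module name starts with a
# digit, so it has to be loaded via importlib rather than a plain import.
import importlib

engine = importlib.import_module('searx.engines.360search')

# request() fills in params['url'] from the page number and time range.
params = engine.request('firefox', {'pageno': 1, 'time_range': 'week'})
assert params['url'].startswith('https://www.so.com/s?')


# Fabricated markup matching the XPaths in response(); real pages may differ.
class FakeResponse:
    text = """
    <html><body><ul>
      <li class="res-list">
        <h3 class="res-title"><a href="https://example.org/">Example title</a></h3>
        <p class="res-desc">Example snippet</p>
      </li>
    </ul></body></html>
    """


print(engine.response(FakeResponse()))
# -> [{'title': 'Example title', 'url': 'https://example.org/', 'content': 'Example snippet'}]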
searx/engines/360search_videos.py (new file, 64 lines)
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: AGPL-3.0-or-later
+# pylint: disable=invalid-name
+"""360Search-Videos: A search engine for retrieving videos from 360Search."""
+
+from urllib.parse import urlencode
+from datetime import datetime
+
+from searx.exceptions import SearxEngineAPIException
+from searx.utils import html_to_text
+
+about = {
+    "website": "https://tv.360kan.com/",
+    "use_official_api": False,
+    "require_api_key": False,
+    "results": "JSON",
+}
+
+paging = True
+results_per_page = 10
+categories = ["videos"]
+
+base_url = "https://tv.360kan.com"
+
+
+def request(query, params):
+    query_params = {"count": 10, "q": query, "start": params["pageno"] * 10}
+
+    params["url"] = f"{base_url}/v1/video/list?{urlencode(query_params)}"
+    return params
+
+
+def response(resp):
+    try:
+        data = resp.json()
+    except Exception as e:
+        raise SearxEngineAPIException(f"Invalid response: {e}") from e
+    results = []
+
+    if "data" not in data or "result" not in data["data"]:
+        raise SearxEngineAPIException("Invalid response")
+
+    for entry in data["data"]["result"]:
+        if not entry.get("title") or not entry.get("play_url"):
+            continue
+
+        published_date = None
+        if entry.get("publish_time"):
+            try:
+                published_date = datetime.fromtimestamp(int(entry["publish_time"]))
+            except (ValueError, TypeError):
+                published_date = None
+
+        results.append(
+            {
+                'url': entry["play_url"],
+                'title': html_to_text(entry["title"]),
+                'content': html_to_text(entry["description"]),
+                'template': 'videos.html',
+                'publishedDate': published_date,
+                'thumbnail': entry["cover_img"],
+            }
+        )
+
+    return results
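And a similar offline check for the video parser; the payload is fabricated to cover only the keys response() reads (data.result[].title, play_url, description, publish_time, cover_img), not the real tv.360kan.com schema:

# Illustrative sketch, not part of the commit: a fabricated payload covering
# only the fields the parser above reads.
import importlib

videos = importlib.import_module('searx.engines.360search_videos')


class FakeResponse:
    @staticmethod
    def json():
        return {
            "data": {
                "result": [
                    {
                        "title": "Example <b>video</b>",
                        "play_url": "https://tv.360kan.com/v/example",
                        "description": "Example description",
                        "publish_time": "1700000000",
                        "cover_img": "https://example.org/cover.jpg",
                    }
                ]
            }
        }


print(videos.response(FakeResponse()))
# -> one result dict using the 'videos.html' template, with publishedDate
#    parsed from the UNIX timestamp.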
@@ -33,7 +33,7 @@ brand:
 search:
   # Filter results. 0: None, 1: Moderate, 2: Strict
   safe_search: 0
-  # Existing autocomplete backends: "baidu", "brave", "dbpedia", "duckduckgo", "google", "yandex",
+  # Existing autocomplete backends: "360search", "baidu", "brave", "dbpedia", "duckduckgo", "google", "yandex",
   # "mwmbl", "seznam", "stract", "swisscows", "qwant", "wikipedia" -
   # leave blank to turn it off by default.
   autocomplete: ""
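Suggestions stay off by default; an instance admin who wants the new backend has to opt in. A sketch of a local settings.yml override, not part of the commit:

# Sketch of a local settings.yml override:
search:
  autocomplete: "360search"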
@@ -337,6 +337,16 @@ categories_as_tabs:
   social media:
 
 engines:
+  - name: 360search
+    engine: 360search
+    shortcut: 360so
+    disabled: true
+
+  - name: 360search videos
+    engine: 360search_videos
+    shortcut: 360sov
+    disabled: true
+
   - name: 9gag
     engine: 9gag
     shortcut: 9g
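Both engines ship disabled. A local settings.yml can flip the flag per engine; again a sketch, not part of the commit:

# Sketch of a local settings.yml override enabling the new engines:
engines:
  - name: 360search
    disabled: false

  - name: 360search videos
    disabled: false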