Zhijie He 2025-04-14 06:14:31 +08:00 committed by GitHub
commit 0dc202872f
5 changed files with 246 additions and 18 deletions


@@ -41,6 +41,7 @@
- ``duckduckgo``
- ``google``
- ``mwmbl``
- ``naver``
- ``quark``
- ``qwant``
- ``seznam``


@@ -149,6 +149,21 @@ def mwmbl(query, _lang):
    return [result for result in results if not result.startswith("go: ") and not result.startswith("search: ")]


def naver(query, _lang):
    # Naver search autocompleter
    url = f"https://ac.search.naver.com/nx/ac?{urlencode({'q': query, 'r_format': 'json', 'st': 0})}"
    response = get(url)

    results = []

    if response.ok:
        data = response.json()
        if data.get('items'):
            for item in data['items'][0]:
                results.append(item[0])
    return results
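
# Response shape naver() relies on (inferred from the indexing above, not an
# officially documented contract):
#     {"items": [[["suggestion 1", ...], ["suggestion 2", ...]], ...]}
# items[0] is the group of suggestions; each entry's first element is the
# suggestion text.
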
def qihu360search(query, _lang):
    # 360Search search autocompleter
    url = f"https://sug.so.360.cn/suggest?{urlencode({'format': 'json', 'word': query})}"
@@ -300,6 +315,7 @@ backends = {
    'duckduckgo': duckduckgo,
    'google': google_complete,
    'mwmbl': mwmbl,
    'naver': naver,
    'quark': quark,
    'qwant': qwant,
    'seznam': seznam,

searx/engines/naver.py (new file, 200 lines)

@@ -0,0 +1,200 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Naver search engine for searxng"""

from urllib.parse import urlencode

from lxml import html

from searx.exceptions import SearxEngineAPIException, SearxEngineXPathException
from searx.utils import (
    eval_xpath_getindex,
    eval_xpath_list,
    eval_xpath,
    extract_text,
    extr,
    html_to_text,
    parse_duration_string,
    js_variable_to_python,
)

# engine metadata
about = {
    "website": "https://search.naver.com",
    "wikidata_id": "Q485639",
    "use_official_api": False,
    "require_api_key": False,
    "results": "HTML",
    "language": "ko",
}

categories = []
paging = True
time_range_support = True
time_range_dict = {"day": "1d", "week": "1w", "month": "1m", "year": "1y"}

base_url = "https://search.naver.com"

naver_category = "general"
"""Naver supports general, images, news, videos search.

- ``general``: search for general
- ``images``: search for images
- ``news``: search for news
- ``videos``: search for videos
"""

# Naver can't set the number of results per page; the per-category defaults
# below are used to compute the paging offset
naver_category_dict = {
    "general": {
        "start": 15,
        "where": "web",
    },
    "images": {
        "start": 50,
        "where": "image",
    },
    "news": {
        "start": 10,
        "where": "news",
    },
    "videos": {
        "start": 48,
        "where": "video",
    },
}


def init(_):
    if naver_category not in ('general', 'images', 'news', 'videos'):
        raise SearxEngineAPIException(f"Unsupported category: {naver_category}")


def request(query, params):
    query_params = {
        "query": query,
    }

    if naver_category in naver_category_dict:
        query_params["start"] = (params["pageno"] - 1) * naver_category_dict[naver_category]["start"] + 1
        query_params["where"] = naver_category_dict[naver_category]["where"]

    if params["time_range"] in time_range_dict:
        query_params["nso"] = f"p:{time_range_dict[params['time_range']]}"

    params["url"] = f"{base_url}/search.naver?{urlencode(query_params)}"
    return params
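
# Worked example of the offset arithmetic above (illustration only): with the
# default naver_category = "general" the step is 15, so pageno 1 -> start=1,
# pageno 2 -> start=16, pageno 3 -> start=31; for "news" the step is 10, so
# pageno 2 -> start=11.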


def response(resp):
    parsers = {'general': parse_general, 'images': parse_images, 'news': parse_news, 'videos': parse_videos}
    return parsers[naver_category](resp.text)


def parse_general(data):
    results = []
    dom = html.fromstring(data)

    for item in eval_xpath_list(dom, "//ul[contains(@class, 'lst_total')]/li[contains(@class, 'bx')]"):
        thumbnail = None
        try:
            thumbnail = eval_xpath_getindex(item, ".//div[contains(@class, 'thumb_single')]//img/@data-lazysrc", 0)
        except (ValueError, TypeError, SearxEngineXPathException):
            pass

        results.append(
            {
                "title": extract_text(eval_xpath(item, ".//a[contains(@class, 'link_tit')]")),
                "url": eval_xpath_getindex(item, ".//a[contains(@class, 'link_tit')]/@href", 0),
                "content": extract_text(
                    eval_xpath(item, ".//div[contains(@class, 'total_dsc_wrap')]//a[contains(@class, 'api_txt_lines')]")
                ),
                "thumbnail": thumbnail,
            }
        )

    return results


def parse_images(data):
    results = []

    match = extr(data, '<script>var imageSearchTabData=', '</script>')
    if match:
        json = js_variable_to_python(match.strip())
        items = json.get('content', {}).get('items', [])

        for item in items:
            results.append(
                {
                    "template": "images.html",
                    "url": item.get('link'),
                    "thumbnail_src": item.get('thumb'),
                    "img_src": item.get('originalUrl'),
                    "title": html_to_text(item.get('title')),
                    "source": item.get('source'),
                    "resolution": f"{item.get('orgWidth')} x {item.get('orgHeight')}",
                }
            )

    return results


def parse_news(data):
    results = []
    dom = html.fromstring(data)

    for item in eval_xpath_list(dom, "//ul[contains(@class, 'list_news')]/li[contains(@class, 'bx')]"):
        thumbnail = None
        try:
            thumbnail = eval_xpath_getindex(item, ".//a[contains(@class, 'dsc_thumb')]/img/@data-lazysrc", 0)
        except (ValueError, TypeError, SearxEngineXPathException):
            pass

        results.append(
            {
                "title": extract_text(eval_xpath(item, ".//a[contains(@class, 'news_tit')]")),
                "url": eval_xpath_getindex(item, ".//a[contains(@class, 'news_tit')]/@href", 0),
                "content": html_to_text(
                    extract_text(
                        eval_xpath(item, ".//div[contains(@class, 'news_dsc')]//a[contains(@class, 'api_txt_lines')]")
                    )
                ),
                "thumbnail": thumbnail,
            }
        )

    return results


def parse_videos(data):
    results = []
    dom = html.fromstring(data)

    for item in eval_xpath_list(dom, "//li[contains(@class, 'video_item')]"):
        thumbnail = None
        try:
            thumbnail = eval_xpath_getindex(item, ".//img[contains(@class, 'thumb')]/@src", 0)
        except (ValueError, TypeError, SearxEngineXPathException):
            pass

        length = None
        try:
            length = parse_duration_string(extract_text(eval_xpath(item, ".//span[contains(@class, 'time')]")))
        except (ValueError, TypeError):
            pass

        results.append(
            {
                "template": "videos.html",
                "title": extract_text(eval_xpath(item, ".//a[contains(@class, 'info_title')]")),
                "url": eval_xpath_getindex(item, ".//a[contains(@class, 'info_title')]/@href", 0),
                "thumbnail": thumbnail,
                "length": length,
            }
        )

    return results
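
A note on parse_images() above: unlike the DOM-based parsers, it reads the result data out of an inline JavaScript variable. A minimal standalone sketch of that extraction step, using the same two helpers the module imports; the page fragment is a made-up stand-in, not real Naver markup:

    from searx.utils import extr, js_variable_to_python

    page = "<script>var imageSearchTabData={content: {items: [{link: 'https://example.org'}]}}</script>"
    raw = extr(page, "<script>var imageSearchTabData=", "</script>")
    data = js_variable_to_python(raw.strip())
    print(data["content"]["items"][0]["link"])  # -> https://example.org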


@@ -34,7 +34,7 @@ search:
  # Filter results. 0: None, 1: Moderate, 2: Strict
  safe_search: 0
  # Existing autocomplete backends: "360search", "baidu", "brave", "dbpedia", "duckduckgo", "google", "yandex",
  # "mwmbl", "seznam", "sogou", "stract", "swisscows", "quark", "qwant", "wikipedia" -
  # "mwmbl", "naver", "seznam", "sogou", "stract", "swisscows", "quark", "qwant", "wikipedia" -
  # leave blank to turn it off by default.
  autocomplete: ""
  # minimum characters to type before autocompleter starts
@@ -2343,25 +2343,31 @@ engines:
    disabled: true

  - name: naver
    shortcut: nvr
    categories: [general, web]
    engine: xpath
    paging: true
    search_url: https://search.naver.com/search.naver?where=webkr&sm=osp_hty&ie=UTF-8&query={query}&start={pageno}
    url_xpath: //a[@class="link_tit"]/@href
    title_xpath: //a[@class="link_tit"]
    content_xpath: //div[@class="total_dsc_wrap"]/a
    first_page_num: 1
    page_size: 10
    engine: naver
    shortcut: nvr
    disabled: true

  - name: naver images
    naver_category: images
    categories: [images]
    engine: naver
    shortcut: nvri
    disabled: true

  - name: naver news
    naver_category: news
    categories: [news]
    engine: naver
    shortcut: nvrn
    disabled: true

  - name: naver videos
    naver_category: videos
    categories: [videos]
    engine: naver
    shortcut: nvrv
    disabled: true

    about:
      website: https://www.naver.com/
      wikidata_id: Q485639
      official_api_documentation: https://developers.naver.com/docs/nmt/examples/
      use_official_api: false
      require_api_key: false
      results: HTML
      language: ko

  - name: rubygems
    shortcut: rbg
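
All four new engines ship with disabled: true. To try one in a local settings.yml it is enough to flip that flag for the engine by name (a minimal sketch of a user override, not part of this commit):

    engines:
      - name: naver news
        disabled: false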


@@ -830,6 +830,11 @@ def js_variable_to_python(js_variable):
    s = _JS_DECIMAL_RE.sub(":0.", s)

    # replace the surrogate character by a colon
    s = s.replace(chr(1), ':')

    # replace a single quote followed by a comma with a double quote and a comma
    #     {"a": "\"12\"',"b": "13"}
    # becomes
    #     {"a": "\"12\"","b": "13"}
    s = s.replace("',", "\",")

    # load the JSON and return the result
    return json.loads(s)
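
A minimal illustration of just that new replacement step (standalone Python, not part of the module; the input string mirrors the example in the comment above):

    import json

    broken = '{"a": "\\"12\\"\',"b": "13"}'   # i.e. {"a": "\"12\"',"b": "13"}
    fixed = broken.replace("',", '",')        # i.e. {"a": "\"12\"","b": "13"}
    print(json.loads(fixed))                  # {'a': '"12"', 'b': '13'}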