mirror of https://github.com/searxng/searxng.git
commit 2499899554
Partial reverse engineering of the Google engines, including an improved
language and region handling based on the engine.traits_v1 data.

Whenever possible, the implementations of the Google engines make use of the
async REST APIs.  The get_lang_info() function has been generalized to
get_google_info(); in particular, the region handling has been improved by
adding the cr parameter.

searx/data/engine_traits.json

  Add data type "traits_v1", generated by the fetch_traits() functions from:

  - Google (WEB),
  - Google images,
  - Google news,
  - Google scholar and
  - Google videos

  and remove the data of the obsolete data type "supported_languages".

  A traits.custom type that maps region codes to *supported_domains* is
  fetched from https://www.google.com/supported_domains

searx/autocomplete.py

  Reverse engineered autocomplete from Google WEB.  Supports Google's
  languages and subdomains.  The old API suggestqueries.google.com/complete
  has been replaced by the async REST API:
  https://{subdomain}/complete/search?{args}

searx/engines/google.py

  Reverse engineering and extensive testing ..

  - fetch_traits(): fetch languages & regions from Google properties.
  - always use the async REST API (formerly known as 'use_mobile_ui')
  - use *supported_domains* from traits
  - improved the result list by fetching './/div[@data-content-feature]' and
    parsing the type of the various *content features* --> thumbnails are
    added

searx/engines/google_images.py

  Reverse engineering and extensive testing ..

  - fetch_traits(): fetch languages & regions from Google properties.
  - use *supported_domains* from traits
  - if it exists, freshness_date is added to the result
  - issue 1864: the result list has been improved a lot (due to the new cr
    parameter)

searx/engines/google_news.py

  Reverse engineering and extensive testing ..

  - fetch_traits(): fetch languages & regions from Google properties.
    *supported_domains* is not needed, but a ceid list has been added.
  - different region handling compared to Google WEB
  - fixed for various languages & regions (due to the new ceid parameter) /
    avoid the CONSENT page
  - Google News no longer supports time ranges
  - the result list has been fixed: XPath of pub_date and pub_origin

searx/engines/google_videos.py

  - fetch_traits(): fetch languages & regions from Google properties.
  - use *supported_domains* from traits
  - add paging support
  - implement an async request ('asearch': 'arc' & 'async':
    'use_ac:true,_fmt:html')
  - simplified code (thanks to the '_fmt:html' request)
  - issue 1359: fixed XPath of the video length data

searx/engines/google_scholar.py

  - fetch_traits(): fetch languages & regions from Google properties.
  - use *supported_domains* from traits
  - request(): include patents & citations
  - response(): fixed CAPTCHA detection (Scholar has its own CAPTCHA manager)
  - hardened the XPath used to iterate over results
  - fixed the XPath of pub_type (has been changed from the gs_ct1 to the
    gs_cgt2 class)
  - issue 1769 fixed: the new request implementation is no longer
    incompatible

Signed-off-by: Markus Heiser <markus.heiser@darmarit.de>
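The reverse-engineered autocomplete mentioned above lives in searx/autocomplete.py and is not part of the file shown below. As a rough sketch of the new async REST request (the helper name, the 'client' argument and the plain httpx call are illustrative assumptions; the real implementation derives the subdomain and language from the traits_v1 data and uses SearXNG's own network layer):

    from urllib.parse import urlencode
    import httpx

    def google_complete(query: str, subdomain: str = 'www.google.com', hl: str = 'en') -> str:
        # assumption: minimal argument set; searx/autocomplete.py computes
        # subdomain and hl from the engine's traits_v1 data
        args = urlencode({'q': query, 'client': 'gws-wiz', 'hl': hl})
        return httpx.get(f'https://{subdomain}/complete/search?{args}').text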
139 lines · 3.6 KiB · Python
# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""This is the implementation of the Google Videos engine.

.. admonition:: Content-Security-Policy (CSP)

   This engine needs to allow images from the `data URLs`_ (prefixed with the
   ``data:`` scheme)::

     Header set Content-Security-Policy "img-src 'self' data: ;"

.. _data URLs:
   https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URIs

"""

from typing import TYPE_CHECKING

from urllib.parse import urlencode
from lxml import html

from searx.utils import (
    eval_xpath,
    eval_xpath_list,
    eval_xpath_getindex,
    extract_text,
)

from searx.engines.google import fetch_traits  # pylint: disable=unused-import
from searx.engines.google import (
    get_google_info,
    time_range_dict,
    filter_mapping,
    suggestion_xpath,
    detect_google_sorry,
)
from searx.enginelib.traits import EngineTraits

if TYPE_CHECKING:
    import logging

    logger: logging.Logger

traits: EngineTraits

# about
about = {
    "website": 'https://www.google.com',
    "wikidata_id": 'Q219885',
    "official_api_documentation": 'https://developers.google.com/custom-search',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config

categories = ['videos', 'web']
paging = True
language_support = True
time_range_support = True
safesearch = True


def request(query, params):
    """Google-Video search request"""

    google_info = get_google_info(params, traits)

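    # Build the URL of the async REST request: 'asearch=arc' together with
    # 'async=use_ac:true,_fmt:html' selects the simplified HTML format this
    # parser expects, 'start' implements the paging, and google_info['params']
    # carries the language & region arguments (hl, lr, cr, ...) computed by
    # get_google_info().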
    query_url = (
        'https://'
        + google_info['subdomain']
        + '/search'
        + "?"
        + urlencode(
            {
                'q': query,
                'tbm': "vid",
                'start': 10 * params['pageno'],
                **google_info['params'],
                'asearch': 'arc',
                'async': 'use_ac:true,_fmt:html',
            }
        )
    )

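    # append time range and safesearch arguments; 'tbs=qdr:...' is Google's
    # generic time filter and 'safe' takes its value from filter_mapping
    # (safesearch level 0 is Google's default and needs no extra argument)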
    if params['time_range'] in time_range_dict:
        query_url += '&' + urlencode({'tbs': 'qdr:' + time_range_dict[params['time_range']]})
    if params['safesearch']:
        query_url += '&' + urlencode({'safe': filter_mapping[params['safesearch']]})
    params['url'] = query_url

    params['cookies'] = google_info['cookies']
    params['headers'].update(google_info['headers'])
    return params


def response(resp):
    """Get response from google's search request"""
    results = []

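    # detect_google_sorry() raises an exception (and thereby aborts the
    # parsing) when Google answers with its "sorry" consent / CAPTCHA page
    # instead of a result page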
    detect_google_sorry(resp)

    # convert the text to dom
    dom = html.fromstring(resp.text)

    # parse results
    for result in eval_xpath_list(dom, '//div[contains(@class, "g ")]'):

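        # thumbnails are inlined as 'data:' URLs (see the CSP admonition in
        # the module's docstring); results without a thumbnail are skipped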
        img_src = eval_xpath_getindex(result, './/img/@src', 0, None)
        if img_src is None:
            continue

        title = extract_text(eval_xpath_getindex(result, './/a/h3[1]', 0))
        url = eval_xpath_getindex(result, './/a/h3[1]/../@href', 0)

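        # 'Uroaid', 'P7xzyf' and 'J1mWY' are Google's obfuscated CSS class
        # names for the description, the publication info and the video
        # length; they have to be updated whenever Google changes its markup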
        c_node = eval_xpath_getindex(result, './/div[@class="Uroaid"]', 0)
        content = extract_text(c_node)
        pub_info = extract_text(eval_xpath(result, './/div[@class="P7xzyf"]'))
        length = extract_text(eval_xpath(result, './/div[@class="J1mWY"]'))

        results.append(
            {
                'url': url,
                'title': title,
                'content': content,
                'author': pub_info,
                'thumbnail': img_src,
                'length': length,
                'template': 'videos.html',
            }
        )

    # parse suggestion
    for suggestion in eval_xpath_list(dom, suggestion_xpath):
        # append suggestion
        results.append({'suggestion': extract_text(suggestion)})

    return results