searxng/searx/engines/stackoverflow.py

# SPDX-License-Identifier: AGPL-3.0-or-later
"""
Stackoverflow (IT)
"""

from urllib.parse import urlencode, urljoin, urlparse

from lxml import html

from searx.utils import extract_text
from searx.exceptions import SearxEngineCaptchaException

# about
about = {
    "website": 'https://stackoverflow.com/',
    "wikidata_id": 'Q549037',
    "official_api_documentation": 'https://api.stackexchange.com/docs',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}
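
# a typical settings.yml entry for this engine (the shortcut below is
# illustrative, not authoritative):
#
#   - name: stackoverflow
#     engine: stackoverflow
#     shortcut: st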

# engine dependent config
categories = ['it']
paging = True

# search-url
url = 'https://stackoverflow.com/'
search_url = url + 'search?{query}&page={pageno}'
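# {query} carries the urlencoded search terms, {pageno} searx's 1-based
# page counter
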
# specific xpath variables
results_xpath = '//div[contains(@class,"question-summary")]'
link_xpath = './/div[@class="result-link"]//a|.//div[@class="summary"]//h3//a'
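# link_xpath covers two variants of the result markup: a "result-link"
# block and a "summary" heading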
content_xpath = './/div[@class="excerpt"]'


# do search-request
def request(query, params):
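    # the engine scrapes the regular HTML result page ("use_official_api"
    # is False above), so the request is a plain GET on the paged search URL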
    params['url'] = search_url.format(query=urlencode({'q': query}), pageno=params['pageno'])

    return params


# get response from search-request
def response(resp):
    resp_url = urlparse(resp.url)
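
    # Stack Overflow answers suspected bots with a redirect to /nocaptcha;
    # report it as a CAPTCHA so searx can suspend the engine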
    if resp_url.path.startswith('/nocaptcha'):
        raise SearxEngineCaptchaException()

    results = []

    dom = html.fromstring(resp.text)

    # parse results
    for result in dom.xpath(results_xpath):
        link = result.xpath(link_xpath)[0]
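        # the first matching anchor is the question link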
        href = urljoin(url, link.attrib.get('href'))
        title = extract_text(link)
        content = extract_text(result.xpath(content_xpath))

        # append result
        results.append({'url': href,
                        'title': title,
                        'content': content})

    # return results
    return results