Merge branch 'master' into engines/unsplash

Authored by d-tux on 2018-12-14 07:06:19 +01:00; committed by GitHub
commit 4a127b19de
5 changed files with 27242 additions and 135 deletions

Dockerfile

@@ -32,6 +32,7 @@ RUN echo "@commuedge http://nl.alpinelinux.org/alpine/edge/community" >> /etc/ap
     openssl-dev \
     ca-certificates \
     tini@commuedge \
+    && pip install --upgrade pip \
     && pip install --no-cache -r requirements.txt \
     && apk del \
     build-base \

searx/data/engines_languages.json: file diff suppressed because one or more lines are too long

searx/engines/findx.py

@@ -1,115 +0,0 @@
-"""
- FindX (General, Images, Videos)
-
- @website     https://www.findx.com
- @provide-api no
- @using-api   no
- @results     HTML
- @stable      no
- @parse       url, title, content, embedded, img_src, thumbnail_src
-"""
-
-from dateutil import parser
-from json import loads
-import re
-
-from lxml import html
-
-from searx import logger
-from searx.engines.xpath import extract_text
-from searx.engines.youtube_noapi import base_youtube_url, embedded_url
-from searx.url_utils import urlencode
-
-
-paging = True
-results_xpath = '//script[@id="initial-state"]'
-search_url = 'https://www.findx.com/{category}?{q}'
-type_map = {
-    'none': 'web',
-    'general': 'web',
-    'images': 'images',
-    'videos': 'videos',
-}
-
-
-def request(query, params):
-    params['url'] = search_url.format(
-        category=type_map[params['category']],
-        q=urlencode({
-            'q': query,
-            'page': params['pageno']
-        })
-    )
-    return params
-
-
-def response(resp):
-    dom = html.fromstring(resp.text)
-    results_raw_json = dom.xpath(results_xpath)
-    results_json = loads(extract_text(results_raw_json))
-
-    if len(results_json['web']['results']) > 0:
-        return _general_results(results_json['web']['results']['webSearch']['results'])
-
-    if len(results_json['images']['results']) > 0:
-        return _images_results(results_json['images']['results'])
-
-    if len(results_json['video']['results']) > 0:
-        return _videos_results(results_json['video']['results'])
-
-    return []
-
-
-def _general_results(general_results):
-    results = []
-    for result in general_results:
-        results.append({
-            'url': result['url'],
-            'title': result['title'],
-            'content': result['sum'],
-        })
-    return results
-
-
-def _images_results(image_results):
-    results = []
-    for result in image_results:
-        results.append({
-            'url': result['sourceURL'],
-            'title': result['title'],
-            'content': result['source'],
-            'thumbnail_src': _extract_url(result['assets']['thumb']['url']),
-            'img_src': _extract_url(result['assets']['file']['url']),
-            'template': 'images.html',
-        })
-    return results
-
-
-def _videos_results(video_results):
-    results = []
-    for result in video_results:
-        if not result['kind'].startswith('youtube'):
-            logger.warn('Unknown video kind in findx: {}'.format(result['kind']))
-            continue
-
-        description = result['snippet']['description']
-        if len(description) > 300:
-            description = description[:300] + '...'
-
-        results.append({
-            'url': base_youtube_url + result['id'],
-            'title': result['snippet']['title'],
-            'content': description,
-            'thumbnail': _extract_url(result['snippet']['thumbnails']['default']['url']),
-            'publishedDate': parser.parse(result['snippet']['publishedAt']),
-            'embedded': embedded_url.format(videoid=result['id']),
-            'template': 'videos.html',
-        })
-    return results
-
-
-def _extract_url(url):
-    matching = re.search('(/https?://[^)]+)', url)
-    if matching:
-        return matching.group(0)[1:]
-    return ''
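
For context on the removed engine: _extract_url unwraps findx's proxied asset URLs by locating the embedded absolute URL and dropping the slash that precedes it. A minimal illustration, with a made-up wrapped path:

    # Hypothetical proxied asset path, for illustration only.
    wrapped = '/image-proxy/https://img.example.com/photo.jpg'
    print(_extract_url(wrapped))
    # -> 'https://img.example.com/photo.jpg'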

searx/settings.yml

@@ -218,24 +218,6 @@ engines:
     shortcut : fd
     disabled : True
 
-  - name : findx
-    engine : findx
-    shortcut : fx
-    categories : general
-    disabled : True
-
-  - name : findx images
-    engine : findx
-    shortcut : fxi
-    categories : images
-    disabled : True
-
-  - name : findx videos
-    engine : findx
-    shortcut : fxv
-    categories : videos
-    disabled : True
-
   - name : flickr
     categories : images
     shortcut : fl
@@ -728,6 +710,19 @@ engines:
     shortcut : du
     disabled : True
 
+  - name : seznam
+    shortcut: szn
+    engine: xpath
+    paging : True
+    search_url : https://search.seznam.cz/?q={query}&count=10&from={pageno}
+    results_xpath: //div[@class="Page-content"]//div[@class="Result "]
+    url_xpath : ./h3/a/@href
+    title_xpath : ./h3
+    content_xpath : .//p[@class="Result-description"]
+    first_page_num : 0
+    page_size : 10
+    disabled : True
+
 #  - name : yacy
 #    engine : yacy
 #    shortcut : ya
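
The seznam entry relies on searx's generic xpath engine, where first_page_num and page_size rescale the 1-based page number into the from= offset the site expects. A rough sketch of that arithmetic, not the engine's actual code:

    search_url = 'https://search.seznam.cz/?q={query}&count=10&from={pageno}'
    first_page_num = 0
    page_size = 10

    def page_offset(pageno):
        # page 1 -> from=0, page 2 -> from=10, and so on
        return (pageno - 1) * page_size + first_page_num

    print(search_url.format(query='wikipedia', pageno=page_offset(2)))
    # -> https://search.seznam.cz/?q=wikipedia&count=10&from=10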

utils/fetch_languages.py

@@ -27,12 +27,14 @@ def fetch_supported_languages():
         if hasattr(engines[engine_name], 'fetch_supported_languages'):
             try:
                 engines_languages[engine_name] = engines[engine_name].fetch_supported_languages()
+                if type(engines_languages[engine_name]) == list:
+                    engines_languages[engine_name] = sorted(engines_languages[engine_name])
             except Exception as e:
                 print(e)
 
     # write json file
     with io.open(engines_languages_file, "w", encoding="utf-8") as f:
-        dump(engines_languages, f, ensure_ascii=False)
+        dump(engines_languages, f, ensure_ascii=False, indent=4, separators=(',', ': '))
 
     return engines_languages
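
Both changes above target the generated languages file: sorting list-valued entries makes regeneration deterministic, and indent=4 with explicit separators pretty-prints the JSON so later diffs stay reviewable. A toy demonstration with made-up data:

    from json import dumps

    # Made-up stand-in for a real engine's language list.
    engines_languages = {'example engine': ['fr', 'en', 'de']}
    engines_languages['example engine'] = sorted(engines_languages['example engine'])

    # Old call: the whole structure lands on one line.
    print(dumps(engines_languages, ensure_ascii=False))

    # New call: one entry per line, so regenerating the file produces small diffs.
    print(dumps(engines_languages, ensure_ascii=False, indent=4, separators=(',', ': ')))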