From 916739d6b41957c3f277dea7c1429488560a3784 Mon Sep 17 00:00:00 2001 From: Markus Heiser Date: Sun, 12 May 2024 17:52:52 +0200 Subject: [PATCH] [mod] simple theme: drop img_src from default results The use of img_src AND thumbnail in the default results makes no sense (only a thumbnail is needed). In the current state this is rather confusing, because img_src is displayed like a thumbnail (small) and thumbnail is displayed like an image (large). Signed-off-by: Markus Heiser --- searx/engines/annas_archive.py | 2 +- searx/engines/apkmirror.py | 4 +-- searx/engines/apple_app_store.py | 2 +- searx/engines/ask.py | 2 +- searx/engines/bandcamp.py | 2 +- searx/engines/bing_news.py | 2 +- searx/engines/bpb.py | 6 ++-- searx/engines/brave.py | 8 ++--- searx/engines/docker_hub.py | 2 +- searx/engines/fdroid.py | 4 +-- searx/engines/genius.py | 6 ++-- searx/engines/github.py | 2 +- searx/engines/goodreads.py | 2 +- searx/engines/google.py | 14 ++++---- searx/engines/google_news.py | 4 +-- searx/engines/google_play.py | 4 +-- searx/engines/google_videos.py | 6 ++-- searx/engines/imdb.py | 2 +- searx/engines/lemmy.py | 8 ++--- searx/engines/mixcloud.py | 2 +- searx/engines/moviepilot.py | 4 +-- searx/engines/openstreetmap.py | 4 +-- searx/engines/pdbe.py | 16 ++++----- searx/engines/piped.py | 2 +- searx/engines/presearch.py | 6 ++-- searx/engines/qwant.py | 6 ++-- searx/engines/radio_browser.py | 2 +- searx/engines/recoll.py | 2 +- searx/engines/rottentomatoes.py | 4 +-- searx/engines/scanr_structures.py | 3 +- searx/engines/soundcloud.py | 6 ++-- searx/engines/xpath.py | 4 +-- searx/engines/yahoo_news.py | 4 +-- searx/engines/yummly.py | 8 ++--- searx/engines/zlibrary.py | 5 ++- .../static/themes/simple/src/less/style.less | 34 +++++++++---------- searx/templates/simple/macros.html | 3 +- 37 files changed, 98 insertions(+), 99 deletions(-) diff --git a/searx/engines/annas_archive.py b/searx/engines/annas_archive.py index d758e4a96..a290dd06e 100644 --- a/searx/engines/annas_archive.py +++ b/searx/engines/annas_archive.py @@ -133,7 +133,7 @@ def _get_result(item): 'publisher': extract_text(eval_xpath(item, './/div[contains(@class, "text-sm")]')), 'authors': [extract_text(eval_xpath(item, './/div[contains(@class, "italic")]'))], 'content': extract_text(eval_xpath(item, './/div[contains(@class, "text-xs")]')), - 'img_src': item.xpath('.//img/@src')[0], + 'thumbnail': item.xpath('.//img/@src')[0], } diff --git a/searx/engines/apkmirror.py b/searx/engines/apkmirror.py index 93d093a60..d9f291c4f 100644 --- a/searx/engines/apkmirror.py +++ b/searx/engines/apkmirror.py @@ -53,8 +53,8 @@ def response(resp): url = base_url + link.attrib.get('href') + '#downloads' title = extract_text(link) - img_src = base_url + eval_xpath_getindex(result, './/img/@src', 0) - res = {'url': url, 'title': title, 'img_src': img_src} + thumbnail = base_url + eval_xpath_getindex(result, './/img/@src', 0) + res = {'url': url, 'title': title, 'thumbnail': thumbnail} results.append(res) diff --git a/searx/engines/apple_app_store.py b/searx/engines/apple_app_store.py index a11dd0f5c..7817848ff 100644 --- a/searx/engines/apple_app_store.py +++ b/searx/engines/apple_app_store.py @@ -47,7 +47,7 @@ def response(resp): 'url': result['trackViewUrl'], 'title': result['trackName'], 'content': result['description'], - 'img_src': result['artworkUrl100'], + 'thumbnail': result['artworkUrl100'], 'publishedDate': parse(result['currentVersionReleaseDate']), 'author': result['sellerName'], } diff --git a/searx/engines/ask.py b/searx/engines/ask.py 
index 3366ee5d0..82545c417 100644 --- a/searx/engines/ask.py +++ b/searx/engines/ask.py @@ -66,7 +66,7 @@ def response(resp): "title": item['title'], "content": item['abstract'], "publishedDate": pubdate_original, - # "img_src": item.get('image_url') or None, # these are not thumbs / to large + # "thumbnail": item.get('image_url') or None, # these are not thumbs / to large "metadata": ' | '.join(metadata), } ) diff --git a/searx/engines/bandcamp.py b/searx/engines/bandcamp.py index b79b0df3e..590d206d2 100644 --- a/searx/engines/bandcamp.py +++ b/searx/engines/bandcamp.py @@ -68,7 +68,7 @@ def response(resp): thumbnail = result.xpath('.//div[@class="art"]/img/@src') if thumbnail: - new_result['img_src'] = thumbnail[0] + new_result['thumbnail'] = thumbnail[0] result_id = parse_qs(urlparse(link.get('href')).query)["search_item_id"][0] itemtype = extract_text(result.xpath('.//div[@class="itemtype"]')).lower() diff --git a/searx/engines/bing_news.py b/searx/engines/bing_news.py index 459927fd7..e93f7fea3 100644 --- a/searx/engines/bing_news.py +++ b/searx/engines/bing_news.py @@ -130,7 +130,7 @@ def response(resp): 'url': url, 'title': title, 'content': content, - 'img_src': thumbnail, + 'thumbnail': thumbnail, 'metadata': metadata, } ) diff --git a/searx/engines/bpb.py b/searx/engines/bpb.py index 8be451f95..d6ad7f909 100644 --- a/searx/engines/bpb.py +++ b/searx/engines/bpb.py @@ -40,9 +40,9 @@ def response(resp): json_resp = resp.json() for result in json_resp['teaser']: - img_src = None + thumbnail = None if result['teaser']['image']: - img_src = base_url + result['teaser']['image']['sources'][-1]['url'] + thumbnail = base_url + result['teaser']['image']['sources'][-1]['url'] metadata = result['extension']['overline'] authors = ', '.join(author['name'] for author in result['extension'].get('authors', [])) @@ -58,7 +58,7 @@ def response(resp): 'url': base_url + result['teaser']['link']['url'], 'title': result['teaser']['title'], 'content': result['teaser']['text'], - 'img_src': img_src, + 'thumbnail': thumbnail, 'publishedDate': publishedDate, 'metadata': metadata, } diff --git a/searx/engines/brave.py b/searx/engines/brave.py index 6d4ab3a44..04c2931f9 100644 --- a/searx/engines/brave.py +++ b/searx/engines/brave.py @@ -296,14 +296,14 @@ def _parse_search(resp): content_tag = eval_xpath_getindex(result, './/div[contains(@class, "snippet-description")]', 0, default='') pub_date_raw = eval_xpath(result, 'substring-before(.//div[contains(@class, "snippet-description")], "-")') - img_src = eval_xpath_getindex(result, './/img[contains(@class, "thumb")]/@src', 0, default='') + thumbnail = eval_xpath_getindex(result, './/img[contains(@class, "thumb")]/@src', 0, default='') item = { 'url': url, 'title': extract_text(title_tag), 'content': extract_text(content_tag), 'publishedDate': _extract_published_date(pub_date_raw), - 'img_src': img_src, + 'thumbnail': thumbnail, } video_tag = eval_xpath_getindex( @@ -324,7 +324,7 @@ def _parse_search(resp): ) item['publishedDate'] = _extract_published_date(pub_date_raw) else: - item['img_src'] = eval_xpath_getindex(video_tag, './/img/@src', 0, default='') + item['thumbnail'] = eval_xpath_getindex(video_tag, './/img/@src', 0, default='') result_list.append(item) @@ -351,7 +351,7 @@ def _parse_news(json_resp): 'publishedDate': _extract_published_date(result['age']), } if result['thumbnail'] is not None: - item['img_src'] = result['thumbnail']['src'] + item['thumbnail'] = result['thumbnail']['src'] result_list.append(item) return result_list diff --git 
a/searx/engines/docker_hub.py b/searx/engines/docker_hub.py index 48b4dcf0e..5963ee2c5 100644 --- a/searx/engines/docker_hub.py +++ b/searx/engines/docker_hub.py @@ -47,7 +47,7 @@ def response(resp): 'url': base_url + ("_/" if is_official else "r/") + item.get("slug", ""), 'title': item.get("name"), 'content': item.get("short_description"), - 'img_src': item["logo_url"].get("large") or item["logo_url"].get("small"), + 'thumbnail': item["logo_url"].get("large") or item["logo_url"].get("small"), 'package_name': item.get("name"), 'maintainer': item["publisher"].get("name"), 'publishedDate': parser.parse(item.get("updated_at") or item.get("created_at")), diff --git a/searx/engines/fdroid.py b/searx/engines/fdroid.py index b5f004e7b..6a6bf63cb 100644 --- a/searx/engines/fdroid.py +++ b/searx/engines/fdroid.py @@ -47,8 +47,8 @@ def response(resp): + ' - ' + extract_text(app.xpath('./div/div/span[@class="package-license"]')).strip() ) - app_img_src = app.xpath('./img[@class="package-icon"]/@src')[0] + thumbnail = app.xpath('./img[@class="package-icon"]/@src')[0] - results.append({'url': app_url, 'title': app_title, 'content': app_content, 'img_src': app_img_src}) + results.append({'url': app_url, 'title': app_title, 'content': app_content, 'thumbnail': thumbnail}) return results diff --git a/searx/engines/genius.py b/searx/engines/genius.py index b25b01568..24ab91386 100644 --- a/searx/engines/genius.py +++ b/searx/engines/genius.py @@ -50,7 +50,7 @@ def parse_lyric(hit): 'url': hit['result']['url'], 'title': hit['result']['full_title'], 'content': content, - 'img_src': hit['result']['song_art_image_thumbnail_url'], + 'thumbnail': hit['result']['song_art_image_thumbnail_url'], } if timestamp: result.update({'publishedDate': datetime.fromtimestamp(timestamp)}) @@ -68,7 +68,7 @@ def parse_artist(hit): 'url': hit['result']['url'], 'title': hit['result']['name'], 'content': '', - 'img_src': hit['result']['image_url'], + 'thumbnail': hit['result']['image_url'], } return result @@ -84,7 +84,7 @@ def parse_album(hit): return { 'url': res['url'], 'title': res['full_title'], - 'img_src': res['cover_art_url'], + 'thumbnail': res['cover_art_url'], 'content': content.strip(), } diff --git a/searx/engines/github.py b/searx/engines/github.py index a20f14d14..714cb5ca3 100644 --- a/searx/engines/github.py +++ b/searx/engines/github.py @@ -50,7 +50,7 @@ def response(resp): 'url': item.get('html_url'), 'title': item.get('full_name'), 'content': ' / '.join(content), - 'img_src': item.get('owner', {}).get('avatar_url'), + 'thumbnail': item.get('owner', {}).get('avatar_url'), 'package_name': item.get('name'), # 'version': item.get('updated_at'), 'maintainer': item.get('owner', {}).get('login'), diff --git a/searx/engines/goodreads.py b/searx/engines/goodreads.py index eb5c25605..c4b7bb5b1 100644 --- a/searx/engines/goodreads.py +++ b/searx/engines/goodreads.py @@ -48,7 +48,7 @@ def response(resp): { 'url': base_url + extract_text(eval_xpath(result, url_xpath)), 'title': extract_text(eval_xpath(result, title_xpath)), - 'img_src': extract_text(eval_xpath(result, thumbnail_xpath)), + 'thumbnail': extract_text(eval_xpath(result, thumbnail_xpath)), 'content': extract_text(eval_xpath(result, info_text_xpath)), 'metadata': extract_text(eval_xpath(result, author_xpath)), } diff --git a/searx/engines/google.py b/searx/engines/google.py index ef82a057b..90d687a4e 100644 --- a/searx/engines/google.py +++ b/searx/engines/google.py @@ -365,17 +365,17 @@ def response(resp): logger.debug('ignoring item from the result_xpath list: 
missing content of title "%s"', title) continue - img_src = content_nodes[0].xpath('.//img/@src') - if img_src: - img_src = img_src[0] - if img_src.startswith('data:image'): + thumbnail = content_nodes[0].xpath('.//img/@src') + if thumbnail: + thumbnail = thumbnail[0] + if thumbnail.startswith('data:image'): img_id = content_nodes[0].xpath('.//img/@id') if img_id: - img_src = data_image_map.get(img_id[0]) + thumbnail = data_image_map.get(img_id[0]) else: - img_src = None + thumbnail = None - results.append({'url': url, 'title': title, 'content': content, 'img_src': img_src}) + results.append({'url': url, 'title': title, 'content': content, 'thumbnail': thumbnail}) except Exception as e: # pylint: disable=broad-except logger.error(e, exc_info=True) diff --git a/searx/engines/google_news.py b/searx/engines/google_news.py index 6f5db041e..3720b68fa 100644 --- a/searx/engines/google_news.py +++ b/searx/engines/google_news.py @@ -165,14 +165,14 @@ def response(resp): # "https://lh3.googleusercontent.com/DjhQh7DMszk.....z=-p-h100-w100" # These URL are long but not personalized (double checked via tor). - img_src = extract_text(result.xpath('preceding-sibling::a/figure/img/@src')) + thumbnail = extract_text(result.xpath('preceding-sibling::a/figure/img/@src')) results.append( { 'url': href, 'title': title, 'content': content, - 'img_src': img_src, + 'thumbnail': thumbnail, } ) diff --git a/searx/engines/google_play.py b/searx/engines/google_play.py index f8ab02c27..9853a4911 100644 --- a/searx/engines/google_play.py +++ b/searx/engines/google_play.py @@ -64,13 +64,13 @@ def response_movies(resp): title = extract_text(eval_xpath(div_2, './div[@title]')) metadata = extract_text(eval_xpath(div_2, './div[@class]')) img = eval_xpath(div_1, './/img')[0] - img_src = img.get('src') + thumbnail = img.get('src') results.append( { "url": url, "title": title, "content": sec_name, - "img_src": img_src, + "thumbnail": thumbnail, 'metadata': metadata, 'template': 'videos.html', } diff --git a/searx/engines/google_videos.py b/searx/engines/google_videos.py index 4a5818f7e..4a032ef0f 100644 --- a/searx/engines/google_videos.py +++ b/searx/engines/google_videos.py @@ -107,8 +107,8 @@ def response(resp): # parse results for result in eval_xpath_list(dom, '//div[contains(@class, "g ")]'): - img_src = eval_xpath_getindex(result, './/img/@src', 0, None) - if img_src is None: + thumbnail = eval_xpath_getindex(result, './/img/@src', 0, None) + if thumbnail is None: continue title = extract_text(eval_xpath_getindex(result, './/a/h3[1]', 0)) @@ -124,7 +124,7 @@ def response(resp): 'title': title, 'content': content, 'author': pub_info, - 'thumbnail': img_src, + 'thumbnail': thumbnail, 'template': 'videos.html', } ) diff --git a/searx/engines/imdb.py b/searx/engines/imdb.py index b0938b523..f93d24674 100644 --- a/searx/engines/imdb.py +++ b/searx/engines/imdb.py @@ -90,7 +90,7 @@ def response(resp): "title": title, "url": href_base.format(category=categ, entry_id=entry_id), "content": content, - "img_src": image_url, + "thumbnail": image_url, } ) diff --git a/searx/engines/lemmy.py b/searx/engines/lemmy.py index a924b4bc4..1845431d7 100644 --- a/searx/engines/lemmy.py +++ b/searx/engines/lemmy.py @@ -91,7 +91,7 @@ def _get_communities(json): 'url': result['community']['actor_id'], 'title': result['community']['title'], 'content': markdown_to_text(result['community'].get('description', '')), - 'img_src': result['community'].get('icon', result['community'].get('banner')), + 'thumbnail': result['community'].get('icon', 
result['community'].get('banner')), 'publishedDate': datetime.strptime(counts['published'][:19], '%Y-%m-%dT%H:%M:%S'), 'metadata': metadata, } @@ -120,9 +120,9 @@ def _get_posts(json): for result in json["posts"]: user = result['creator'].get('display_name', result['creator']['name']) - img_src = None + thumbnail = None if result['post'].get('thumbnail_url'): - img_src = result['post']['thumbnail_url'] + '?format=webp&thumbnail=208' + thumbnail = result['post']['thumbnail_url'] + '?format=webp&thumbnail=208' metadata = ( f"▲ {result['counts']['upvotes']} ▼ {result['counts']['downvotes']}" @@ -140,7 +140,7 @@ def _get_posts(json): 'url': result['post']['ap_id'], 'title': result['post']['name'], 'content': content, - 'img_src': img_src, + 'thumbnail': thumbnail, 'publishedDate': datetime.strptime(result['post']['published'][:19], '%Y-%m-%dT%H:%M:%S'), 'metadata': metadata, } diff --git a/searx/engines/mixcloud.py b/searx/engines/mixcloud.py index d6618aa6a..006d59578 100644 --- a/searx/engines/mixcloud.py +++ b/searx/engines/mixcloud.py @@ -44,7 +44,7 @@ def response(resp): 'url': r_url, 'title': result['name'], 'iframe_src': iframe_src.format(url=r_url), - 'img_src': result['pictures']['medium'], + 'thumbnail': result['pictures']['medium'], 'publishedDate': publishedDate, 'content': result['user']['name'], } diff --git a/searx/engines/moviepilot.py b/searx/engines/moviepilot.py index 645936d9f..532c1e66e 100644 --- a/searx/engines/moviepilot.py +++ b/searx/engines/moviepilot.py @@ -104,11 +104,11 @@ def response(resp): item['metadata'] = html_to_text(result.get('meta_short', '')) if result.get('image'): - item['img_src'] = image_url.format(image_id=result['image'], filename=result['image_filename']) + item['thumbnail'] = image_url.format(image_id=result['image'], filename=result['image_filename']) else: item['url'] = result['url'] item['content'] = ', '.join([result['class'], result['info'], result['more']]) - item['img_src'] = result['image'] + item['thumbnail'] = result['image'] results.append(item) diff --git a/searx/engines/openstreetmap.py b/searx/engines/openstreetmap.py index 7e3d8f43b..8f3565eda 100644 --- a/searx/engines/openstreetmap.py +++ b/searx/engines/openstreetmap.py @@ -178,7 +178,7 @@ def response(resp): continue url, osm, geojson = get_url_osm_geojson(result) - img_src = get_thumbnail(get_img_src(result)) + thumbnail = get_thumbnail(get_img_src(result)) links, link_keys = get_links(result, user_language) data = get_data(result, user_language, link_keys) @@ -191,7 +191,7 @@ def response(resp): 'url': url, 'osm': osm, 'geojson': geojson, - 'img_src': img_src, + 'thumbnail': thumbnail, 'links': links, 'data': data, 'type': get_tag_label(result.get('category'), result.get('type', ''), user_language), diff --git a/searx/engines/pdbe.py b/searx/engines/pdbe.py index 96e3509c2..ae76a7290 100644 --- a/searx/engines/pdbe.py +++ b/searx/engines/pdbe.py @@ -65,18 +65,18 @@ def construct_body(result): page='', year=result['release_year'], ) - img_src = pdbe_preview_url.format(pdb_id=result['pdb_id']) + thumbnail = pdbe_preview_url.format(pdb_id=result['pdb_id']) except KeyError: content = None - img_src = None + thumbnail = None # construct url for preview image try: - img_src = pdbe_preview_url.format(pdb_id=result['pdb_id']) + thumbnail = pdbe_preview_url.format(pdb_id=result['pdb_id']) except KeyError: - img_src = None + thumbnail = None - return [title, content, img_src] + return [title, content, thumbnail] def response(resp): @@ -106,16 +106,16 @@ def response(resp): ) # 
obsoleted entries don't have preview images - img_src = None + thumbnail = None else: - title, content, img_src = construct_body(result) + title, content, thumbnail = construct_body(result) results.append( { 'url': pdbe_entry_url.format(pdb_id=result['pdb_id']), 'title': title, 'content': content, - 'img_src': img_src, + 'thumbnail': thumbnail, } ) diff --git a/searx/engines/piped.py b/searx/engines/piped.py index 09ef48fe6..5b60dec5a 100644 --- a/searx/engines/piped.py +++ b/searx/engines/piped.py @@ -151,7 +151,7 @@ def response(resp): elif piped_filter == 'music_songs': item["template"] = "default.html" - item["img_src"] = result.get("thumbnail", "") + item["thumbnail"] = result.get("thumbnail", "") item["content"] = result.get("uploaderName", "") or "" results.append(item) diff --git a/searx/engines/presearch.py b/searx/engines/presearch.py index 1af32ac85..051138e91 100644 --- a/searx/engines/presearch.py +++ b/searx/engines/presearch.py @@ -162,7 +162,7 @@ def parse_search_query(json_results): result = { 'url': item['link'], 'title': item['title'], - 'img_src': item['image'], + 'thumbnail': item['image'], 'content': '', 'metadata': item.get('source'), } @@ -244,7 +244,7 @@ def response(resp): 'url': item.get('link'), 'content': '', 'metadata': ' / '.join(metadata), - 'img_src': item.get('image'), + 'thumbnail': item.get('image'), } ) @@ -257,7 +257,7 @@ def response(resp): 'url': item.get('link'), 'content': item.get('description', ''), 'metadata': ' / '.join(metadata), - 'img_src': item.get('image'), + 'thumbnail': item.get('image'), } ) diff --git a/searx/engines/qwant.py b/searx/engines/qwant.py index 46edb6958..facd47bb9 100644 --- a/searx/engines/qwant.py +++ b/searx/engines/qwant.py @@ -242,15 +242,15 @@ def parse_web_api(resp): if pub_date is not None: pub_date = datetime.fromtimestamp(pub_date) news_media = item.get('media', []) - img_src = None + thumbnail = None if news_media: - img_src = news_media[0].get('pict', {}).get('url', None) + thumbnail = news_media[0].get('pict', {}).get('url', None) results.append( { 'title': title, 'url': res_url, 'publishedDate': pub_date, - 'img_src': img_src, + 'thumbnail': thumbnail, } ) diff --git a/searx/engines/radio_browser.py b/searx/engines/radio_browser.py index 3c06159a2..c20580616 100644 --- a/searx/engines/radio_browser.py +++ b/searx/engines/radio_browser.py @@ -114,7 +114,7 @@ def response(resp): { 'url': url, 'title': result['name'], - 'img_src': result.get('favicon', '').replace("http://", "https://"), + 'thumbnail': result.get('favicon', '').replace("http://", "https://"), 'content': ' | '.join(content), 'metadata': ' | '.join(metadata), 'iframe_src': result['url_resolved'].replace("http://", "https://"), diff --git a/searx/engines/recoll.py b/searx/engines/recoll.py index b9e87a723..b7499b5a5 100644 --- a/searx/engines/recoll.py +++ b/searx/engines/recoll.py @@ -133,7 +133,7 @@ def response(resp): ) if mtype in ['image'] and subtype in ['bmp', 'gif', 'jpeg', 'png']: - item['img_src'] = url + item['thumbnail'] = url results.append(item) diff --git a/searx/engines/rottentomatoes.py b/searx/engines/rottentomatoes.py index 0cac6e247..131abfaa3 100644 --- a/searx/engines/rottentomatoes.py +++ b/searx/engines/rottentomatoes.py @@ -22,7 +22,7 @@ base_url = "https://www.rottentomatoes.com" results_xpath = "//search-page-media-row" url_xpath = "./a[1]/@href" title_xpath = "./a/img/@alt" -img_src_xpath = "./a/img/@src" +thumbnail_xpath = "./a/img/@src" release_year_xpath = "concat('From ', string(./@releaseyear))" score_xpath = 
"concat('Score: ', string(./@tomatometerscore))" cast_xpath = "concat('Starring ', string(./@cast))" @@ -52,7 +52,7 @@ def response(resp): 'url': extract_text(eval_xpath(result, url_xpath)), 'title': extract_text(eval_xpath(result, title_xpath)), 'content': ', '.join(content), - 'img_src': extract_text(eval_xpath(result, img_src_xpath)), + 'thumbnail': extract_text(eval_xpath(result, thumbnail_xpath)), } ) diff --git a/searx/engines/scanr_structures.py b/searx/engines/scanr_structures.py index ad27079dd..7a5415e54 100644 --- a/searx/engines/scanr_structures.py +++ b/searx/engines/scanr_structures.py @@ -77,8 +77,7 @@ def response(resp): { 'url': url + 'structure/' + result['id'], 'title': result['label'], - # 'thumbnail': thumbnail, - 'img_src': thumbnail, + 'thumbnail': thumbnail, 'content': html_to_text(content), } ) diff --git a/searx/engines/soundcloud.py b/searx/engines/soundcloud.py index 3181d39b7..3281ea398 100644 --- a/searx/engines/soundcloud.py +++ b/searx/engines/soundcloud.py @@ -94,9 +94,9 @@ def response(resp): 'publishedDate': parser.parse(result['last_modified']), 'iframe_src': "https://w.soundcloud.com/player/?url=" + uri, } - img_src = result['artwork_url'] or result['user']['avatar_url'] - if img_src: - res['img_src'] = img_src + thumbnail = result['artwork_url'] or result['user']['avatar_url'] + if thumbnail: + res['thumbnail'] = thumbnail results.append(res) return results diff --git a/searx/engines/xpath.py b/searx/engines/xpath.py index bdad7e753..558531880 100644 --- a/searx/engines/xpath.py +++ b/searx/engines/xpath.py @@ -135,7 +135,7 @@ title_xpath = None '''`XPath selector`_ of result's ``title``.''' thumbnail_xpath = False -'''`XPath selector`_ of result's ``img_src``.''' +'''`XPath selector`_ of result's ``thumbnail``.''' suggestion_xpath = '' '''`XPath selector`_ of result's ``suggestion``.''' @@ -266,7 +266,7 @@ def response(resp): # pylint: disable=too-many-branches if thumbnail_xpath: thumbnail_xpath_result = eval_xpath_list(result, thumbnail_xpath) if len(thumbnail_xpath_result) > 0: - tmp_result['img_src'] = extract_url(thumbnail_xpath_result, search_url) + tmp_result['thumbnail'] = extract_url(thumbnail_xpath_result, search_url) # add alternative cached url if available if cached_xpath: diff --git a/searx/engines/yahoo_news.py b/searx/engines/yahoo_news.py index f3f756ba3..f9a5fee1b 100644 --- a/searx/engines/yahoo_news.py +++ b/searx/engines/yahoo_news.py @@ -77,9 +77,9 @@ def response(resp): url = parse_url(url) title = extract_text(result.xpath('.//h4/a')) content = extract_text(result.xpath('.//p')) - img_src = eval_xpath_getindex(result, './/img/@data-src', 0, None) + thumbnail = eval_xpath_getindex(result, './/img/@data-src', 0, None) - item = {'url': url, 'title': title, 'content': content, 'img_src': img_src} + item = {'url': url, 'title': title, 'content': content, 'thumbnail': thumbnail} pub_date = extract_text(result.xpath('.//span[contains(@class,"s-time")]')) ago = AGO_RE.search(pub_date) diff --git a/searx/engines/yummly.py b/searx/engines/yummly.py index 6d4c68350..3f86baabe 100644 --- a/searx/engines/yummly.py +++ b/searx/engines/yummly.py @@ -52,11 +52,11 @@ def response(resp): if description is not None: content = markdown_to_text(description['text']) - img_src = None + thumbnail = None if result['display']['images']: - img_src = result['display']['images'][0] + thumbnail = result['display']['images'][0] elif result['content']['details']['images']: - img_src = result['content']['details']['images'][0]['resizableImageUrl'] + thumbnail 
= result['content']['details']['images'][0]['resizableImageUrl'] url = result['display']['source']['sourceRecipeUrl'] if 'www.yummly.com/private' in url: @@ -67,7 +67,7 @@ def response(resp): 'url': url, 'title': result['display']['displayName'], 'content': content, - 'img_src': img_src, + 'thumbnail': thumbnail, 'metadata': gettext('Language') + f": {result['locale'].split('-')[0]}", } ) diff --git a/searx/engines/zlibrary.py b/searx/engines/zlibrary.py index ba1f474fa..c29c9135c 100644 --- a/searx/engines/zlibrary.py +++ b/searx/engines/zlibrary.py @@ -141,9 +141,12 @@ def _parse_result(item) -> Dict[str, Any]: "authors": [extract_text(author) for author in author_elements], "publisher": _text(item, './/a[@title="Publisher"]'), "type": _text(item, './/div[contains(@class, "property__file")]//div[contains(@class, "property_value")]'), - "img_src": _text(item, './/img[contains(@class, "cover")]/@data-src'), } + thumbnail = _text(item, './/img[contains(@class, "cover")]/@data-src') + if not thumbnail.startswith('/'): + result["thumbnail"] = thumbnail + year = _text(item, './/div[contains(@class, "property_year")]//div[contains(@class, "property_value")]') if year: result["publishedDate"] = datetime.strptime(year, '%Y') diff --git a/searx/static/themes/simple/src/less/style.less b/searx/static/themes/simple/src/less/style.less index c7f459e59..02dc59efc 100644 --- a/searx/static/themes/simple/src/less/style.less +++ b/searx/static/themes/simple/src/less/style.less @@ -281,24 +281,12 @@ article[data-vim-selected].category-social { color: var(--color-result-description-highlight-font); } - img { - &.thumbnail { - .ltr-float-left(); - padding-top: 0.6rem; - .ltr-padding-right(1rem); - width: 20rem; - height: unset; // remove heigth value that was needed for lazy loading - } - - &.image { - .ltr-float-left(); - padding-top: 0.6rem; - .ltr-padding-right(1rem); - width: 7rem; - max-height: 7rem; - object-fit: scale-down; - object-position: right top; - } + img.thumbnail { + .ltr-float-left(); + padding-top: 0.6rem; + .ltr-padding-right(1rem); + width: 7rem; + height: unset; // remove heigth value that was needed for lazy loading } .break { @@ -394,6 +382,16 @@ article[data-vim-selected].category-social { padding: 10px 0 0 0; } +.result-videos { + img.thumbnail { + .ltr-float-left(); + padding-top: 0.6rem; + .ltr-padding-right(1rem); + width: 20rem; + height: unset; // remove heigth value that was needed for lazy loading + } +} + .result-videos .content { overflow: hidden; } diff --git a/searx/templates/simple/macros.html b/searx/templates/simple/macros.html index 23aaa4f87..9ec6fb47c 100644 --- a/searx/templates/simple/macros.html +++ b/searx/templates/simple/macros.html @@ -25,8 +25,7 @@ {{- part -}} {%- endfor %} {{- result_close_link() -}} - {%- if result.img_src %}{{ result_open_link(result.url) }}{{ result_close_link() }}{% endif -%} - {%- if result.thumbnail %}{{ result_open_link(result.url) }}{{ result_close_link() }}{% endif -%} + {%- if result.thumbnail %}{{ result_open_link(result.url) }}{{ result_close_link() }}{% endif -%}
  <h3>{{ result_link(result.url, result.title|safe) }}</h3>
{%- endmacro -%}
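
Note for engine maintainers (not part of the patch above): after this change, default results should populate only the `thumbnail` field; `img_src` is untouched for the image results template. Below is a minimal sketch of a JSON-based engine following this convention — the endpoint, engine name, and JSON field names are invented for illustration, not taken from this patch.

```python
# Sketch of a default-result engine after this patch: only 'thumbnail' is set.
# The endpoint and JSON field names below are hypothetical.
from urllib.parse import urlencode

base_url = 'https://example.org'
search_url = base_url + '/api/search?{query}'


def request(query, params):
    params['url'] = search_url.format(query=urlencode({'q': query}))
    return params


def response(resp):
    results = []
    for item in resp.json().get('items', []):
        results.append(
            {
                'url': item['link'],
                'title': item['title'],
                'content': item.get('summary', ''),
                # was 'img_src' before this patch; the simple theme now renders
                # the small preview in default results from 'thumbnail' only
                'thumbnail': item.get('preview_image'),
            }
        )
    return results
```

With the accompanying style.less change, these thumbnails render at 7rem wide in default results, while results using the videos template keep the larger 20rem thumbnails.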