[mod] simple theme: drop img_src from default results

Using both img_src AND thumbnail in the default results makes no sense (only a
thumbnail is needed).  The current state is rather confusing, because img_src
is rendered small like a thumbnail, while thumbnail is rendered large like an
image.

Signed-off-by: Markus Heiser <markus.heiser@darmarit.de>
Markus Heiser 2024-05-12 17:52:52 +02:00 committed by Markus Heiser
parent 0f2f52f0b5
commit 916739d6b4
37 changed files with 98 additions and 99 deletions
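
The change pattern is the same in every engine touched below: a result rendered
with the default template reports its preview image under the 'thumbnail' key
instead of 'img_src'.  A minimal sketch of the new result shape (hypothetical
engine with made-up JSON fields; only the 'thumbnail' key mirrors the diffs):

    # sketch: an engine returning default-template results after this change
    def response(resp):
        results = []
        for item in resp.json().get('items', []):  # 'items' is a made-up field
            results.append({
                'url': item['url'],
                'title': item['title'],
                'content': item.get('description', ''),
                # previously: 'img_src': item.get('icon')
                'thumbnail': item.get('icon'),  # small preview, shown by default.html
            })
        return results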


@@ -133,7 +133,7 @@ def _get_result(item):
         'publisher': extract_text(eval_xpath(item, './/div[contains(@class, "text-sm")]')),
         'authors': [extract_text(eval_xpath(item, './/div[contains(@class, "italic")]'))],
         'content': extract_text(eval_xpath(item, './/div[contains(@class, "text-xs")]')),
-        'img_src': item.xpath('.//img/@src')[0],
+        'thumbnail': item.xpath('.//img/@src')[0],
     }


@@ -53,8 +53,8 @@ def response(resp):
         url = base_url + link.attrib.get('href') + '#downloads'
         title = extract_text(link)
-        img_src = base_url + eval_xpath_getindex(result, './/img/@src', 0)
-        res = {'url': url, 'title': title, 'img_src': img_src}
+        thumbnail = base_url + eval_xpath_getindex(result, './/img/@src', 0)
+        res = {'url': url, 'title': title, 'thumbnail': thumbnail}
         results.append(res)


@@ -47,7 +47,7 @@ def response(resp):
                 'url': result['trackViewUrl'],
                 'title': result['trackName'],
                 'content': result['description'],
-                'img_src': result['artworkUrl100'],
+                'thumbnail': result['artworkUrl100'],
                 'publishedDate': parse(result['currentVersionReleaseDate']),
                 'author': result['sellerName'],
             }


@@ -66,7 +66,7 @@ def response(resp):
                 "title": item['title'],
                 "content": item['abstract'],
                 "publishedDate": pubdate_original,
-                # "img_src": item.get('image_url') or None,  # these are not thumbs / to large
+                # "thumbnail": item.get('image_url') or None,  # these are not thumbs / to large
                 "metadata": ' | '.join(metadata),
             }
         )


@@ -68,7 +68,7 @@ def response(resp):
         thumbnail = result.xpath('.//div[@class="art"]/img/@src')
         if thumbnail:
-            new_result['img_src'] = thumbnail[0]
+            new_result['thumbnail'] = thumbnail[0]
         result_id = parse_qs(urlparse(link.get('href')).query)["search_item_id"][0]
         itemtype = extract_text(result.xpath('.//div[@class="itemtype"]')).lower()


@@ -130,7 +130,7 @@ def response(resp):
                 'url': url,
                 'title': title,
                 'content': content,
-                'img_src': thumbnail,
+                'thumbnail': thumbnail,
                 'metadata': metadata,
             }
         )


@@ -40,9 +40,9 @@ def response(resp):
     json_resp = resp.json()
     for result in json_resp['teaser']:
-        img_src = None
+        thumbnail = None
         if result['teaser']['image']:
-            img_src = base_url + result['teaser']['image']['sources'][-1]['url']
+            thumbnail = base_url + result['teaser']['image']['sources'][-1]['url']
         metadata = result['extension']['overline']
         authors = ', '.join(author['name'] for author in result['extension'].get('authors', []))
@@ -58,7 +58,7 @@ def response(resp):
                 'url': base_url + result['teaser']['link']['url'],
                 'title': result['teaser']['title'],
                 'content': result['teaser']['text'],
-                'img_src': img_src,
+                'thumbnail': thumbnail,
                 'publishedDate': publishedDate,
                 'metadata': metadata,
             }


@@ -296,14 +296,14 @@ def _parse_search(resp):
         content_tag = eval_xpath_getindex(result, './/div[contains(@class, "snippet-description")]', 0, default='')
         pub_date_raw = eval_xpath(result, 'substring-before(.//div[contains(@class, "snippet-description")], "-")')
-        img_src = eval_xpath_getindex(result, './/img[contains(@class, "thumb")]/@src', 0, default='')
+        thumbnail = eval_xpath_getindex(result, './/img[contains(@class, "thumb")]/@src', 0, default='')
         item = {
             'url': url,
             'title': extract_text(title_tag),
             'content': extract_text(content_tag),
             'publishedDate': _extract_published_date(pub_date_raw),
-            'img_src': img_src,
+            'thumbnail': thumbnail,
         }
         video_tag = eval_xpath_getindex(
@@ -324,7 +324,7 @@ def _parse_search(resp):
             )
             item['publishedDate'] = _extract_published_date(pub_date_raw)
         else:
-            item['img_src'] = eval_xpath_getindex(video_tag, './/img/@src', 0, default='')
+            item['thumbnail'] = eval_xpath_getindex(video_tag, './/img/@src', 0, default='')
         result_list.append(item)
@@ -351,7 +351,7 @@ def _parse_news(json_resp):
             'publishedDate': _extract_published_date(result['age']),
         }
         if result['thumbnail'] is not None:
-            item['img_src'] = result['thumbnail']['src']
+            item['thumbnail'] = result['thumbnail']['src']
         result_list.append(item)
     return result_list


@@ -47,7 +47,7 @@ def response(resp):
                 'url': base_url + ("_/" if is_official else "r/") + item.get("slug", ""),
                 'title': item.get("name"),
                 'content': item.get("short_description"),
-                'img_src': item["logo_url"].get("large") or item["logo_url"].get("small"),
+                'thumbnail': item["logo_url"].get("large") or item["logo_url"].get("small"),
                 'package_name': item.get("name"),
                 'maintainer': item["publisher"].get("name"),
                 'publishedDate': parser.parse(item.get("updated_at") or item.get("created_at")),


@@ -47,8 +47,8 @@ def response(resp):
             + ' - '
             + extract_text(app.xpath('./div/div/span[@class="package-license"]')).strip()
         )
-        app_img_src = app.xpath('./img[@class="package-icon"]/@src')[0]
-        results.append({'url': app_url, 'title': app_title, 'content': app_content, 'img_src': app_img_src})
+        thumbnail = app.xpath('./img[@class="package-icon"]/@src')[0]
+        results.append({'url': app_url, 'title': app_title, 'content': app_content, 'thumbnail': thumbnail})
     return results


@@ -50,7 +50,7 @@ def parse_lyric(hit):
         'url': hit['result']['url'],
         'title': hit['result']['full_title'],
         'content': content,
-        'img_src': hit['result']['song_art_image_thumbnail_url'],
+        'thumbnail': hit['result']['song_art_image_thumbnail_url'],
     }
     if timestamp:
         result.update({'publishedDate': datetime.fromtimestamp(timestamp)})
@@ -68,7 +68,7 @@ def parse_artist(hit):
         'url': hit['result']['url'],
         'title': hit['result']['name'],
         'content': '',
-        'img_src': hit['result']['image_url'],
+        'thumbnail': hit['result']['image_url'],
     }
     return result
@@ -84,7 +84,7 @@ def parse_album(hit):
     return {
         'url': res['url'],
         'title': res['full_title'],
-        'img_src': res['cover_art_url'],
+        'thumbnail': res['cover_art_url'],
         'content': content.strip(),
     }


@@ -50,7 +50,7 @@ def response(resp):
                 'url': item.get('html_url'),
                 'title': item.get('full_name'),
                 'content': ' / '.join(content),
-                'img_src': item.get('owner', {}).get('avatar_url'),
+                'thumbnail': item.get('owner', {}).get('avatar_url'),
                 'package_name': item.get('name'),
                 # 'version': item.get('updated_at'),
                 'maintainer': item.get('owner', {}).get('login'),


@@ -48,7 +48,7 @@ def response(resp):
             {
                 'url': base_url + extract_text(eval_xpath(result, url_xpath)),
                 'title': extract_text(eval_xpath(result, title_xpath)),
-                'img_src': extract_text(eval_xpath(result, thumbnail_xpath)),
+                'thumbnail': extract_text(eval_xpath(result, thumbnail_xpath)),
                 'content': extract_text(eval_xpath(result, info_text_xpath)),
                 'metadata': extract_text(eval_xpath(result, author_xpath)),
             }


@@ -365,17 +365,17 @@ def response(resp):
                 logger.debug('ignoring item from the result_xpath list: missing content of title "%s"', title)
                 continue
-            img_src = content_nodes[0].xpath('.//img/@src')
-            if img_src:
-                img_src = img_src[0]
-                if img_src.startswith('data:image'):
+            thumbnail = content_nodes[0].xpath('.//img/@src')
+            if thumbnail:
+                thumbnail = thumbnail[0]
+                if thumbnail.startswith('data:image'):
                     img_id = content_nodes[0].xpath('.//img/@id')
                     if img_id:
-                        img_src = data_image_map.get(img_id[0])
+                        thumbnail = data_image_map.get(img_id[0])
                     else:
-                        img_src = None
+                        thumbnail = None
-            results.append({'url': url, 'title': title, 'content': content, 'img_src': img_src})
+            results.append({'url': url, 'title': title, 'content': content, 'thumbnail': thumbnail})
         except Exception as e:  # pylint: disable=broad-except
             logger.error(e, exc_info=True)


@@ -165,14 +165,14 @@ def response(resp):
         # "https://lh3.googleusercontent.com/DjhQh7DMszk.....z=-p-h100-w100"
         # These URL are long but not personalized (double checked via tor).
-        img_src = extract_text(result.xpath('preceding-sibling::a/figure/img/@src'))
+        thumbnail = extract_text(result.xpath('preceding-sibling::a/figure/img/@src'))
         results.append(
             {
                 'url': href,
                 'title': title,
                 'content': content,
-                'img_src': img_src,
+                'thumbnail': thumbnail,
             }
         )


@@ -64,13 +64,13 @@ def response_movies(resp):
         title = extract_text(eval_xpath(div_2, './div[@title]'))
         metadata = extract_text(eval_xpath(div_2, './div[@class]'))
         img = eval_xpath(div_1, './/img')[0]
-        img_src = img.get('src')
+        thumbnail = img.get('src')
         results.append(
             {
                 "url": url,
                 "title": title,
                 "content": sec_name,
-                "img_src": img_src,
+                "thumbnail": thumbnail,
                 'metadata': metadata,
                 'template': 'videos.html',
             }


@@ -107,8 +107,8 @@ def response(resp):
     # parse results
     for result in eval_xpath_list(dom, '//div[contains(@class, "g ")]'):
-        img_src = eval_xpath_getindex(result, './/img/@src', 0, None)
-        if img_src is None:
+        thumbnail = eval_xpath_getindex(result, './/img/@src', 0, None)
+        if thumbnail is None:
             continue
         title = extract_text(eval_xpath_getindex(result, './/a/h3[1]', 0))
@@ -124,7 +124,7 @@ def response(resp):
                 'title': title,
                 'content': content,
                 'author': pub_info,
-                'thumbnail': img_src,
+                'thumbnail': thumbnail,
                 'template': 'videos.html',
             }
         )


@@ -90,7 +90,7 @@ def response(resp):
                 "title": title,
                 "url": href_base.format(category=categ, entry_id=entry_id),
                 "content": content,
-                "img_src": image_url,
+                "thumbnail": image_url,
             }
         )


@@ -91,7 +91,7 @@ def _get_communities(json):
                 'url': result['community']['actor_id'],
                 'title': result['community']['title'],
                 'content': markdown_to_text(result['community'].get('description', '')),
-                'img_src': result['community'].get('icon', result['community'].get('banner')),
+                'thumbnail': result['community'].get('icon', result['community'].get('banner')),
                 'publishedDate': datetime.strptime(counts['published'][:19], '%Y-%m-%dT%H:%M:%S'),
                 'metadata': metadata,
             }
@@ -120,9 +120,9 @@ def _get_posts(json):
     for result in json["posts"]:
         user = result['creator'].get('display_name', result['creator']['name'])
-        img_src = None
+        thumbnail = None
         if result['post'].get('thumbnail_url'):
-            img_src = result['post']['thumbnail_url'] + '?format=webp&thumbnail=208'
+            thumbnail = result['post']['thumbnail_url'] + '?format=webp&thumbnail=208'
         metadata = (
             f"&#x25B2; {result['counts']['upvotes']} &#x25BC; {result['counts']['downvotes']}"
@@ -140,7 +140,7 @@ def _get_posts(json):
                 'url': result['post']['ap_id'],
                 'title': result['post']['name'],
                 'content': content,
-                'img_src': img_src,
+                'thumbnail': thumbnail,
                 'publishedDate': datetime.strptime(result['post']['published'][:19], '%Y-%m-%dT%H:%M:%S'),
                 'metadata': metadata,
             }


@@ -44,7 +44,7 @@ def response(resp):
                 'url': r_url,
                 'title': result['name'],
                 'iframe_src': iframe_src.format(url=r_url),
-                'img_src': result['pictures']['medium'],
+                'thumbnail': result['pictures']['medium'],
                 'publishedDate': publishedDate,
                 'content': result['user']['name'],
             }


@@ -104,11 +104,11 @@ def response(resp):
             item['metadata'] = html_to_text(result.get('meta_short', ''))
             if result.get('image'):
-                item['img_src'] = image_url.format(image_id=result['image'], filename=result['image_filename'])
+                item['thumbnail'] = image_url.format(image_id=result['image'], filename=result['image_filename'])
         else:
             item['url'] = result['url']
             item['content'] = ', '.join([result['class'], result['info'], result['more']])
-            item['img_src'] = result['image']
+            item['thumbnail'] = result['image']
         results.append(item)


@@ -178,7 +178,7 @@ def response(resp):
             continue
         url, osm, geojson = get_url_osm_geojson(result)
-        img_src = get_thumbnail(get_img_src(result))
+        thumbnail = get_thumbnail(get_img_src(result))
         links, link_keys = get_links(result, user_language)
         data = get_data(result, user_language, link_keys)
@@ -191,7 +191,7 @@ def response(resp):
                 'url': url,
                 'osm': osm,
                 'geojson': geojson,
-                'img_src': img_src,
+                'thumbnail': thumbnail,
                 'links': links,
                 'data': data,
                 'type': get_tag_label(result.get('category'), result.get('type', ''), user_language),


@@ -65,18 +65,18 @@ def construct_body(result):
             page='',
             year=result['release_year'],
         )
-        img_src = pdbe_preview_url.format(pdb_id=result['pdb_id'])
+        thumbnail = pdbe_preview_url.format(pdb_id=result['pdb_id'])
     except KeyError:
         content = None
-        img_src = None
+        thumbnail = None
     # construct url for preview image
     try:
-        img_src = pdbe_preview_url.format(pdb_id=result['pdb_id'])
+        thumbnail = pdbe_preview_url.format(pdb_id=result['pdb_id'])
     except KeyError:
-        img_src = None
+        thumbnail = None
-    return [title, content, img_src]
+    return [title, content, thumbnail]
 def response(resp):
@@ -106,16 +106,16 @@ def response(resp):
             )
             # obsoleted entries don't have preview images
-            img_src = None
+            thumbnail = None
         else:
-            title, content, img_src = construct_body(result)
+            title, content, thumbnail = construct_body(result)
         results.append(
             {
                 'url': pdbe_entry_url.format(pdb_id=result['pdb_id']),
                 'title': title,
                 'content': content,
-                'img_src': img_src,
+                'thumbnail': thumbnail,
             }
         )


@@ -151,7 +151,7 @@ def response(resp):
         elif piped_filter == 'music_songs':
             item["template"] = "default.html"
-            item["img_src"] = result.get("thumbnail", "")
+            item["thumbnail"] = result.get("thumbnail", "")
             item["content"] = result.get("uploaderName", "") or ""
         results.append(item)


@@ -162,7 +162,7 @@ def parse_search_query(json_results):
         result = {
             'url': item['link'],
             'title': item['title'],
-            'img_src': item['image'],
+            'thumbnail': item['image'],
             'content': '',
             'metadata': item.get('source'),
         }
@@ -244,7 +244,7 @@ def response(resp):
                 'url': item.get('link'),
                 'content': '',
                 'metadata': ' / '.join(metadata),
-                'img_src': item.get('image'),
+                'thumbnail': item.get('image'),
             }
         )
@@ -257,7 +257,7 @@ def response(resp):
                 'url': item.get('link'),
                 'content': item.get('description', ''),
                 'metadata': ' / '.join(metadata),
-                'img_src': item.get('image'),
+                'thumbnail': item.get('image'),
             }
         )


@@ -242,15 +242,15 @@ def parse_web_api(resp):
         if pub_date is not None:
             pub_date = datetime.fromtimestamp(pub_date)
         news_media = item.get('media', [])
-        img_src = None
+        thumbnail = None
         if news_media:
-            img_src = news_media[0].get('pict', {}).get('url', None)
+            thumbnail = news_media[0].get('pict', {}).get('url', None)
         results.append(
             {
                 'title': title,
                 'url': res_url,
                 'publishedDate': pub_date,
-                'img_src': img_src,
+                'thumbnail': thumbnail,
             }
         )


@@ -114,7 +114,7 @@ def response(resp):
             {
                 'url': url,
                 'title': result['name'],
-                'img_src': result.get('favicon', '').replace("http://", "https://"),
+                'thumbnail': result.get('favicon', '').replace("http://", "https://"),
                 'content': ' | '.join(content),
                 'metadata': ' | '.join(metadata),
                 'iframe_src': result['url_resolved'].replace("http://", "https://"),


@@ -133,7 +133,7 @@ def response(resp):
         )
         if mtype in ['image'] and subtype in ['bmp', 'gif', 'jpeg', 'png']:
-            item['img_src'] = url
+            item['thumbnail'] = url
         results.append(item)


@@ -22,7 +22,7 @@ base_url = "https://www.rottentomatoes.com"
 results_xpath = "//search-page-media-row"
 url_xpath = "./a[1]/@href"
 title_xpath = "./a/img/@alt"
-img_src_xpath = "./a/img/@src"
+thumbnail_xpath = "./a/img/@src"
 release_year_xpath = "concat('From ', string(./@releaseyear))"
 score_xpath = "concat('Score: ', string(./@tomatometerscore))"
 cast_xpath = "concat('Starring ', string(./@cast))"
@@ -52,7 +52,7 @@ def response(resp):
                 'url': extract_text(eval_xpath(result, url_xpath)),
                 'title': extract_text(eval_xpath(result, title_xpath)),
                 'content': ', '.join(content),
-                'img_src': extract_text(eval_xpath(result, img_src_xpath)),
+                'thumbnail': extract_text(eval_xpath(result, thumbnail_xpath)),
             }
         )


@@ -77,8 +77,7 @@ def response(resp):
             {
                 'url': url + 'structure/' + result['id'],
                 'title': result['label'],
-                # 'thumbnail': thumbnail,
-                'img_src': thumbnail,
+                'thumbnail': thumbnail,
                 'content': html_to_text(content),
             }
         )


@@ -94,9 +94,9 @@ def response(resp):
             'publishedDate': parser.parse(result['last_modified']),
             'iframe_src': "https://w.soundcloud.com/player/?url=" + uri,
         }
-        img_src = result['artwork_url'] or result['user']['avatar_url']
-        if img_src:
-            res['img_src'] = img_src
+        thumbnail = result['artwork_url'] or result['user']['avatar_url']
+        if thumbnail:
+            res['thumbnail'] = thumbnail
         results.append(res)
     return results


@@ -135,7 +135,7 @@ title_xpath = None
 '''`XPath selector`_ of result's ``title``.'''
 thumbnail_xpath = False
-'''`XPath selector`_ of result's ``img_src``.'''
+'''`XPath selector`_ of result's ``thumbnail``.'''
 suggestion_xpath = ''
 '''`XPath selector`_ of result's ``suggestion``.'''
@@ -266,7 +266,7 @@ def response(resp):  # pylint: disable=too-many-branches
         if thumbnail_xpath:
             thumbnail_xpath_result = eval_xpath_list(result, thumbnail_xpath)
             if len(thumbnail_xpath_result) > 0:
-                tmp_result['img_src'] = extract_url(thumbnail_xpath_result, search_url)
+                tmp_result['thumbnail'] = extract_url(thumbnail_xpath_result, search_url)
         # add alternative cached url if available
         if cached_xpath:


@@ -77,9 +77,9 @@ def response(resp):
         url = parse_url(url)
         title = extract_text(result.xpath('.//h4/a'))
         content = extract_text(result.xpath('.//p'))
-        img_src = eval_xpath_getindex(result, './/img/@data-src', 0, None)
-        item = {'url': url, 'title': title, 'content': content, 'img_src': img_src}
+        thumbnail = eval_xpath_getindex(result, './/img/@data-src', 0, None)
+        item = {'url': url, 'title': title, 'content': content, 'thumbnail': thumbnail}
         pub_date = extract_text(result.xpath('.//span[contains(@class,"s-time")]'))
         ago = AGO_RE.search(pub_date)


@@ -52,11 +52,11 @@ def response(resp):
         if description is not None:
             content = markdown_to_text(description['text'])
-        img_src = None
+        thumbnail = None
         if result['display']['images']:
-            img_src = result['display']['images'][0]
+            thumbnail = result['display']['images'][0]
         elif result['content']['details']['images']:
-            img_src = result['content']['details']['images'][0]['resizableImageUrl']
+            thumbnail = result['content']['details']['images'][0]['resizableImageUrl']
         url = result['display']['source']['sourceRecipeUrl']
         if 'www.yummly.com/private' in url:
@@ -67,7 +67,7 @@ def response(resp):
                 'url': url,
                 'title': result['display']['displayName'],
                 'content': content,
-                'img_src': img_src,
+                'thumbnail': thumbnail,
                 'metadata': gettext('Language') + f": {result['locale'].split('-')[0]}",
             }
         )


@@ -141,9 +141,12 @@ def _parse_result(item) -> Dict[str, Any]:
         "authors": [extract_text(author) for author in author_elements],
         "publisher": _text(item, './/a[@title="Publisher"]'),
         "type": _text(item, './/div[contains(@class, "property__file")]//div[contains(@class, "property_value")]'),
-        "img_src": _text(item, './/img[contains(@class, "cover")]/@data-src'),
     }
+    thumbnail = _text(item, './/img[contains(@class, "cover")]/@data-src')
+    if not thumbnail.startswith('/'):
+        result["thumbnail"] = thumbnail
     year = _text(item, './/div[contains(@class, "property_year")]//div[contains(@class, "property_value")]')
     if year:
         result["publishedDate"] = datetime.strptime(year, '%Y')


@@ -281,24 +281,12 @@ article[data-vim-selected].category-social {
     color: var(--color-result-description-highlight-font);
   }

-  img {
-    &.thumbnail {
-      .ltr-float-left();
-      padding-top: 0.6rem;
-      .ltr-padding-right(1rem);
-      width: 20rem;
-      height: unset; // remove heigth value that was needed for lazy loading
-    }
-
-    &.image {
-      .ltr-float-left();
-      padding-top: 0.6rem;
-      .ltr-padding-right(1rem);
-      width: 7rem;
-      max-height: 7rem;
-      object-fit: scale-down;
-      object-position: right top;
-    }
+  img.thumbnail {
+    .ltr-float-left();
+    padding-top: 0.6rem;
+    .ltr-padding-right(1rem);
+    width: 7rem;
+    height: unset; // remove heigth value that was needed for lazy loading
   }

   .break {
@@ -394,6 +382,16 @@ article[data-vim-selected].category-social {
     padding: 10px 0 0 0;
   }

+  .result-videos {
+    img.thumbnail {
+      .ltr-float-left();
+      padding-top: 0.6rem;
+      .ltr-padding-right(1rem);
+      width: 20rem;
+      height: unset; // remove heigth value that was needed for lazy loading
+    }
+  }
+
   .result-videos .content {
     overflow: hidden;
   }


@@ -25,8 +25,7 @@
   <span class="url_o{{loop.index}}"><span class="url_i{{loop.index}}">{{- part -}}</span></span>
   {%- endfor %}
   {{- result_close_link() -}}
-  {%- if result.img_src %}{{ result_open_link(result.url) }}<img class="image" src="{{ image_proxify(result.img_src) }}" title="{{ result.title|striptags }}" loading="lazy" width="200" height="200">{{ result_close_link() }}{% endif -%}
-  {%- if result.thumbnail %}{{ result_open_link(result.url) }}<img class="thumbnail" src="{{ image_proxify(result.thumbnail) }}" title="{{ result.title|striptags }}" loading="lazy" width="200" height="200">{{ result_close_link() }}{% endif -%}
+  {%- if result.thumbnail %}{{ result_open_link(result.url) }}<img class="thumbnail" src="{{ image_proxify(result.thumbnail) }}" title="{{ result.title|striptags }}" loading="lazy">{{ result_close_link() }}{% endif -%}
   <h3>{{ result_link(result.url, result.title|safe) }}</h3>
 {%- endmacro -%}