From 62cc2a5658c9f2cfe75c0af0e5db125d2c6f8e38 Mon Sep 17 00:00:00 2001 From: Dalf Date: Thu, 4 Jun 2015 18:30:08 +0200 Subject: [PATCH 1/2] [fix] bing_news based on RSS output format --- searx/engines/bing_news.py | 107 +++++----- searx/tests/engines/test_bing_news.py | 280 +++++++++----------------- searx/utils.py | 8 + 3 files changed, 157 insertions(+), 238 deletions(-) diff --git a/searx/engines/bing_news.py b/searx/engines/bing_news.py index 56be4b912..a2397c48e 100644 --- a/searx/engines/bing_news.py +++ b/searx/engines/bing_news.py @@ -6,18 +6,17 @@ max. 5000 query/month @using-api no (because of query limit) - @results HTML (using search portal) - @stable no (HTML can change) - @parse url, title, content, publishedDate + @results RSS (using search portal) + @stable yes (except perhaps for the images) + @parse url, title, content, publishedDate, thumbnail """ from urllib import urlencode -from cgi import escape -from lxml import html -from datetime import datetime, timedelta +from urlparse import urlparse, parse_qsl +from datetime import datetime from dateutil import parser -import re -from searx.engines.xpath import extract_text +from lxml import etree +from searx.utils import list_get # engine dependent config categories = ['news'] @@ -26,7 +25,25 @@ language_support = True # search-url base_url = 'https://www.bing.com/' -search_string = 'news/search?{query}&first={offset}' +search_string = 'news/search?{query}&first={offset}&format=RSS' + + +# remove click +def url_cleanup(url_string): + parsed_url = urlparse(url_string) + if parsed_url.netloc == 'www.bing.com' and parsed_url.path == '/news/apiclick.aspx': + query = dict(parse_qsl(parsed_url.query)) + return query.get('url', None) + return url_string + + +# replace the http://*bing4.com/th?id=... by https://www.bing.com/th?id=... 
+def image_url_cleanup(url_string):
+    parsed_url = urlparse(url_string)
+    if parsed_url.netloc.endswith('bing4.com') and parsed_url.path == '/th':
+        query = dict(parse_qsl(parsed_url.query))
+        return "https://www.bing.com/th?id=" + query.get('id')
+    return url_string
 
 
 # do search-request
@@ -42,8 +59,6 @@ def request(query, params):
         query=urlencode({'q': query, 'setmkt': language}),
         offset=offset)
 
-    params['cookies']['_FP'] = "ui=en-US"
-
     params['url'] = base_url + search_path
 
     return params
@@ -53,50 +68,44 @@ def request(query, params):
 def response(resp):
     results = []
 
-    dom = html.fromstring(resp.content)
+    rss = etree.fromstring(resp.content)
+
+    ns = rss.nsmap
 
     # parse results
-    for result in dom.xpath('//div[@class="sn_r"]'):
-        link = result.xpath('.//div[@class="newstitle"]/a')[0]
-        url = link.attrib.get('href')
-        title = extract_text(link)
-        contentXPath = result.xpath('.//div[@class="sn_txt"]/div//span[@class="sn_snip"]')
-        content = escape(extract_text(contentXPath))
+    for item in rss.xpath('./channel/item'):
+        # url / title / content
+        url = url_cleanup(item.xpath('./link/text()')[0])
+        title = list_get(item.xpath('./title/text()'), 0, url)
+        content = list_get(item.xpath('./description/text()'), 0, '')
 
-        # parse publishedDate
-        publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div'
-                                          '//div[contains(@class,"sn_ST")]'
-                                          '//span[contains(@class,"sn_tm")]')
+        # publishedDate
+        publishedDate = list_get(item.xpath('./pubDate/text()'), 0)
+        try:
+            publishedDate = parser.parse(publishedDate, dayfirst=False)
+        except TypeError:
+            publishedDate = datetime.now()
+        except ValueError:
+            publishedDate = datetime.now()
 
-        publishedDate = escape(extract_text(publishedDateXPath))
-
-        if re.match("^[0-9]+ minute(s|) ago$", publishedDate):
-            timeNumbers = re.findall(r'\d+', publishedDate)
-            publishedDate = datetime.now() - timedelta(minutes=int(timeNumbers[0]))
-        elif re.match("^[0-9]+ hour(s|) ago$", publishedDate):
-            timeNumbers = re.findall(r'\d+', publishedDate)
-            publishedDate = datetime.now() - timedelta(hours=int(timeNumbers[0]))
-        elif re.match("^[0-9]+ hour(s|), [0-9]+ minute(s|) ago$", publishedDate):
-            timeNumbers = re.findall(r'\d+', publishedDate)
-            publishedDate = datetime.now()\
-                - timedelta(hours=int(timeNumbers[0]))\
-                - timedelta(minutes=int(timeNumbers[1]))
-        elif re.match("^[0-9]+ day(s|) ago$", publishedDate):
-            timeNumbers = re.findall(r'\d+', publishedDate)
-            publishedDate = datetime.now() - timedelta(days=int(timeNumbers[0]))
-        else:
-            try:
-                publishedDate = parser.parse(publishedDate, dayfirst=False)
-            except TypeError:
-                publishedDate = datetime.now()
-            except ValueError:
-                publishedDate = datetime.now()
+        # thumbnail
+        thumbnail = list_get(item.xpath('./News:Image/text()', namespaces=ns), 0)
+        if thumbnail is not None:
+            thumbnail = image_url_cleanup(thumbnail)
 
         # append result
-        results.append({'url': url,
-                        'title': title,
-                        'publishedDate': publishedDate,
-                        'content': content})
+        if thumbnail is not None:
+            results.append({'template': 'videos.html',
+                            'url': url,
+                            'title': title,
+                            'publishedDate': publishedDate,
+                            'content': content,
+                            'thumbnail': thumbnail})
+        else:
+            results.append({'url': url,
+                            'title': title,
+                            'publishedDate': publishedDate,
+                            'content': content})
 
     # return results
    return results
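A doctest-style sketch of what the two new cleanup helpers are expected to do
(the sample URLs are illustrative, not taken from a real Bing response):

    >>> from searx.engines.bing_news import url_cleanup, image_url_cleanup
    >>> url_cleanup('https://www.bing.com/news/apiclick.aspx?'
    ...             'ref=FexRss&url=http%3a%2f%2furl.of.article%2f&mkt=en-us')
    'http://url.of.article/'
    >>> url_cleanup('http://example.com/article')  # non-Bing urls pass through
    'http://example.com/article'
    >>> image_url_cleanup('http://a1.bing4.com/th?id=ON.1337&pid=News')
    'https://www.bing.com/th?id=ON.1337'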
diff --git a/searx/tests/engines/test_bing_news.py b/searx/tests/engines/test_bing_news.py
index 4ef0c3af7..a64d59b7b 100644
--- a/searx/tests/engines/test_bing_news.py
+++ b/searx/tests/engines/test_bing_news.py
@@ -2,6 +2,7 @@ from collections import defaultdict
 import mock
 from searx.engines import bing_news
 from searx.testing import SearxTestCase
+import lxml
 
 
 class TestBingNewsEngine(SearxTestCase):
@@ -16,14 +17,10 @@ class TestBingNewsEngine(SearxTestCase):
         self.assertIn(query, params['url'])
         self.assertIn('bing.com', params['url'])
         self.assertIn('fr', params['url'])
-        self.assertIn('_FP', params['cookies'])
-        self.assertIn('en', params['cookies']['_FP'])
 
         dicto['language'] = 'all'
         params = bing_news.request(query, dicto)
         self.assertIn('en', params['url'])
-        self.assertIn('_FP', params['cookies'])
-        self.assertIn('en', params['cookies']['_FP'])
 
     def test_response(self):
         self.assertRaises(AttributeError, bing_news.response, None)
@@ -37,200 +34,105 @@ class TestBingNewsEngine(SearxTestCase):
         response = mock.Mock(content='')
         self.assertEqual(bing_news.response(response), [])
 
-        html = """
-        <div class="sn_r">
-            <div class="newstitle">
-                <a href="http://url.of.article/" target="_blank">
-                    Title
-                </a>
-            </div>
-            <div class="sn_txt">
-                <div class="sn_oi">
-                    <span class="sn_snip">Article Content</span>
-                    <div class="sn_ST">
-                        <cite class="sn_src">metronews.fr</cite>
-                        &nbsp;&#0183;&#32;
-                        <span class="sn_tm">44 minutes ago</span>
-                    </div>
-                </div>
-            </div>
-        </div>
-        """
+        html = """<?xml version="1.0" encoding="utf-8" ?>
+<rss version="2.0" xmlns:News="https://www.bing.com:443/news/search?q=python&amp;setmkt=en-US&amp;first=1&amp;format=RSS">
+    <channel>
+        <title>python - Bing News</title>
+        <link>https://www.bing.com:443/news/search?q=python&amp;setmkt=en-US&amp;first=1&amp;format=RSS</link>
+        <description>Search results</description>
+        <image>
+            <url>http://10.53.64.9/rsslogo.gif</url>
+            <title>test</title>
+            <link>https://www.bing.com:443/news/search?q=test&amp;setmkt=en-US&amp;first=1&amp;format=RSS</link>
+        </image>
+        <copyright>Copyright</copyright>
+        <item>
+            <title>Title</title>
+            <link>https://www.bing.com/news/apiclick.aspx?ref=FexRss&amp;aid=&amp;tid=c237eccc50bd4758b106a5e3c94fce09&amp;url=http%3a%2f%2furl.of.article%2f&amp;c=xxxxxxxxx&amp;mkt=en-us</link>
+            <description>Article Content</description>
+            <pubDate>Tue, 02 Jun 2015 13:37:00 GMT</pubDate>
+            <News:Source>Infoworld</News:Source>
+            <News:Image>http://a1.bing4.com/th?id=ON.13371337133713371337133713371337&amp;pid=News</News:Image>
+            <News:ImageSize>w={0}&amp;h={1}&amp;c=7</News:ImageSize>
+            <News:ImageKeepOriginalRatio></News:ImageKeepOriginalRatio>
+            <News:ImageMaxWidth>620</News:ImageMaxWidth>
+            <News:ImageMaxHeight>413</News:ImageMaxHeight>
+        </item>
+        <item>
+            <title>Another Title</title>
+            <link>https://www.bing.com/news/apiclick.aspx?ref=FexRss&amp;aid=&amp;tid=c237eccc50bd4758b106a5e3c94fce09&amp;url=http%3a%2f%2fanother.url.of.article%2f&amp;c=xxxxxxxxx&amp;mkt=en-us</link>
+            <description>Another Article Content</description>
+            <pubDate>Tue, 02 Jun 2015 13:37:00 GMT</pubDate>
+        </item>
+    </channel>
+</rss>"""  # noqa
+        response = mock.Mock(content=html)
+        results = bing_news.response(response)
+        self.assertEqual(type(results), list)
+        self.assertEqual(len(results), 2)
+        self.assertEqual(results[0]['title'], 'Title')
+        self.assertEqual(results[0]['url'], 'http://url.of.article/')
+        self.assertEqual(results[0]['content'], 'Article Content')
+        self.assertEqual(results[0]['thumbnail'], 'https://www.bing.com/th?id=ON.13371337133713371337133713371337')
+        self.assertEqual(results[1]['title'], 'Another Title')
+        self.assertEqual(results[1]['url'], 'http://another.url.of.article/')
+        self.assertEqual(results[1]['content'], 'Another Article Content')
+        self.assertNotIn('thumbnail', results[1])
+
+        html = """<?xml version="1.0" encoding="utf-8" ?>
+<rss version="2.0" xmlns:News="https://www.bing.com:443/news/search?q=python&amp;setmkt=en-US&amp;first=1&amp;format=RSS">
+    <channel>
+        <title>python - Bing News</title>
+        <link>https://www.bing.com:443/news/search?q=python&amp;setmkt=en-US&amp;first=1&amp;format=RSS</link>
+        <description>Search results</description>
+        <image>
+            <url>http://10.53.64.9/rsslogo.gif</url>
+            <title>test</title>
+            <link>https://www.bing.com:443/news/search?q=test&amp;setmkt=en-US&amp;first=1&amp;format=RSS</link>
+        </image>
+        <copyright>Copyright</copyright>
+        <item>
+            <title>Title</title>
+            <link>http://another.url.of.article/</link>
+            <description>Article Content</description>
+            <pubDate>garbage</pubDate>
+            <News:Source>Infoworld</News:Source>
+            <News:Image>http://another.bing.com/image</News:Image>
+            <News:ImageSize>w={0}&amp;h={1}&amp;c=7</News:ImageSize>
+            <News:ImageKeepOriginalRatio></News:ImageKeepOriginalRatio>
+            <News:ImageMaxWidth>620</News:ImageMaxWidth>
+            <News:ImageMaxHeight>413</News:ImageMaxHeight>
+        </item>
+    </channel>
+</rss>"""  # noqa
         response = mock.Mock(content=html)
         results = bing_news.response(response)
         self.assertEqual(type(results), list)
         self.assertEqual(len(results), 1)
         self.assertEqual(results[0]['title'], 'Title')
-        self.assertEqual(results[0]['url'], 'http://url.of.article/')
+        self.assertEqual(results[0]['url'], 'http://another.url.of.article/')
         self.assertEqual(results[0]['content'], 'Article Content')
+        self.assertEqual(results[0]['thumbnail'], 'http://another.bing.com/image')
 
-        html = """
-        <div class="sn_r">
-            <div class="newstitle">
-                <a href="http://url.of.article/" target="_blank">
-                    Title
-                </a>
-            </div>
-            <div class="sn_txt">
-                <div class="sn_oi">
-                    <span class="sn_snip">Article Content</span>
-                    <div class="sn_ST">
-                        <cite class="sn_src">metronews.fr</cite>
-                        &nbsp;&#0183;&#32;
-                        <span class="sn_tm">44 minutes ago</span>
-                    </div>
-                </div>
-            </div>
-        </div>
-        <div class="sn_r">
-            <div class="newstitle">
-                <a href="http://url.of.article/" target="_blank">
-                    Title
-                </a>
-            </div>
-            <div class="sn_txt">
-                <div class="sn_oi">
-                    <span class="sn_snip">Article Content</span>
-                    <div class="sn_ST">
-                        <cite class="sn_src">metronews.fr</cite>
-                        &nbsp;&#0183;&#32;
-                        <span class="sn_tm">3 hours, 44 minutes ago</span>
-                    </div>
-                </div>
-            </div>
-        </div>
-        <div class="sn_r">
-            <div class="newstitle">
-                <a href="http://url.of.article/" target="_blank">
-                    Title
-                </a>
-            </div>
-            <div class="sn_txt">
-                <div class="sn_oi">
-                    <span class="sn_snip">Article Content</span>
-                    <div class="sn_ST">
-                        <cite class="sn_src">metronews.fr</cite>
-                        &nbsp;&#0183;&#32;
-                        <span class="sn_tm">44 hours ago</span>
-                    </div>
-                </div>
-            </div>
-        </div>
-        <div class="sn_r">
-            <div class="newstitle">
-                <a href="http://url.of.article/" target="_blank">
-                    Title
-                </a>
-            </div>
-            <div class="sn_txt">
-                <div class="sn_oi">
-                    <span class="sn_snip">Article Content</span>
-                    <div class="sn_ST">
-                        <cite class="sn_src">metronews.fr</cite>
-                        &nbsp;&#0183;&#32;
-                        <span class="sn_tm">2 days ago</span>
-                    </div>
-                </div>
-            </div>
-        </div>
-        <div class="sn_r">
-            <div class="newstitle">
-                <a href="http://url.of.article/" target="_blank">
-                    Title
-                </a>
-            </div>
-            <div class="sn_txt">
-                <div class="sn_oi">
-                    <span class="sn_snip">Article Content</span>
-                    <div class="sn_ST">
-                        <cite class="sn_src">metronews.fr</cite>
-                        &nbsp;&#0183;&#32;
-                        <span class="sn_tm">27/01/2015</span>
-                    </div>
-                </div>
-            </div>
-        </div>
-        <div class="sn_r">
-            <div class="newstitle">
-                <a href="http://url.of.article/" target="_blank">
-                    Title
-                </a>
-            </div>
-            <div class="sn_txt">
-                <div class="sn_oi">
-                    <span class="sn_snip">Article Content</span>
-                    <div class="sn_ST">
-                        <cite class="sn_src">metronews.fr</cite>
-                        &nbsp;&#0183;&#32;
-                        <span class="sn_tm">Il y a 3 heures</span>
-                    </div>
-                </div>
-            </div>
-        </div>
-        """
-        response = mock.Mock(content=html)
-        results = bing_news.response(response)
-        self.assertEqual(type(results), list)
-        self.assertEqual(len(results), 6)
+        html = """<?xml version="1.0" encoding="utf-8" ?>
+<rss version="2.0" xmlns:News="https://www.bing.com:443/news/search?q=python&amp;setmkt=en-US&amp;first=1&amp;format=RSS">
+    <channel>
+        <title>python - Bing News</title>
+        <link>https://www.bing.com:443/news/search?q=python&amp;setmkt=en-US&amp;first=1&amp;format=RSS</link>
+        <description>Search results</description>
+        <image>
+            <url>http://10.53.64.9/rsslogo.gif</url>
+            <title>test</title>
+            <link>https://www.bing.com:443/news/search?q=test&amp;setmkt=en-US&amp;first=1&amp;format=RSS</link>
+        </image>
+    </channel>
+</rss>"""  # noqa
 
-        html = """
-        <div class="sn_r2">
-            <div class="newstitle">
-                <a href="http://url.of.article/" target="_blank">
-                    Title
-                </a>
-            </div>
-            <div class="sn_txt">
-                <div class="sn_oi">
-                    <span class="sn_snip">Article Content</span>
-                    <div class="sn_ST">
-                        <cite class="sn_src">metronews.fr</cite>
-                        &nbsp;&#0183;&#32;
-                        <span class="sn_tm">44 minutes ago</span>
-                    </div>
-                </div>
-            </div>
-        </div>
-        """
         response = mock.Mock(content=html)
         results = bing_news.response(response)
         self.assertEqual(type(results), list)
         self.assertEqual(len(results), 0)
+
+        html = """gabarge"""
+        response = mock.Mock(content=html)
+        self.assertRaises(lxml.etree.XMLSyntaxError, bing_news.response, response)
diff --git a/searx/utils.py b/searx/utils.py
index 129971e31..c9784159c 100644
--- a/searx/utils.py
+++ b/searx/utils.py
@@ -228,6 +228,14 @@ def prettify_url(url):
     return url
 
 
+# get element in list or default value
+def list_get(a_list, index, default=None):
+    if len(a_list) > index:
+        return a_list[index]
+    else:
+        return default
+
+
 def get_blocked_engines(engines, cookies):
     if 'blocked_engines' not in cookies:
         return [(engine_name, category) for engine_name in engines

From dafedbfc55773bac65838d1a63765ecb78bbc7ab Mon Sep 17 00:00:00 2001
From: Dalf
Date: Thu, 4 Jun 2015 18:34:44 +0200
Subject: [PATCH 2/2] youtube_noapi uses searx.utils.list_get

---
 searx/engines/youtube_noapi.py | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

diff --git a/searx/engines/youtube_noapi.py b/searx/engines/youtube_noapi.py
index 108b8950f..401fca4c9 100644
--- a/searx/engines/youtube_noapi.py
+++ b/searx/engines/youtube_noapi.py
@@ -11,6 +11,7 @@ from urllib import quote_plus
 from lxml import html
 from searx.engines.xpath import extract_text
+from searx.utils import list_get
 
 # engine dependent config
 categories = ['videos', 'music']
@@ -34,14 +35,6 @@ title_xpath = './/div[@class="yt-lockup-content"]/h3/a'
 content_xpath = './/div[@class="yt-lockup-content"]/div[@class="yt-lockup-description yt-ui-ellipsis yt-ui-ellipsis-2"]'
 
 
-# get element in list or default value
-def list_get(a_list, index, default=None):
-    if len(a_list) > index:
-        return a_list[index]
-    else:
-        return default
-
-
 # returns extract_text on the first result selected by the xpath or None
 def extract_text_from_dom(result, xpath):
     r = result.xpath(xpath)