From 6b058962e1f87a17ce2d9c2bcb4faa73df285df3 Mon Sep 17 00:00:00 2001
From: Dalf <alex@al-f.net>
Date: Mon, 22 Sep 2014 22:55:51 +0200
Subject: [PATCH 1/6] [fix] when two results are merged, really use the content
 with more text

---
 searx/search.py | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)
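
Note: the old test (res.get('content') > duplicated.get('content'))
compared the two strings lexicographically under Python 2 ordering, not
by amount of text. content_result_len() counts characters after
stripping punctuation and whitespace, so the longer snippet really
wins. A minimal sketch of the intended behaviour (the sample dicts are
invented):

    import re

    def content_result_len(result):
        # count only meaningful characters: drop punctuation and spaces
        content = result.get('content')
        if isinstance(content, basestring):
            return len(re.sub('[,;:!?\./\\\\ ()_-]', '', content))
        return 0

    a = {'content': 'short text.'}
    b = {'content': 'a much longer, more descriptive snippet of text'}
    assert content_result_len(b) > content_result_len(a)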

diff --git a/searx/search.py b/searx/search.py
index c861a795a..10916cc50 100644
--- a/searx/search.py
+++ b/searx/search.py
@@ -16,6 +16,7 @@ along with searx. If not, see < http://www.gnu.org/licenses/ >.
 '''
 
 import grequests
+import re
 from itertools import izip_longest, chain
 from datetime import datetime
 from operator import itemgetter
@@ -76,6 +77,13 @@ def make_callback(engine_name, results, suggestions, callback, params):
 
     return process_callback
 
+# return the meaningful length of the content for a result
+def content_result_len(result):
+    if isinstance(result.get('content'), basestring):
+        content = re.sub('[,;:!?\./\\\\ ()_-]', '', result.get('content'))
+        return len(content)
+    else:
+        return 0
 
 # score results and remove duplications
 def score_results(results):
@@ -110,6 +118,9 @@ def score_results(results):
         duplicated = False
 
         # check for duplicates
+        if 'content' in res:
+            res['content'] = re.sub(' +', ' ', res['content'].strip().replace('\n', ''))
+
         for new_res in results:
             # remove / from the end of the url if required
             p1 = res['parsed_url'].path[:-1] if res['parsed_url'].path.endswith('/') else res['parsed_url'].path  # noqa
@@ -126,7 +137,7 @@ def score_results(results):
         # merge duplicates together
         if duplicated:
             # using content with more text
-            if res.get('content') > duplicated.get('content'):
+            if content_result_len(res) > content_result_len(duplicated):
                 duplicated['content'] = res['content']
 
             # increase result-score

From e39d9fe5423a0fceed1d15dc63c1f8aa30d72e44 Mon Sep 17 00:00:00 2001
From: Dalf <alex@al-f.net>
Date: Mon, 22 Sep 2014 23:39:21 +0200
Subject: [PATCH 2/6] update comment

---
 searx/search.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)
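
Note: the normalisation now runs once per result, before the duplicate
check, instead of being buried inside it. What the expression does, on
an invented snippet (note the newline is dropped, not replaced by a
space):

    import re

    content = '  first   part\nsecond part  '
    content = re.sub(' +', ' ', content.strip().replace('\n', ''))
    # -> 'first partsecond part'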

diff --git a/searx/search.py b/searx/search.py
index 10916cc50..48f8012f1 100644
--- a/searx/search.py
+++ b/searx/search.py
@@ -106,8 +106,13 @@ def score_results(results):
             res['host'] = res['host'].replace('www.', '', 1)
 
         res['engines'] = [res['engine']]
+
         weight = 1.0
 
+        # strip multiple spaces and carriage returns from content
+        if 'content' in res:
+            res['content'] = re.sub(' +', ' ', res['content'].strip().replace('\n', ''))
+
         # get weight of this engine if possible
         if hasattr(engines[res['engine']], 'weight'):
             weight = float(engines[res['engine']].weight)
@@ -115,12 +120,8 @@ def score_results(results):
         # calculate score for that engine
         score = int((flat_len - i) / engines_len) * weight + 1
 
-        duplicated = False
-
         # check for duplicates
-        if 'content' in res:
-            res['content'] = re.sub(' +', ' ', res['content'].strip().replace('\n', ''))
-
+        duplicated = False
         for new_res in results:
             # remove / from the end of the url if required
             p1 = res['parsed_url'].path[:-1] if res['parsed_url'].path.endswith('/') else res['parsed_url'].path  # noqa

From 6bfd5663539052a64c984f5bdb7135d0d652c923 Mon Sep 17 00:00:00 2001
From: Dalf <alex@al-f.net>
Date: Sun, 28 Sep 2014 16:51:41 +0200
Subject: [PATCH 3/6] [enh] add infoboxes and answers

---
 searx/engines/currency_convert.py             |  11 +-
 searx/engines/duckduckgo_definitions.py       | 120 ++++++++++-
 searx/engines/wikidata.py                     | 193 ++++++++++++++++++
 searx/search.py                               | 104 +++++++++-
 searx/settings.yml                            |   2 +-
 searx/static/default/css/style.css            |  76 +------
 searx/static/default/less/style.less          | 124 ++++++++---
 .../default/result_templates/default.html     |   4 +-
 searx/templates/default/results.html          |  16 ++
 searx/webapp.py                               |   4 +-
 10 files changed, 524 insertions(+), 130 deletions(-)
 create mode 100644 searx/engines/wikidata.py
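
Notes: an engine callback can now emit three result shapes besides the
classic link result; make_callback() routes them on the presence of a
key. The shapes, as produced by duckduckgo_definitions.py and
wikidata.py (field values below are invented examples):

    # collected into the answers set
    {'answer': '1 EUR = 1.28 USD'}

    # collected into the suggestions set
    {'suggestion': 'some related query'}

    # collected into the infoboxes list; 'id' is later used by
    # merge_infoboxes() to merge boxes describing the same entity
    {'infobox': 'Paris',
     'id': 'https://en.wikipedia.org/wiki/Paris',
     'content': 'Capital and largest city of France',
     'img_src': None,
     'attributes': [{'label': 'Population', 'value': '2,241,346'}],
     'urls': [{'title': 'Wikipedia',
               'url': 'https://en.wikipedia.org/wiki/Paris'}]}

merge_infoboxes() deduplicates boxes reported by several engines using
that 'id' (the English Wikipedia URL for wikidata, the abstract or
definition URL for duckduckgo_definitions), merging urls, attributes
and content into the first box seen. A sketch of the effect, with
invented boxes:

    boxes = [
        {'infobox': 'Paris', 'id': 'https://en.wikipedia.org/wiki/Paris',
         'content': 'short'},
        {'infobox': 'Paris', 'id': 'https://en.wikipedia.org/wiki/Paris',
         'content': 'a noticeably longer description'},
    ]
    merged = merge_infoboxes(boxes)
    # len(merged) == 1, and merged[0]['content'] is the longer text

In wikidata.py, get_geolink() deduces an OpenStreetMap zoom level from
the precision of the P625 globe-coordinate claim, using the quadratic
fit quoted in the code comment (with the constant term rounded from
14.1186 up to 15). Worked by hand, the code yields at the sampled
precisions:

    zoom = lambda p: 19 if p < 0.0003 else int(15 - p*8.8322 + p*p*0.625447)

    zoom(13)                  # -> 5   (sample: 5)
    zoom(1)                   # -> 6   (sample: 6)
    zoom(0.016666666666667)   # -> 14  (sample: 9; the fit is rough here)
    zoom(0.00027777777777778) # -> 19  (sample: 19, via the < 0.0003 branch)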

diff --git a/searx/engines/currency_convert.py b/searx/engines/currency_convert.py
index 561527bce..b5f0953d8 100644
--- a/searx/engines/currency_convert.py
+++ b/searx/engines/currency_convert.py
@@ -38,16 +38,14 @@ def response(resp):
     except:
         return results
 
-    title = '{0} {1} in {2} is {3}'.format(
+    answer = '{0} {1} = {2} {3} (1 {1} = {4} {3})'.format(
         resp.search_params['ammount'],
         resp.search_params['from'],
+        resp.search_params['ammount'] * conversion_rate,
         resp.search_params['to'],
-        resp.search_params['ammount'] * conversion_rate
+        conversion_rate
     )
 
-    content = '1 {0} is {1} {2}'.format(resp.search_params['from'],
-                                        conversion_rate,
-                                        resp.search_params['to'])
     now_date = datetime.now().strftime('%Y%m%d')
     url = 'http://finance.yahoo.com/currency/converter-results/{0}/{1}-{2}-to-{3}.html'  # noqa
     url = url.format(
@@ -56,6 +54,7 @@ def response(resp):
         resp.search_params['from'].lower(),
         resp.search_params['to'].lower()
     )
-    results.append({'title': title, 'content': content, 'url': url})
+
+    results.append({'answer' : answer, 'url': url})
 
     return results
diff --git a/searx/engines/duckduckgo_definitions.py b/searx/engines/duckduckgo_definitions.py
index 3037aae53..3da7352a4 100644
--- a/searx/engines/duckduckgo_definitions.py
+++ b/searx/engines/duckduckgo_definitions.py
@@ -1,10 +1,25 @@
 import json
 from urllib import urlencode
+from lxml import html
+from searx.engines.xpath import extract_text
 
-url = 'http://api.duckduckgo.com/?{query}&format=json&pretty=0&no_redirect=1'
+url = 'https://api.duckduckgo.com/?{query}&format=json&pretty=0&no_redirect=1&d=1'
 
+def result_to_text(url, text, htmlResult):
+    # TODO: remove results ending with "Meaning" or "Category"
+    dom = html.fromstring(htmlResult)
+    a = dom.xpath('//a')
+    if len(a)>=1:
+        return extract_text(a[0])
+    else:
+        return text
+
+def html_to_text(htmlFragment):
+    dom = html.fromstring(htmlFragment)
+    return extract_text(dom)
 
 def request(query, params):
+    # TODO add kl={locale}
     params['url'] = url.format(query=urlencode({'q': query}))
     return params
 
@@ -12,12 +27,103 @@ def request(query, params):
 def response(resp):
     search_res = json.loads(resp.text)
     results = []
+
+    content = ''
+    heading = search_res.get('Heading', '')
+    attributes = []
+    urls = []
+    infobox_id = None
+    relatedTopics = []
+
+    # add answer if there is one
+    answer = search_res.get('Answer', '')
+    if answer != '':
+        results.append({ 'answer' : html_to_text(answer) })
+
+    # add infobox
     if 'Definition' in search_res:
-        if search_res.get('AbstractURL'):
-            res = {'title': search_res.get('Heading', ''),
-                   'content': search_res.get('Definition', ''),
-                   'url': search_res.get('AbstractURL', ''),
-                   'class': 'definition_result'}
-            results.append(res)
+        content = content + search_res.get('Definition', '')
+
+    if 'Abstract' in search_res:
+        content = content + search_res.get('Abstract', '')
+
+
+    # image
+    image = search_res.get('Image', '')
+    image = None if image == '' else image
+
+    # attributes
+    if 'Infobox' in search_res:
+        infobox = search_res.get('Infobox', None)
+        if 'content' in infobox:
+            for info in infobox.get('content'):
+                attributes.append({'label': info.get('label'), 'value': info.get('value')})
+
+    # urls
+    for ddg_result in search_res.get('Results', []):
+        if 'FirstURL' in ddg_result:
+            firstURL = ddg_result.get('FirstURL', '')
+            text = ddg_result.get('Text', '')
+            urls.append({'title':text, 'url':firstURL})
+            results.append({'title':heading, 'url': firstURL})
+
+    # related topics
+    for ddg_result in search_res.get('RelatedTopics', []):
+        if 'FirstURL' in ddg_result:
+            suggestion = result_to_text(ddg_result.get('FirstURL', None), ddg_result.get('Text', None), ddg_result.get('Result', None))
+            if suggestion != heading:
+                results.append({'suggestion': suggestion})
+        elif 'Topics' in ddg_result:
+            suggestions = []
+            relatedTopics.append({ 'name' : ddg_result.get('Name', ''), 'suggestions': suggestions })
+            for topic_result in ddg_result.get('Topics', []):
+                suggestion = result_to_text(topic_result.get('FirstURL', None), topic_result.get('Text', None), topic_result.get('Result', None))
+                if suggestion != heading:
+                    suggestions.append(suggestion)
+
+    # abstract
+    abstractURL = search_res.get('AbstractURL', '')
+    if abstractURL != '':
+        # add as a result? problem: the abstract is always in English
+        infobox_id = abstractURL
+        urls.append({'title': search_res.get('AbstractSource'), 'url': abstractURL})
+
+    # definition
+    definitionURL = search_res.get('DefinitionURL', '')
+    if definitionURL != '':
+        # add as a result? as an answer? problem: the definition is always in English
+        infobox_id = definitionURL
+        urls.append({'title': search_res.get('DefinitionSource'), 'url': definitionURL})
+
+    # entity
+    entity = search_res.get('Entity', None)
+    # TODO continent / country / department / location / waterfall / mountain range : link to map search, get weather, nearby locations
+    # TODO musician : link to music search
+    # TODO concert tour : ??
+    # TODO film / actor / television / media franchise : links to IMDB / rottentomatoes (or scrape result)
+    # TODO music : link to musicbrainz / last.fm
+    # TODO book : ??
+    # TODO artist / playwright : ??
+    # TODO company : ??
+    # TODO software / os : ??
+    # TODO software engineer : ??
+    # TODO prepared food : ??
+    # TODO website : ??
+    # TODO performing art : ??
+    # TODO programming language : ??
+    # TODO file format : ??
+
+    if len(heading) > 0:
+        # TODO get infobox.meta.value where .label='article_title'
+        results.append({
+               'infobox': heading,
+               'id': infobox_id,
+               'entity': entity,
+               'content': content,
+               'img_src' : image,
+               'attributes': attributes,
+               'urls': urls,
+               'relatedTopics': relatedTopics
+               })
 
     return results
diff --git a/searx/engines/wikidata.py b/searx/engines/wikidata.py
new file mode 100644
index 000000000..a5ee44246
--- /dev/null
+++ b/searx/engines/wikidata.py
@@ -0,0 +1,193 @@
+import json
+from datetime import datetime
+from requests import get
+from urllib import urlencode
+
+resultCount=2
+urlSearch = 'https://www.wikidata.org/w/api.php?action=query&list=search&format=json&srnamespace=0&srprop=sectionsnippet&{query}'
+urlDetail = 'https://www.wikidata.org/w/api.php?action=wbgetentities&format=json&props=labels%7Cinfo%7Csitelinks%7Csitelinks%2Furls%7Cdescriptions%7Cclaims&{query}'
+# find the right URL for urlMap
+urlMap = 'http://www.openstreetmap.org/?lat={latitude}&lon={longitude}&zoom={zoom}&layers=M'
+
+def request(query, params):
+    params['url'] = urlSearch.format(query=urlencode({'srsearch': query, 'srlimit': resultCount}))
+    print params['url']
+    return params
+
+
+def response(resp):
+    results = []
+    search_res = json.loads(resp.text)
+    # TODO parallel http queries
+    before = datetime.now()
+    for r in search_res.get('query', {}).get('search', {}):
+        wikidata_id = r.get('title', '')
+        results = results + getDetail(wikidata_id)
+    after = datetime.now()
+    print str(after - before) + " second(s)"
+
+    return results
+
+def getDetail(wikidata_id):
+    language = 'fr'
+
+    url = urlDetail.format(query=urlencode({'ids': wikidata_id, 'languages': language + '|en'}))
+    print url
+    response = get(url)
+    result = json.loads(response.content)
+    result = result.get('entities', {}).get(wikidata_id, {})
+    
+    title = result.get('labels', {}).get(language, {}).get('value', None)
+    if title == None:
+        title = result.get('labels', {}).get('en', {}).get('value', wikidata_id)
+    results = []
+    urls = []
+    attributes = []
+
+    description = result.get('descriptions', {}).get(language, {}).get('value', '')
+    if description == '':
+        description = result.get('descriptions', {}).get('en', {}).get('value', '')
+
+    claims = result.get('claims', {})
+    official_website = get_string(claims, 'P856', None)
+    print official_website
+    if official_website != None:
+        urls.append({ 'title' : 'Official site', 'url': official_website })
+        results.append({ 'title': title, 'url' : official_website })
+
+    if language != 'en':
+        add_url(urls, 'Wikipedia (' + language + ')', get_wikilink(result, language + 'wiki'))
+    wikipedia_en_link = get_wikilink(result, 'enwiki')
+    add_url(urls, 'Wikipedia (en)', wikipedia_en_link)
+
+    if language != 'en':
+        add_url(urls, 'Wiki voyage (' + language + ')', get_wikilink(result, language + 'wikivoyage'))
+    add_url(urls, 'Wiki voyage (en)', get_wikilink(result, 'enwikivoyage'))
+
+    if language != 'en':
+        add_url(urls, 'Wikiquote (' + language + ')', get_wikilink(result, language + 'wikiquote'))
+    add_url(urls, 'Wikiquote (en)', get_wikilink(result, 'enwikiquote'))
+
+
+    add_url(urls, 'Commons wiki', get_wikilink(result, 'commonswiki'))
+
+    add_url(urls, 'Location', get_geolink(claims, 'P625', None))
+
+    add_url(urls, 'Wikidata', 'https://www.wikidata.org/wiki/' + wikidata_id + '?uselang='+ language)
+
+    postal_code = get_string(claims, 'P281', None)
+    if postal_code != None:
+        attributes.append({'label' : 'Postal code(s)', 'value' : postal_code})
+
+    date_of_birth = get_time(claims, 'P569', None)
+    if date_of_birth != None:
+        attributes.append({'label' : 'Date of birth', 'value' : date_of_birth})
+
+    date_of_death = get_time(claims, 'P570', None)
+    if date_of_death != None:
+        attributes.append({'label' : 'Date of death', 'value' : date_of_death})
+
+
+    results.append({
+            'infobox' : title, 
+            'id' : wikipedia_en_link,
+            'content' : description,
+            'attributes' : attributes,
+            'urls' : urls
+            })
+
+    return results
+
+def add_url(urls, title, url):
+    if url != None:
+        urls.append({'title' : title, 'url' : url})
+
+def get_mainsnak(claims, propertyName):
+    propValue = claims.get(propertyName, {})
+    if len(propValue) == 0:
+        return None
+
+    propValue = propValue[0].get('mainsnak', None)
+    return propValue
+
+def get_string(claims, propertyName, defaultValue=None):
+    propValue = claims.get(propertyName, {})
+    if len(propValue) == 0:
+        return defaultValue
+
+    result = []
+    for e in propValue:
+        mainsnak = e.get('mainsnak', {})
+
+        datatype = mainsnak.get('datatype', '')
+        datavalue = mainsnak.get('datavalue', {})
+        if datavalue != None:
+            result.append(datavalue.get('value', ''))
+
+    if len(result) == 0:
+        return defaultValue
+    else:
+        return ', '.join(result)
+
+def get_time(claims, propertyName, defaultValue=None):
+    propValue = claims.get(propertyName, {})
+    if len(propValue) == 0:
+        return defaultValue
+
+    result = []
+    for e in propValue:
+        mainsnak = e.get('mainsnak', {})
+
+        datatype = mainsnak.get('datatype', '')
+        datavalue = mainsnak.get('datavalue', {})
+        if datavalue != None:
+            value = datavalue.get('value', '')
+            result.append(value.get('time', ''))
+
+    if len(result) == 0:
+        return defaultValue
+    else:
+        return ', '.join(result)
+
+def get_geolink(claims, propertyName, defaultValue=''):
+    mainsnak = get_mainsnak(claims, propertyName)
+
+    if mainsnak == None:
+        return defaultValue
+
+    datatype = mainsnak.get('datatype', '')
+    datavalue = mainsnak.get('datavalue', {})
+
+    if datatype != 'globe-coordinate':
+        return defaultValue
+
+    value = datavalue.get('value', {})
+
+    precision = value.get('precision', 0.0002)
+
+    # there is no zoom information, deduce from precision (error prone)
+    # samples :
+    # 13 --> 5
+    # 1 --> 6
+    # 0.016666666666667 --> 9
+    # 0.00027777777777778 --> 19
+    # wolframalpha : quadratic fit { {13, 5}, {1, 6}, {0.0166666, 9}, {0.0002777777, 19} }
+    # 14.1186 - 8.8322 x + 0.625447 x^2 (intercept rounded up to 15 below)
+    if precision < 0.0003:
+        zoom = 19
+    else:
+        zoom = int(15 - precision*8.8322 + precision*precision*0.625447)
+
+    url = urlMap.replace('{latitude}', str(value.get('latitude',0))).replace('{longitude}', str(value.get('longitude',0))).replace('{zoom}', str(zoom))
+
+    return url
+
+def get_wikilink(result, wikiid):
+    url = result.get('sitelinks', {}).get(wikiid, {}).get('url', None)
+    if url == None:
+        return url
+    elif url.startswith('http://'):
+        url = url.replace('http://', 'https://')
+    elif url.startswith('//'):
+        url = 'https:' + url
+    return url
diff --git a/searx/search.py b/searx/search.py
index 48f8012f1..7eb605e11 100644
--- a/searx/search.py
+++ b/searx/search.py
@@ -38,17 +38,14 @@ def default_request_params():
 
 
 # create a callback wrapper for the search engine results
-def make_callback(engine_name, results, suggestions, callback, params):
+def make_callback(engine_name, results, suggestions, answers, infoboxes, callback, params):
 
     # creating a callback wrapper for the search engine results
     def process_callback(response, **kwargs):
         cb_res = []
         response.search_params = params
 
-        # update stats with current page-load-time
-        engines[engine_name].stats['page_load_time'] += \
-            (datetime.now() - params['started']).total_seconds()
-
+        # run the engine callback to parse the response
         try:
             search_results = callback(response)
         except Exception, e:
@@ -61,6 +58,7 @@ def make_callback(engine_name, results, suggestions, callback, params):
                 engine_name, str(e))
             return
 
+        # add results
         for result in search_results:
             result['engine'] = engine_name
 
@@ -70,21 +68,38 @@ def make_callback(engine_name, results, suggestions, callback, params):
                 suggestions.add(result['suggestion'])
                 continue
 
+            # if it is an answer, add it to list of answers
+            if 'answer' in result:
+                answers.add(result['answer'])
+                continue
+
+            # if it is an infobox, add it to list of infoboxes
+            if 'infobox' in result:
+                infoboxes.append(result)
+                print result
+                continue
+
             # append result
             cb_res.append(result)
 
         results[engine_name] = cb_res
 
+        # update stats with current page-load-time
+        engines[engine_name].stats['page_load_time'] += \
+            (datetime.now() - params['started']).total_seconds()
+
     return process_callback
 
+
 # return the meaningful length of the content for a result
-def content_result_len(result):
-    if isinstance(result.get('content'), basestring):
-        content = re.sub('[,;:!?\./\\\\ ()_-]', '', result.get('content'))
+def content_result_len(content):
+    if isinstance(content, basestring):
+        content = re.sub('[,;:!?\./\\\\ ()_-]', '', content)
         return len(content)
     else:
         return 0
 
+
 # score results and remove duplications
 def score_results(results):
     # calculate scoring parameters
@@ -138,7 +153,7 @@ def score_results(results):
         # merge duplicates together
         if duplicated:
             # using content with more text
-            if content_result_len(res) > content_result_len(duplicated):
+            if content_result_len(res.get('content', '')) > content_result_len(duplicated.get('content', '')):
                 duplicated['content'] = res['content']
 
             # increase result-score
@@ -197,6 +212,64 @@ def score_results(results):
     return gresults
 
 
+def merge_two_infoboxes(infobox1, infobox2):
+    if 'urls' in infobox2:
+        urls1 = infobox1.get('urls', None)
+        if urls1 == None:
+            urls1 = []
+            infobox1['urls'] = urls1
+
+        urlSet = set()
+        for url in infobox1.get('urls', []):
+            urlSet.add(url.get('url', None))
+
+        for url in infobox2.get('urls', []):
+            if url.get('url', None) not in urlSet:
+                urls1.append(url)
+
+    if 'attributes' in infobox2:
+        attributes1 = infobox1.get('attributes', None)
+        if attributes1 == None:
+            attributes1 = []
+            infobox1['attributes'] = attributes1
+
+        attributeSet = set()
+        for attribute in infobox1.get('attributes', []):
+            if attribute.get('label', None) not in attributeSet:
+                attributeSet.add(attribute.get('label', None))
+        for attribute in infobox2.get('attributes', []):
+            if attribute.get('label', None) not in attributeSet:
+                attributes1.append(attribute)
+
+    if 'content' in infobox2:
+        content1 = infobox1.get('content', None)
+        content2 = infobox2.get('content', '')
+        if content1 != None:
+            if content_result_len(content2) > content_result_len(content1):
+                infobox1['content'] = content2
+        else:
+            infobox1['content'] = content2
+
+
+def merge_infoboxes(infoboxes):
+    results = []
+    infoboxes_id = {}
+    for infobox in infoboxes:
+        add_infobox = True
+        infobox_id = infobox.get('id', None)
+        if infobox_id != None:
+            existingIndex = infoboxes_id.get(infobox_id, None)
+            if existingIndex != None:
+                merge_two_infoboxes(results[existingIndex], infobox)
+                add_infobox = False
+
+        if add_infobox:
+            results.append(infobox)
+            infoboxes_id[infobox_id] = len(results)-1
+
+    return results
+
+
 class Search(object):
 
     """Search information container"""
@@ -219,6 +292,8 @@ class Search(object):
 
         self.results = []
         self.suggestions = []
+        self.answers = []
+        self.infoboxes = []
         self.request_data = {}
 
         # set specific language if set
@@ -350,6 +425,8 @@ class Search(object):
         requests = []
         results = {}
         suggestions = set()
+        answers = set()
+        infoboxes = []
 
         # increase number of searches
         number_of_searches += 1
@@ -394,6 +471,8 @@ class Search(object):
                 selected_engine['name'],
                 results,
                 suggestions,
+                answers,
+                infoboxes,
                 engine.response,
                 request_params
             )
@@ -431,11 +510,14 @@ class Search(object):
         # score results and remove duplications
         results = score_results(results)
 
+        # merge infoboxes according to their ids
+        infoboxes = merge_infoboxes(infoboxes)
+
         # update engine stats, using calculated score
         for result in results:
             for res_engine in result['engines']:
                 engines[result['engine']]\
                     .stats['score_count'] += result['score']
 
-        # return results and suggestions
-        return results, suggestions
+        # return results, suggestions, answers and infoboxes
+        return results, suggestions, answers, infoboxes
diff --git a/searx/settings.yml b/searx/settings.yml
index da053ce6a..77bcd2aa4 100644
--- a/searx/settings.yml
+++ b/searx/settings.yml
@@ -1,7 +1,7 @@
 server:
     port : 8888
     secret_key : "ultrasecretkey" # change this!
-    debug : False # Debug mode, only for development
+    debug : True # Debug mode, only for development
     request_timeout : 2.0 # seconds
     base_url : False # Set custom base_url. Possible values: False or "https://your.custom.host/location/"
     themes_path : "" # Custom ui themes path
diff --git a/searx/static/default/css/style.css b/searx/static/default/css/style.css
index 9a6faadef..ec476d19a 100644
--- a/searx/static/default/css/style.css
+++ b/searx/static/default/css/style.css
@@ -1,75 +1 @@
-html{font-family:sans-serif;font-size:.9em;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%;-moz-text-size-adjust:100%;text-size-adjust:100%;color:#444;padding:0;margin:0}
-body,#container{padding:0;margin:0}
-#container{width:100%;position:absolute;top:0}
-.search{padding:0;margin:0}.search .checkbox_container label{font-size:.9em;border-bottom:2px solid #e8e7e6}
-.search .checkbox_container label:hover{border-bottom:2px solid #3498db}
-.search .checkbox_container input[type="checkbox"]:checked+label{border-bottom:2px solid #2980b9}
-#search_wrapper{position:relative;width:50em;padding:10px}
-.center #search_wrapper{margin-left:auto;margin-right:auto}
-.q{background:none repeat scroll 0 0 #fff;border:1px solid #3498db;color:#222;font-size:16px;height:28px;margin:0;outline:medium none;padding:2px;padding-left:8px;padding-right:0 !important;width:100%;z-index:2}
-#search_submit{position:absolute;top:13px;right:1px;padding:0;border:0;background:url('../img/search-icon.png') no-repeat;background-size:24px 24px;opacity:.8;width:24px;height:30px;font-size:0}
-@media screen and (max-width:50em){#search_wrapper{width:90%;clear:both;overflow:hidden}}ul.autocompleter-choices{position:absolute;margin:0;padding:0;list-style:none;border:1px solid #3498db;border-left-color:#3498db;border-right-color:#3498db;border-bottom-color:#3498db;text-align:left;font-family:Verdana,Geneva,Arial,Helvetica,sans-serif;z-index:50;background-color:#fff;color:#444}ul.autocompleter-choices li{position:relative;margin:-2px 0 0 0;padding:.2em 1.5em .2em 1em;display:block;float:none !important;cursor:pointer;font-weight:normal;white-space:nowrap;font-size:1em;line-height:1.5em}ul.autocompleter-choices li.autocompleter-selected{background-color:#444;color:#fff}ul.autocompleter-choices li.autocompleter-selected span.autocompleter-queried{color:#9fcfff}
-ul.autocompleter-choices span.autocompleter-queried{display:inline;float:none;font-weight:bold;margin:0;padding:0}
-.row{max-width:800px;margin:20px auto;text-align:justify}.row h1{font-size:3em;margin-top:50px}
-.row p{padding:0 10px;max-width:700px}
-.row h3,.row ul{margin:4px 8px}
-.hmarg{margin:0 20px;border:1px solid #3498db;padding:4px 10px}
-a:link.hmarg{color:#3498db}
-a:visited.hmarg{color:#3498db}
-a:active.hmarg{color:#3498db}
-a:hover.hmarg{color:#3498db}
-.top_margin{margin-top:60px}
-.center{text-align:center}
-h1{font-size:5em}
-div.title{background:url('../img/searx.png') no-repeat;width:100%;background-position:center}div.title h1{visibility:hidden}
-input[type="submit"]{padding:2px 6px;margin:2px 4px;display:inline-block;background:#3498db;color:#fff;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;border:0;cursor:pointer}
-input[type="checkbox"]{visibility:hidden}
-fieldset{margin:8px;border:1px solid #3498db}
-#categories{margin:0 10px}
-.checkbox_container{display:inline-block;position:relative;margin:0 3px;padding:0}.checkbox_container input{display:none}
-.checkbox_container label,.engine_checkbox label{cursor:pointer;padding:4px 10px;margin:0;display:block;text-transform:capitalize;-webkit-touch-callout:none;-webkit-user-select:none;-khtml-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}
-.checkbox_container input[type="checkbox"]:checked+label{background:#3498db;color:#fff}
-.engine_checkbox{padding:4px}
-label.allow{background:#e74c3c;padding:4px 8px;color:#fff;display:none}
-label.deny{background:#2ecc71;padding:4px 8px;color:#444;display:inline}
-.engine_checkbox input[type="checkbox"]:checked+label:nth-child(2)+label{display:none}
-.engine_checkbox input[type="checkbox"]:checked+label.allow{display:inline}
-a{text-decoration:none;color:#1a11be}a:visited{color:#8e44ad}
-.result{margin:19px 0 18px 0;padding:0;clear:both}
-.result_title{margin-bottom:0}.result_title a{color:#2980b9;font-weight:normal;font-size:1.1em}.result_title a:hover{text-decoration:underline}
-.result_title a:visited{color:#8e44ad}
-.cache_link{font-size:10px !important}
-.result h3{font-size:1em;word-wrap:break-word;margin:5px 0 1px 0;padding:0}
-.result .content{font-size:.8em;margin:0;padding:0;max-width:54em;word-wrap:break-word;line-height:1.24}
-.result .url{font-size:.8em;margin:0 0 3px 0;padding:0;max-width:54em;word-wrap:break-word;color:#c0392b}
-.result .published_date{font-size:.8em;color:#888;margin:5px 20px}
-.engines{color:#888}
-.small_font{font-size:.8em}
-.small p{margin:2px 0}
-.right{float:right}
-.invisible{display:none}
-.left{float:left}
-.highlight{color:#094089}
-.content .highlight{color:#000}
-.image_result{float:left;margin:10px 10px;position:relative;height:160px}.image_result img{border:0;height:160px}
-.image_result p{margin:0;padding:0}.image_result p span a{display:none;color:#fff}
-.image_result p:hover span a{display:block;position:absolute;bottom:0;right:0;padding:4px;background-color:rgba(0,0,0,0.6);font-size:.7em}
-.torrent_result{border-left:10px solid #d3d3d3;padding-left:3px}.torrent_result p{margin:3px;font-size:.8em}
-.definition_result{border-left:10px solid #808080;padding-left:3px}
-.percentage{position:relative;width:300px}.percentage div{background:#444}
-table{width:100%}
-td{padding:0 4px}
-tr:hover{background:#ddd}
-#results{margin:auto;padding:0;width:50em;margin-bottom:20px}
-#sidebar{position:absolute;top:100px;right:10px;margin:0 2px 5px 5px;padding:0 2px 2px 2px;width:14em}#sidebar input{padding:0;margin:3px;font-size:.8em;display:inline-block;background:transparent;color:#444;cursor:pointer}
-#sidebar input[type="submit"]{text-decoration:underline}
-#suggestions{margin-top:20px}#suggestions span{display:inline;margin:0 2px 2px 2px;padding:0}
-#suggestions input{padding:0;margin:3px;font-size:.8em;display:inline-block;background:transparent;color:#444;cursor:pointer}
-#suggestions input[type="submit"]{text-decoration:underline}
-#suggestions form{display:inline}
-#search_url{margin-top:8px}#search_url input{border:1px solid #888;padding:4px;color:#444;width:14em;display:block;margin:4px;font-size:.8em}
-#preferences{top:10px;padding:0;border:0;background:url('../img/preference-icon.png') no-repeat;background-size:28px 28px;opacity:.8;width:28px;height:30px;display:block}#preferences *{display:none}
-#pagination{clear:both;width:40em}
-#apis{margin-top:8px;clear:both}
-@media screen and (max-width:50em){#categories{font-size:90%;clear:both}#categories .checkbox_container{margin-top:2px;margin:auto} #results{margin:auto;padding:0;width:90%} .github{display:none} .checkbox_container{display:block;width:90%}.checkbox_container label{border-bottom:0}}@media screen and (max-width:70em){.right{display:none;postion:fixed !important;top:100px;right:0} #sidebar{position:static;max-width:50em;margin:0 0 2px 0;padding:0;float:none;border:none;width:auto}#sidebar input{border:0} #apis{display:none} #search_url{display:none} .result{border-top:1px solid #e8e7e6;margin:7px 0 6px 0}.result img{max-width:90%;width:auto;height:auto}}.favicon{float:left;margin-right:4px;margin-top:2px}
-.preferences_back{background:none repeat scroll 0 0 #3498db;border:0 none;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;cursor:pointer;display:inline-block;margin:2px 4px;padding:4px 6px}.preferences_back a{color:#fff}
+html{font-family:sans-serif;font-size:.9em;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%;-moz-text-size-adjust:100%;text-size-adjust:100%;color:#444;padding:0;margin:0}body,#container{padding:0;margin:0}#container{width:100%;position:absolute;top:0}.search{padding:0;margin:0}.search .checkbox_container label{font-size:.9em;border-bottom:2px solid #e8e7e6}.search .checkbox_container label:hover{border-bottom:2px solid #3498db}.search .checkbox_container input[type="checkbox"]:checked+label{border-bottom:2px solid #2980b9}#search_wrapper{position:relative;width:50em;padding:10px}.center #search_wrapper{margin-left:auto;margin-right:auto}.q{background:none repeat scroll 0 0 #fff;border:1px solid #3498db;color:#222;font-size:16px;height:28px;margin:0;outline:medium none;padding:2px;padding-left:8px;padding-right:0 !important;width:100%;z-index:2}#search_submit{position:absolute;top:13px;right:1px;padding:0;border:0;background:url('../img/search-icon.png') no-repeat;background-size:24px 24px;opacity:.8;width:24px;height:30px;font-size:0}@media screen and (max-width:50em){#search_wrapper{width:90%;clear:both;overflow:hidden}}ul.autocompleter-choices{position:absolute;margin:0;padding:0;list-style:none;border:1px solid #3498db;border-left-color:#3498db;border-right-color:#3498db;border-bottom-color:#3498db;text-align:left;font-family:Verdana,Geneva,Arial,Helvetica,sans-serif;z-index:50;background-color:#fff;color:#444}ul.autocompleter-choices li{position:relative;margin:-2px 0 0 0;padding:.2em 1.5em .2em 1em;display:block;float:none !important;cursor:pointer;font-weight:normal;white-space:nowrap;font-size:1em;line-height:1.5em}ul.autocompleter-choices li.autocompleter-selected{background-color:#444;color:#fff}ul.autocompleter-choices li.autocompleter-selected span.autocompleter-queried{color:#9fcfff}ul.autocompleter-choices span.autocompleter-queried{display:inline;float:none;font-weight:bold;margin:0;padding:0}.row{max-width:800px;margin:20px auto;text-align:justify}.row h1{font-size:3em;margin-top:50px}.row p{padding:0 10px;max-width:700px}.row h3,.row ul{margin:4px 8px}.hmarg{margin:0 20px;border:1px solid #3498db;padding:4px 10px}a:link.hmarg{color:#3498db}a:visited.hmarg{color:#3498db}a:active.hmarg{color:#3498db}a:hover.hmarg{color:#3498db}.top_margin{margin-top:60px}.center{text-align:center}h1{font-size:5em}div.title{background:url('../img/searx.png') no-repeat;width:100%;background-position:center}div.title h1{visibility:hidden}input[type="submit"]{padding:2px 6px;margin:2px 4px;display:inline-block;background:#3498db;color:#fff;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;border:0;cursor:pointer}input[type="checkbox"]{visibility:hidden}fieldset{margin:8px;border:1px solid #3498db}#categories{margin:0 10px}.checkbox_container{display:inline-block;position:relative;margin:0 3px;padding:0}.checkbox_container input{display:none}.checkbox_container label,.engine_checkbox label{cursor:pointer;padding:4px 10px;margin:0;display:block;text-transform:capitalize;-webkit-touch-callout:none;-webkit-user-select:none;-khtml-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.checkbox_container input[type="checkbox"]:checked+label{background:#3498db;color:#fff}.engine_checkbox{padding:4px}label.allow{background:#e74c3c;padding:4px 8px;color:#fff;display:none}label.deny{background:#2ecc71;padding:4px 8px;color:#444;display:inline}.engine_checkbox input[type="checkbox"]:checked+label:nth-child(2)+label{display:none}.engine_checkbox input[type="checkbox"]:checked+label.allow{display:inline}a{text-decoration:none;color:#1a11be}a:visited{color:#8e44ad}.result{margin:19px 0 18px 0;padding:0;clear:both}.result_title{margin-bottom:0}.result_title a{color:#2980b9;font-weight:normal;font-size:1.1em}.result_title a:hover{text-decoration:underline}.result_title a:visited{color:#8e44ad}.cache_link{font-size:10px !important}.result h3{font-size:1em;word-wrap:break-word;margin:5px 0 1px 0;padding:0}.result .content{font-size:.8em;margin:0;padding:0;max-width:54em;word-wrap:break-word;line-height:1.24}.result .content img{float:left;margin-right:5px;max-width:200px;max-height:100px}.result .content br.last{clear:both}.result .url{font-size:.8em;margin:0 0 3px 0;padding:0;max-width:54em;word-wrap:break-word;color:#c0392b}.result .published_date{font-size:.8em;color:#888;margin:5px 20px}.engines{color:#888}.small_font{font-size:.8em}.small p{margin:2px 0}.right{float:right}.invisible{display:none}.left{float:left}.highlight{color:#094089}.content .highlight{color:#000}.image_result{float:left;margin:10px 10px;position:relative;height:160px}.image_result img{border:0;height:160px}.image_result p{margin:0;padding:0}.image_result p span a{display:none;color:#fff}.image_result p:hover span a{display:block;position:absolute;bottom:0;right:0;padding:4px;background-color:rgba(0,0,0,0.6);font-size:.7em}.torrent_result{border-left:10px solid #d3d3d3;padding-left:3px}.torrent_result p{margin:3px;font-size:.8em}.definition_result{border-left:10px solid #808080;padding-left:3px}.percentage{position:relative;width:300px}.percentage div{background:#444}table{width:100%}td{padding:0 4px}tr:hover{background:#ddd}#results{margin:auto;padding:0;width:50em;margin-bottom:20px}#sidebar{position:absolute;top:100px;right:10px;margin:0 2px 5px 5px;padding:0 2px 2px 2px;width:14em}#sidebar input{padding:0;margin:3px;font-size:.8em;display:inline-block;background:transparent;color:#444;cursor:pointer}#sidebar input[type="submit"]{text-decoration:underline}#suggestions,#answers{margin-top:20px}#suggestions input,#answers input,#infoboxes input{padding:0;margin:3px;font-size:.8em;display:inline-block;background:transparent;color:#444;cursor:pointer}#suggestions input[type="submit"],#answers input[type="submit"],#infoboxes input[type="submit"]{text-decoration:underline}#suggestions form,#answers form,#infoboxes form{display:inline}#infoboxes{position:absolute;top:220px;right:20px;margin:0 2px 5px 5px;padding:0 2px 2px;max-width:21em}#infoboxes .infobox{margin:10px 0 10px;border:1px solid #ddd;padding:5px;font-size:.8em}#infoboxes .infobox img{max-width:20em;max-height:12em;display:block;margin:5px;padding:5px}#infoboxes .infobox h2{margin:0}#infoboxes .infobox table{width:auto}#infoboxes .infobox table td{vertical-align:top}#infoboxes .infobox input{font-size:1em}#infoboxes .infobox br{clear:both}#search_url{margin-top:8px}#search_url input{border:1px solid #888;padding:4px;color:#444;width:14em;display:block;margin:4px;font-size:.8em}#preferences{top:10px;padding:0;border:0;background:url('../img/preference-icon.png') no-repeat;background-size:28px 28px;opacity:.8;width:28px;height:30px;display:block}#preferences *{display:none}#pagination{clear:both;width:40em}#apis{margin-top:8px;clear:both}@media screen and (max-width:50em){#results{margin:auto;padding:0;width:90%}.github{display:none}.checkbox_container{display:block;width:90%}.checkbox_container label{border-bottom:0}}@media screen and (max-width:75em){#infoboxes{position:inherit;max-width:inherit}#infoboxes .infobox{clear:both}#infoboxes .infobox img{float:left;max-width:10em}#categories{font-size:90%;clear:both}#categories .checkbox_container{margin-top:2px;margin:auto}.right{display:none;postion:fixed !important;top:100px;right:0}#sidebar{position:static;max-width:50em;margin:0 0 2px 0;padding:0;float:none;border:none;width:auto}#sidebar input{border:0}#apis{display:none}#search_url{display:none}.result{border-top:1px solid #e8e7e6;margin:7px 0 6px 0}}.favicon{float:left;margin-right:4px;margin-top:2px}.preferences_back{background:none repeat scroll 0 0 #3498db;border:0 none;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;cursor:pointer;display:inline-block;margin:2px 4px;padding:4px 6px}.preferences_back a{color:#fff}
\ No newline at end of file
diff --git a/searx/static/default/less/style.less b/searx/static/default/less/style.less
index e3fac1b10..27da09e35 100644
--- a/searx/static/default/less/style.less
+++ b/searx/static/default/less/style.less
@@ -235,6 +235,17 @@ a {
 		max-width: 54em;
 		word-wrap:break-word;
 		line-height: 1.24;
+
+		img {
+		    float: left;
+		    margin-right: 5px;
+		    max-width: 200px;
+		    max-height: 100px;
+		}
+		
+		br.last {
+		    clear: both;
+		}
 	}
 
 	.url {
@@ -384,33 +395,80 @@ tr {
     }
 }
 
-#suggestions {
+#suggestions, #answers {
 
-    margin-top: 20px;
+    	margin-top: 20px;
+
+}
+
+#suggestions, #answers, #infoboxes {
 
-	span {
-		display: inline;
-		margin: 0 2px 2px 2px;
-		padding: 0;
-	}
 	input {
 		padding: 0;
 		margin: 3px;
 		font-size: 0.8em;
 		display: inline-block;
-        background: transparent;
-        color: @color-result-search-url-font;
+        	background: transparent;
+        	color: @color-result-search-url-font;
 		cursor: pointer;
 	}
-    input[type="submit"] {
+
+    	input[type="submit"] {
 		text-decoration: underline;
-    }
+    	}
 
 	form {
 		display: inline;
 	}
 }
 
+
+#infoboxes {
+	   position: absolute;
+	   top: 220px;
+	   right: 20px;
+	   margin: 0px 2px 5px 5px;
+	   padding: 0px 2px 2px;
+	   max-width: 21em;
+
+	   .infobox {
+	   	    margin: 10px 0 10px;
+	   	    border: 1px solid #ddd;
+		    padding: 5px;
+	   	    font-size: 0.8em;
+
+	   	    img {
+		    	max-width: 20em;
+			max-height: 12em;
+			display: block;
+			margin: 5px;
+			padding: 5px;
+		    }
+
+		    h2 {
+		       margin: 0;
+		    }
+
+		    table {
+		    	  width: auto;
+
+			  td {
+		       	     vertical-align: top;
+		    	  }
+
+		    }
+
+		    input {
+		    	  font-size: 1em;
+		    }
+
+		    br {
+		       clear: both;
+		    }
+
+	   }
+}
+
 #search_url {
 	margin-top: 8px;
 
@@ -453,16 +511,6 @@ tr {
 
 @media screen and (max-width: @results-width) {
 
-	#categories {
-		font-size: 90%;
-		clear: both;
-
-		.checkbox_container {
-			margin-top: 2px;
-			margin: auto; 
-		}
-	}
-
     #results {
         margin: auto;
         padding: 0;
@@ -483,7 +531,33 @@ tr {
 	}
 }
 
-@media screen and (max-width: 70em) {
+@media screen and (max-width: 75em) {
+
+       #infoboxes {
+	   position: inherit;
+	   max-width: inherit;
+	   
+	   .infobox {
+	   	    clear:both;
+	   
+	   	   img {
+	   	       float: left;
+	       	       max-width: 10em;
+	   	   }
+	   }
+
+       }
+
+	#categories {
+		font-size: 90%;
+		clear: both;
+
+		.checkbox_container {
+			margin-top: 2px;
+			margin: auto; 
+		}
+	}
+
 	.right {
 		display: none;
 		postion: fixed !important;
@@ -515,12 +589,6 @@ tr {
 	.result {
 		border-top: 1px solid @color-result-top-border;
 		margin: 7px 0 6px 0;
-
-		img {
-			max-width: 90%;
-			width: auto;
-			height: auto
-		}
 	}
 }
 
diff --git a/searx/templates/default/result_templates/default.html b/searx/templates/default/result_templates/default.html
index ac9b9b979..277946202 100644
--- a/searx/templates/default/result_templates/default.html
+++ b/searx/templates/default/result_templates/default.html
@@ -8,6 +8,8 @@
     <h3 class="result_title"><a href="{{ result.url }}">{{ result.title|safe }}</a></h3>
     <p class="url">{{ result.pretty_url }} <a class="cache_link" href="https://web.archive.org/web/{{ result.url }}">cached</a></p>
 	{% if result.publishedDate %}<p class="published_date">{{ result.publishedDate }}</p>{% endif %}
-    <p class="content">{% if result.content %}{{ result.content|safe }}<br />{% endif %}</p>
+    <p class="content">
+      {% if result.img_src %}<img src="{{ result.img_src|safe }}" class="image" />{% endif %}
+      {% if result.content %}{{ result.content|safe }}<br class="last"/>{% endif %}</p>
   </div>
 </div>
diff --git a/searx/templates/default/results.html b/searx/templates/default/results.html
index d0b53b48a..b66d6e2af 100644
--- a/searx/templates/default/results.html
+++ b/searx/templates/default/results.html
@@ -30,6 +30,14 @@
         </div>
     </div>
 
+    {% if answers %}
+    <div id="answers"><span>{{ _('Answers') }}</span>
+        {% for answer in answers %}
+        <span>{{ answer }}</span>
+        {% endfor %}
+    </div>
+    {% endif %}
+
     {% if suggestions %}
     <div id="suggestions"><span>{{ _('Suggestions') }}</span>
         {% for suggestion in suggestions %}
@@ -41,6 +49,14 @@
     </div>
     {% endif %}
 
+    {% if infoboxes %}
+    <div id="infoboxes">
+      {% for infobox in infoboxes %}
+         {% include 'default/infobox.html' %}
+      {% endfor %}
+    </div>
+    {% endif %}
+
     {% for result in results %}
         {% if result['template'] %}
             {% include 'default/result_templates/'+result['template'] %}
diff --git a/searx/webapp.py b/searx/webapp.py
index 42cb42678..d0212169c 100644
--- a/searx/webapp.py
+++ b/searx/webapp.py
@@ -198,7 +198,7 @@ def index():
             'index.html',
         )
 
-    search.results, search.suggestions = search.search(request)
+    search.results, search.suggestions, search.answers, search.infoboxes = search.search(request)
 
     for result in search.results:
 
@@ -291,6 +291,8 @@ def index():
         pageno=search.pageno,
         base_url=get_base_url(),
         suggestions=search.suggestions,
+        answers=search.answers,
+        infoboxes=search.infoboxes,
         theme=get_current_theme_name()
     )
 

From 0a71525ab6d4fe4cbc4b33b4653bdb39ae4d55e9 Mon Sep 17 00:00:00 2001
From: Dalf <alex@al-f.net>
Date: Sun, 28 Sep 2014 16:53:30 +0200
Subject: [PATCH 4/6] [enh] add infoboxes and answers (clean up)

---
 searx/engines/wikidata.py            | 45 ++++++++++++++++------------
 searx/search.py                      |  1 -
 searx/settings.yml                   |  4 +++
 searx/templates/default/infobox.html | 44 +++++++++++++++++++++++++++
 4 files changed, 74 insertions(+), 20 deletions(-)
 create mode 100644 searx/templates/default/infobox.html
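
Note: instead of one wbgetentities HTTP round-trip per search hit,
getDetail() now receives a single pre-fetched JSON response: the
Wikidata API accepts several ids separated by '|' in one call. A sketch
of the batched request this builds (the ids and language are invented):

    from urllib import urlencode

    wikidata_ids = ['Q90', 'Q142']
    language = 'fr'
    url = urlDetail.format(query=urlencode({
        'ids': '|'.join(wikidata_ids),      # all entities in one request
        'languages': language + '|en'}))    # preferred label languages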

diff --git a/searx/engines/wikidata.py b/searx/engines/wikidata.py
index a5ee44246..46f2323c8 100644
--- a/searx/engines/wikidata.py
+++ b/searx/engines/wikidata.py
@@ -1,13 +1,12 @@
 import json
-from datetime import datetime
 from requests import get
 from urllib import urlencode
+from datetime import datetime
 
 resultCount=2
-urlSearch = 'https://www.wikidata.org/w/api.php?action=query&list=search&format=json&srnamespace=0&srprop=sectionsnippet&{query}'
+urlSearch = 'https://www.wikidata.org/w/api.php?action=query&list=search&format=json&srnamespace=0&srprop=sectiontitle&{query}'
 urlDetail = 'https://www.wikidata.org/w/api.php?action=wbgetentities&format=json&props=labels%7Cinfo%7Csitelinks%7Csitelinks%2Furls%7Cdescriptions%7Cclaims&{query}'
-# find the right URL for urlMap
-urlMap = 'http://www.openstreetmap.org/?lat={latitude}&lon={longitude}&zoom={zoom}&layers=M'
+urlMap = 'https://www.openstreetmap.org/?lat={latitude}&lon={longitude}&zoom={zoom}&layers=M'
 
 def request(query, params):
     params['url'] = urlSearch.format(query=urlencode({'srsearch': query, 'srlimit': resultCount}))
@@ -18,24 +17,27 @@ def request(query, params):
 def response(resp):
     results = []
     search_res = json.loads(resp.text)
-    # TODO parallel http queries
-    before = datetime.now()
+
+    wikidata_ids = set()
     for r in search_res.get('query', {}).get('search', {}):
-        wikidata_id = r.get('title', '')
-        results = results + getDetail(wikidata_id)
-    after = datetime.now()
-    print str(after - before) + " second(s)"
+        wikidata_ids.add(r.get('title', ''))
+
+    language = resp.search_params['language'].split('_')[0]
+    if language == 'all':
+        language = 'en'
+    url = urlDetail.format(query=urlencode({'ids': '|'.join(wikidata_ids), 'languages': language + '|en'}))
+
+    before = datetime.now()
+    htmlresponse = get(url)
+    print datetime.now() - before
+    jsonresponse = json.loads(htmlresponse.content)
+    for wikidata_id in wikidata_ids:
+        results = results + getDetail(jsonresponse, wikidata_id, language)
 
     return results
 
-def getDetail(wikidata_id):
-    language = 'fr'
-
-    url = urlDetail.format(query=urlencode({'ids': wikidata_id, 'languages': language + '|en'}))
-    print url
-    response = get(url)
-    result = json.loads(response.content)
-    result = result.get('entities', {}).get(wikidata_id, {})
+def getDetail(jsonresponse, wikidata_id, language):
+    result = jsonresponse.get('entities', {}).get(wikidata_id, {})
     
     title = result.get('labels', {}).get(language, {}).get('value', None)
     if title == None:
@@ -50,7 +52,6 @@ def getDetail(wikidata_id):
 
     claims = result.get('claims', {})
     official_website = get_string(claims, 'P856', None)
-    print official_website
     if official_website != None:
         urls.append({ 'title' : 'Official site', 'url': official_website })
         results.append({ 'title': title, 'url' : official_website })
@@ -98,10 +99,12 @@ def getDetail(wikidata_id):
 
     return results
 
+
 def add_url(urls, title, url):
     if url != None:
         urls.append({'title' : title, 'url' : url})
 
+
 def get_mainsnak(claims, propertyName):
     propValue = claims.get(propertyName, {})
     if len(propValue) == 0:
@@ -110,6 +113,7 @@ def get_mainsnak(claims, propertyName):
     propValue = propValue[0].get('mainsnak', None)
     return propValue
 
+
 def get_string(claims, propertyName, defaultValue=None):
     propValue = claims.get(propertyName, {})
     if len(propValue) == 0:
@@ -129,6 +133,7 @@ def get_string(claims, propertyName, defaultValue=None):
     else:
         return ', '.join(result)
 
+
 def get_time(claims, propertyName, defaultValue=None):
     propValue = claims.get(propertyName, {})
     if len(propValue) == 0:
@@ -149,6 +154,7 @@ def get_time(claims, propertyName, defaultValue=None):
     else:
         return ', '.join(result)
 
+
 def get_geolink(claims, propertyName, defaultValue=''):
     mainsnak = get_mainsnak(claims, propertyName)
 
@@ -182,6 +188,7 @@ def get_geolink(claims, propertyName, defaultValue=''):
 
     return url
 
+
 def get_wikilink(result, wikiid):
     url = result.get('sitelinks', {}).get(wikiid, {}).get('url', None)
     if url == None:
diff --git a/searx/search.py b/searx/search.py
index 7eb605e11..f9157ef7e 100644
--- a/searx/search.py
+++ b/searx/search.py
@@ -76,7 +76,6 @@ def make_callback(engine_name, results, suggestions, answers, infoboxes, callbac
             # if it is an infobox, add it to list of infoboxes
             if 'infobox' in result:
                 infoboxes.append(result)
-                print result
                 continue
 
             # append result
diff --git a/searx/settings.yml b/searx/settings.yml
index 77bcd2aa4..02f7caacb 100644
--- a/searx/settings.yml
+++ b/searx/settings.yml
@@ -44,6 +44,10 @@ engines:
     engine : duckduckgo_definitions
     shortcut : ddd
 
+  - name : wikidata
+    engine : wikidata
+    shortcut : wd
+
   - name : duckduckgo
     engine : duckduckgo
     shortcut : ddg
diff --git a/searx/templates/default/infobox.html b/searx/templates/default/infobox.html
new file mode 100644
index 000000000..f963e898c
--- /dev/null
+++ b/searx/templates/default/infobox.html
@@ -0,0 +1,44 @@
+<div class="infobox">
+  <h2>{{ infobox.infobox }}</h2>
+  {% if infobox.img_src %}<img src="{{ infobox.img_src }}" />{% endif %}
+  <p>{{ infobox.entity }}</p>
+  <p>{{ infobox.content }}</p>
+  {% if infobox.attributes %}
+  <div class="attributes">
+    <table>
+      {% for attribute in infobox.attributes %}
+      <tr><td>{{ attribute.label }}</td><td>{{ attribute.value }}</td></tr>
+      {% endfor %}
+    </table>
+  </div>
+  {% endif %}
+
+  {% if infobox.urls %}
+  <div class="urls">
+    <ul>
+      {% for url in infobox.urls %}
+      <li class="url"><a href="{{ url.url }}">{{ url.title }}</a></li>
+      {% endfor %}
+    </ul>
+  </div>
+  {% endif %}
+
+  {% if infobox.relatedTopics %}
+  <div class="relatedTopics">
+      {% for topic in infobox.relatedTopics %}
+      <div>
+	<h3>{{ topic.name }}</h3>
+	{% for suggestion in topic.suggestions %}
+	<form method="{{ method or 'POST' }}" action="{{ url_for('index') }}">
+            <input type="hidden" name="q" value="{{ suggestion }}">
+            <input type="submit" value="{{ suggestion }}" />
+        </form>
+	{% endfor %}
+      </div>
+      {% endfor %}
+  </div>
+  {% endif %}
+
+  <br />
+  
+</div>

From 728fc611da126ddb20cb16ccaa214a1f6d8bcdbd Mon Sep 17 00:00:00 2001
From: dalf <alex@al-f.net>
Date: Wed, 1 Oct 2014 22:46:31 +0200
Subject: [PATCH 5/6] [fix] infoboxes and answers: pass all tests

---
 searx/templates/default/result_templates/default.html |  4 +---
 searx/tests/test_webapp.py                            | 10 +++++++++-
 2 files changed, 10 insertions(+), 4 deletions(-)
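
Note: Search.search() now returns a 4-tuple (results, suggestions,
answers, infoboxes), so every mocked return value grows by two entries;
an empty set is a neutral stand-in for each new collection (infoboxes
is really a list at runtime, but nothing in these tests iterates it):

    search.return_value = (self.test_results, set(), set(), set())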

diff --git a/searx/templates/default/result_templates/default.html b/searx/templates/default/result_templates/default.html
index 277946202..938d66d04 100644
--- a/searx/templates/default/result_templates/default.html
+++ b/searx/templates/default/result_templates/default.html
@@ -8,8 +8,6 @@
     <h3 class="result_title"><a href="{{ result.url }}">{{ result.title|safe }}</a></h3>
     <p class="url">{{ result.pretty_url }} <a class="cache_link" href="https://web.archive.org/web/{{ result.url }}">cached</a></p>
 	{% if result.publishedDate %}<p class="published_date">{{ result.publishedDate }}</p>{% endif %}
-    <p class="content">
-      {% if result.img_src %}<img src="{{ result.img_src|safe }}" class="image" />{% endif %}
-      {% if result.content %}{{ result.content|safe }}<br class="last"/>{% endif %}</p>
+    <p class="content">{% if result.img_src %}<img src="{{ result.img_src|safe }}" class="image" />{% endif %}{% if result.content %}{{ result.content|safe }}<br class="last"/>{% endif %}</p>
   </div>
 </div>
diff --git a/searx/tests/test_webapp.py b/searx/tests/test_webapp.py
index 9d1722eeb..9cf586180 100644
--- a/searx/tests/test_webapp.py
+++ b/searx/tests/test_webapp.py
@@ -43,6 +43,8 @@ class ViewsTestCase(SearxTestCase):
     def test_index_html(self, search):
         search.return_value = (
             self.test_results,
+            set(),
+            set(),
             set()
         )
         result = self.app.post('/', data={'q': 'test'})
@@ -51,7 +53,7 @@ class ViewsTestCase(SearxTestCase):
             result.data
         )
         self.assertIn(
-            '<p class="content">first <span class="highlight">test</span> content<br /></p>',  # noqa
+            '<p class="content">first <span class="highlight">test</span> content<br class="last"/></p>',  # noqa
             result.data
         )
 
@@ -59,6 +61,8 @@ class ViewsTestCase(SearxTestCase):
     def test_index_json(self, search):
         search.return_value = (
             self.test_results,
+            set(),
+            set(),
             set()
         )
         result = self.app.post('/', data={'q': 'test', 'format': 'json'})
@@ -75,6 +79,8 @@ class ViewsTestCase(SearxTestCase):
     def test_index_csv(self, search):
         search.return_value = (
             self.test_results,
+            set(),
+            set(),
             set()
         )
         result = self.app.post('/', data={'q': 'test', 'format': 'csv'})
@@ -90,6 +96,8 @@ class ViewsTestCase(SearxTestCase):
     def test_index_rss(self, search):
         search.return_value = (
             self.test_results,
+            set(),
+            set(),
             set()
         )
         result = self.app.post('/', data={'q': 'test', 'format': 'rss'})

From 63a0328c8b26c5d749ecf83ee73a44902e1d5cef Mon Sep 17 00:00:00 2001
From: dalf <alex@al-f.net>
Date: Thu, 2 Oct 2014 23:36:18 +0200
Subject: [PATCH 6/6] [enh] wikidata engine: add links to musicbrainz

---
 searx/engines/wikidata.py | 22 +++++++++++++++++++++-
 1 file changed, 21 insertions(+), 1 deletion(-)
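
Note: every MusicBrainz link follows the same pattern: read a Wikidata
property, prepend an entity URL. The four new blocks could equally be
written table-driven; a sketch using the same property ids and the
patch's own helpers:

    musicbrainz_props = [
        ('P435', 'http://musicbrainz.org/work/'),           # work ID
        ('P434', 'http://musicbrainz.org/artist/'),         # artist ID
        ('P436', 'http://musicbrainz.org/release-group/'),  # release group ID
        ('P966', 'http://musicbrainz.org/label/'),          # label ID
    ]
    for prop, base in musicbrainz_props:
        mb_id = get_string(claims, prop)
        if mb_id != None:
            add_url(urls, 'MusicBrainz', base + mb_id)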

diff --git a/searx/engines/wikidata.py b/searx/engines/wikidata.py
index 46f2323c8..e8af8e204 100644
--- a/searx/engines/wikidata.py
+++ b/searx/engines/wikidata.py
@@ -69,13 +69,33 @@ def getDetail(jsonresponse, wikidata_id, language):
         add_url(urls, 'Wikiquote (' + language + ')', get_wikilink(result, language + 'wikiquote'))
     add_url(urls, 'Wikiquote (en)', get_wikilink(result, 'enwikiquote'))
 
-
     add_url(urls, 'Commons wiki', get_wikilink(result, 'commonswiki'))
 
     add_url(urls, 'Location', get_geolink(claims, 'P625', None))
 
     add_url(urls, 'Wikidata', 'https://www.wikidata.org/wiki/' + wikidata_id + '?uselang='+ language)
 
+    musicbrainz_work_id = get_string(claims, 'P435')
+    if musicbrainz_work_id != None:
+        add_url(urls, 'MusicBrainz', 'http://musicbrainz.org/work/' + musicbrainz_work_id)
+
+    musicbrainz_artist_id = get_string(claims, 'P434')
+    if musicbrainz_artist_id != None:
+        add_url(urls, 'MusicBrainz', 'http://musicbrainz.org/artist/' + musicbrainz_artist_id)
+
+    musicbrainz_release_group_id = get_string(claims, 'P436')
+    if musicbrainz_release_group_id != None:
+        add_url(urls, 'MusicBrainz', 'http://musicbrainz.org/release-group/' + musicbrainz_release_group_id)
+
+    musicbrainz_label_id = get_string(claims, 'P966')
+    if musicbrainz_label_id != None:
+        add_url(urls, 'MusicBrainz', 'http://musicbrainz.org/label/' + musicbrainz_label_id)
+
+    # musicbrainz_area_id = get_string(claims, 'P982')
+    # P1407 MusicBrainz series ID
+    # P1004 MusicBrainz place ID
+    # P1330 MusicBrainz instrument ID
+
     postal_code = get_string(claims, 'P281', None)
     if postal_code != None:
         attributes.append({'label' : 'Postal code(s)', 'value' : postal_code})