[mod] duckduckgo_definitions: display only user friendly attributes / URL

various bug fixes
This commit is contained in:
Alexandre Flament 2020-10-26 19:25:28 +01:00
parent 382fded665
commit ca593728af
3 changed files with 167 additions and 76 deletions

View file

@ -12,28 +12,53 @@ DuckDuckGo (definitions)
import json
from urllib.parse import urlencode

from lxml import html

from searx import logger
from searx.data import WIKIDATA_UNITS
from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url, language_aliases
from searx.utils import extract_text, html_to_text, match_language, get_string_replaces_function
from searx.external_urls import get_external_url, get_earth_coordinates_url, area_to_osm_zoom

logger = logger.getChild('duckduckgo_definitions')

URL = 'https://api.duckduckgo.com/'\
    + '?{query}&format=json&pretty=0&no_redirect=1&d=1'

# URI prefixes under which Wikidata entities are published
# (stripped by unit_to_str to get the bare Qxxxx identifier).
WIKIDATA_PREFIX = [
    'http://www.wikidata.org/entity/',
    'https://www.wikidata.org/entity/'
]

# single-pass 'http:' -> 'https:' rewriter, built once at import time
replace_http_by_https = get_string_replaces_function({'http:': 'https:'})
def is_broken_text(text):
    """Detect a broken DuckDuckGo "related" result.

    duckduckgo may return something like
    ``<a href="xxxx">http://somewhere Related website<a/>``: the href URL is
    broken and the "Related website" part may contain HTML.  The best
    solution seems to be to ignore these results, so this predicate flags
    them: a text that starts with ``http`` but also contains a space cannot
    be a plain URL.
    """
    return text.startswith('http') and ' ' in text
def result_to_text(text, htmlResult):
    """Extract a plain-text suggestion from a DDG related-topic entry.

    Prefer the text of the first ``<a>`` element found in *htmlResult*;
    fall back to *text* when the HTML contains no link.  Returns ``None``
    when the extracted text looks broken (see is_broken_text).
    """
    # TODO : remove result ending with "Meaning" or "Category"
    dom = html.fromstring(htmlResult)
    anchors = dom.xpath('//a')
    result = extract_text(anchors[0]) if anchors else text
    if is_broken_text(result):
        return None
    return result
def request(query, params):
    """Build the DuckDuckGo Instant Answer API request.

    Sets ``params['url']`` from the module-level URL template and sends the
    user's language (base code only, e.g. "fr" from "fr-FR") through the
    Accept-Language header.
    """
    params['url'] = URL.format(query=urlencode({'q': query}))
    language = match_language(params['language'], supported_languages, language_aliases)
    language = language.split('-')[0]
    params['headers']['Accept-Language'] = language
    return params
def response(resp):
    """Parse the DuckDuckGo Instant Answer API JSON response.

    Returns a list of result dicts: a direct answer (when useful),
    suggestions, plain results, and — when enough data is available — an
    infobox restricted to user-friendly attributes and external URLs.
    """
    results = []

    search_res = json.loads(resp.text)

    # search_res.get('Entity') possible values (not exhaustive) :
    # * continent / country / department / location / waterfall
    # * actor / musician / artist
    # * book / performing art / film / television / media franchise / concert tour / playwright
    # * prepared food
    # * website / software / os / programming language / file format / software engineer
    # * company

    content = ''
    heading = search_res.get('Heading', '')
    attributes = []
    urls = []
    infobox_id = None
    relatedTopics = []

    # add answer if there is one
    answer = search_res.get('Answer', '')
    if answer:
        logger.debug('AnswerType="%s" Answer="%s"', search_res.get('AnswerType'), answer)
        # "calc" and "ip" answer types are not useful to display
        if search_res.get('AnswerType') not in ['calc', 'ip']:
            results.append({'answer': html_to_text(answer)})

    # add infobox content
    if 'Definition' in search_res:
        content = content + search_res.get('Definition', '')

    if 'Abstract' in search_res:
        content = content + search_res.get('Abstract', '')

    # image
    image = search_res.get('Image')
    image = None if image == '' else image

    # urls
    # Official website, Wikipedia page
    for ddg_result in search_res.get('Results', []):
        firstURL = ddg_result.get('FirstURL')
        text = ddg_result.get('Text')
        if firstURL is not None and text is not None:
            urls.append({'title': text, 'url': firstURL})
            results.append({'title': heading, 'url': firstURL})

    # related topics
    for ddg_result in search_res.get('RelatedTopics', []):
        if 'FirstURL' in ddg_result:
            firstURL = ddg_result.get('FirstURL')
            text = ddg_result.get('Text')
            if not is_broken_text(text):
                suggestion = result_to_text(text,
                                            ddg_result.get('Result'))
                if suggestion != heading and suggestion is not None:
                    results.append({'suggestion': suggestion})
        elif 'Topics' in ddg_result:
            suggestions = []
            relatedTopics.append({'name': ddg_result.get('Name', ''),
                                  'suggestions': suggestions})
            for topic_result in ddg_result.get('Topics', []):
                suggestion = result_to_text(topic_result.get('Text'),
                                            topic_result.get('Result'))
                if suggestion != heading and suggestion is not None:
                    suggestions.append(suggestion)

    # abstract
    abstractURL = search_res.get('AbstractURL', '')
    if abstractURL != '':
        # add as result ? problem always in english
        infobox_id = abstractURL
        urls.append({'title': search_res.get('AbstractSource'),
                     'url': abstractURL,
                     'official': True})
        results.append({'url': abstractURL,
                        'title': heading})

    # definition
    definitionURL = search_res.get('DefinitionURL', '')
    if definitionURL != '':
        # add as result ? as answer ? problem always in english
        infobox_id = definitionURL
        urls.append({'title': search_res.get('DefinitionSource'),
                     'url': definitionURL})

    # to merge with wikidata's infobox
    if infobox_id:
        infobox_id = replace_http_by_https(infobox_id)

    # attributes
    # some will be converted to urls
    if 'Infobox' in search_res:
        infobox = search_res.get('Infobox')
        if 'content' in infobox:
            osm_zoom = 17
            coordinates = None
            for info in infobox.get('content'):
                data_type = info.get('data_type')
                data_label = info.get('label')
                data_value = info.get('value')

                # Workaround: ddg may return a double quote
                if data_value == '""':
                    continue

                # Is it an external URL ?
                # * imdb_id / facebook_profile / youtube_channel / youtube_video / twitter_profile
                # * instagram_profile / rotten_tomatoes / spotify_artist_id / itunes_artist_id / soundcloud_id
                # * netflix_id
                external_url = get_external_url(data_type, data_value)
                if external_url is not None:
                    urls.append({'title': data_label,
                                 'url': external_url})
                elif data_type in ['instance', 'wiki_maps_trigger', 'google_play_artist_id']:
                    # ignore instance: Wikidata value from "Instance Of" (Qxxxx)
                    # ignore wiki_maps_trigger: reference to a javascript
                    # ignore google_play_artist_id: service shutdown
                    pass
                elif data_type == 'string' and data_label == 'Website':
                    # There is already an URL for the website
                    pass
                elif data_type == 'area':
                    attributes.append({'label': data_label,
                                       'value': area_to_str(data_value),
                                       'entity': 'P2046'})
                    osm_zoom = area_to_osm_zoom(data_value.get('amount'))
                elif data_type == 'coordinates':
                    if data_value.get('globe') == 'http://www.wikidata.org/entity/Q2':
                        # coordinate on Earth
                        # get the zoom information from the area
                        coordinates = info
                    else:
                        # coordinate NOT on Earth
                        attributes.append({'label': data_label,
                                           'value': data_value,
                                           'entity': 'P625'})
                elif data_type == 'string':
                    attributes.append({'label': data_label,
                                       'value': data_value})

            if coordinates:
                data_label = coordinates.get('label')
                data_value = coordinates.get('value')
                latitude = data_value.get('latitude')
                longitude = data_value.get('longitude')
                url = get_earth_coordinates_url(latitude, longitude, osm_zoom)
                urls.append({'title': 'OpenStreetMap',
                             'url': url,
                             'entity': 'P625'})

    if len(heading) > 0:
        # TODO get infobox.meta.value where .label='article_title'
        if image is None and len(attributes) == 0 and len(urls) == 1 and\
           len(relatedTopics) == 0 and len(content) == 0:
            results.append({'url': urls[0]['url'],
                            'title': heading,
                            'content': content})
        else:
            results.append({'infobox': heading,
                            'id': infobox_id,
                            'content': content,
                            'img_src': image,
                            'attributes': attributes,
                            'urls': urls,
                            'relatedTopics': relatedTopics})

    return results
def unit_to_str(unit):
    """Return a human-readable unit for a Wikidata entity URI.

    *unit* is expected to be a URI such as
    ``http://www.wikidata.org/entity/Q712226``; the Qxxxx identifier is
    looked up in WIKIDATA_UNITS.  Unknown URIs are returned unchanged.
    Returns ``None`` when *unit* is ``None`` (the 'unit' key may be missing
    from the value dict — see area_to_str).
    """
    if unit is None:
        return None
    for prefix in WIKIDATA_PREFIX:
        if unit.startswith(prefix):
            wikidata_entity = unit[len(prefix):]
            return WIKIDATA_UNITS.get(wikidata_entity, unit)
    return unit
def area_to_str(area):
    """parse {'unit': 'http://www.wikidata.org/entity/Q712226', 'amount': '+20.99'}

    Returns "<amount> <unit>" with the amount parsed as a float and the unit
    made human-readable; falls back to the raw values when the amount is not
    a number or the unit is unknown.
    """
    unit = unit_to_str(area.get('unit'))
    if unit is not None:
        try:
            amount = float(area.get('amount'))
            return '{} {}'.format(amount, unit)
        except ValueError:
            pass
    return '{} {}'.format(area.get('amount', ''), area.get('unit', ''))

View file

@ -25,11 +25,7 @@
{%- if attribute.image -%} {%- if attribute.image -%}
<td><img class="img-responsive" src="{{ image_proxify(attribute.image.src) }}" alt="{{ attribute.image.alt }}" /></td> <td><img class="img-responsive" src="{{ image_proxify(attribute.image.src) }}" alt="{{ attribute.image.alt }}" /></td>
{%- else -%} {%- else -%}
{% if attribute.label == 'Instance of' %} <td><bdi>{{ attribute.value }}</bdi></td>
<td><bdi><a href="https://wikidata.org/wiki/{{ attribute.value.id }}">{{ attribute.value.id }}</a></bdi></td>
{% else %}
<td><bdi>{{ attribute.value }}</bdi></td>
{%- endif -%}
{%- endif -%} {%- endif -%}
</tr> </tr>
{% endfor -%} {% endfor -%}

View file

@ -481,6 +481,16 @@ def ecma_unescape(s):
return s return s
def get_string_replaces_function(replaces):
    """Return a function performing all of *replaces* in a single pass.

    *replaces* maps source substrings to their replacements.  The returned
    function substitutes every occurrence of every key in one ``re.sub``
    call, which is both faster and safer than chained ``str.replace`` calls
    (no replacement can be re-matched by a later key).
    """
    rep = {re.escape(k): v for k, v in replaces.items()}
    pattern = re.compile("|".join(rep.keys()))

    def f(text):
        # m.group(0) is the matched source substring; re-escape it to find
        # its replacement in the escaped-key dict
        return pattern.sub(lambda m: rep[re.escape(m.group(0))], text)

    return f
def get_engine_from_settings(name): def get_engine_from_settings(name):
"""Return engine configuration from settings.yml of a given engine name""" """Return engine configuration from settings.yml of a given engine name"""