Ponysearch/searx/engines/json_engine.py

from urllib.parse import urlencode
from json import loads
from collections.abc import Iterable
# engine settings, normally filled in by the searx engine loader
# from settings.yml
search_url = None
url_query = None
content_query = None
title_query = None
# suggestion_xpath = ''

# parameters for engines with paging support
#
# whether paging is enabled at all; referenced by request() below, and
# defined here with a default so the module also runs standalone
paging = False
# number of results on each page
# (only needed if the site requires not a page number, but an offset)
page_size = 1
# number of the first page (usually 0 or 1)
first_page_num = 1
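
# example configuration (values are hypothetical, for illustration only):
#
#     search_url = 'https://example.com/api?q={query}&pageno={pageno}'
#     url_query = 'results/url'
#     title_query = 'results/title'
#     content_query = 'results/content'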


def iterate(iterable):
    # yield (key, value) pairs for dicts and (str(index), value) pairs for
    # lists, so both container types can be walked the same way
    if isinstance(iterable, dict):
        it = iterable.items()
    else:
        it = enumerate(iterable)
    for index, value in it:
        yield str(index), value


def is_iterable(obj):
    # strings are iterable too, but the tree walk must not descend into
    # them character by character
    if isinstance(obj, str):
        return False
    return isinstance(obj, Iterable)


def parse(query):
    # split a path expression like 'results/0/url' into its non-empty parts
    q = []
    for part in query.split('/'):
        if part == '':
            continue
        q.append(part)
    return q


def do_query(data, q):
    # recursively collect every value whose key path matches q; unmatched
    # branches are still searched, so the path may begin anywhere in the tree
    ret = []
    if not q:
        return ret

    qkey = q[0]

    for key, value in iterate(data):
        if len(q) == 1:
            if key == qkey:
                ret.append(value)
            elif is_iterable(value):
                ret.extend(do_query(value, q))
        else:
            if not is_iterable(value):
                continue
            if key == qkey:
                ret.extend(do_query(value, q[1:]))
            else:
                ret.extend(do_query(value, q))
    return ret


def query(data, query_string):
    q = parse(query_string)
    return do_query(data, q)
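
# a quick sketch of how the path query walks nested JSON (the sample
# data is made up):
#
#     >>> data = {'results': [{'title': 'a', 'url': 'u1'},
#     ...                     {'title': 'b', 'url': 'u2'}]}
#     >>> query(data, 'results/title')
#     ['a', 'b']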


def request(query, params):
    # urlencode gives 'q=<encoded query>'; strip the leading 'q=' so only
    # the url-encoded query text remains
    query = urlencode({'q': query})[2:]
    fp = {'query': query}

    if paging and search_url.find('{pageno}') >= 0:
        fp['pageno'] = (params['pageno'] + first_page_num - 1) * page_size

    params['url'] = search_url.format(**fp)
    params['query'] = query

    return params
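
# for illustration: with the hypothetical settings
# search_url = 'https://example.com/api?q={query}&p={pageno}' and
# paging = True, a request for the query 'test' with params['pageno'] == 2
# gets params['url'] = 'https://example.com/api?q=test&p=2'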


def response(resp):
    results = []
    json = loads(resp.text)

    urls = query(json, url_query)
    contents = query(json, content_query)
    titles = query(json, title_query)

    # searx expects a list of dicts with 'url', 'title' and 'content' keys
    for url, title, content in zip(urls, titles, contents):
        results.append({'url': url, 'title': title, 'content': content})

    return results
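
# a minimal smoke test of the module in isolation; the fake response
# class and the sample payload below are made up, searx normally supplies
# a real HTTP response object and fills in the settings at the top
if __name__ == '__main__':
    class FakeResponse:
        text = ('{"items": [{"link": "https://example.com",'
                ' "name": "Example", "snippet": "a sample snippet"}]}')

    url_query = 'items/link'
    title_query = 'items/name'
    content_query = 'items/snippet'
    print(response(FakeResponse()))
    # -> [{'url': 'https://example.com', 'title': 'Example',
    #      'content': 'a sample snippet'}]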