# Ponysearch/searx/engines/__init__.py

'''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with searx. If not, see <http://www.gnu.org/licenses/>.
(C) 2013- by Adam Tauber, <asciimoo@gmail.com>
'''

from os.path import realpath, dirname, splitext, join
from imp import load_source
import grequests
from itertools import izip_longest, chain
from operator import itemgetter
from urlparse import urlparse
from searx import settings
import ConfigParser
import sys

engine_dir = dirname(realpath(__file__))
searx_dir = join(engine_dir, '../../')
engines_config = ConfigParser.SafeConfigParser()
engines_config.read(join(searx_dir, 'engines.cfg'))
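
# Hypothetical engines.cfg excerpt, illustrating the format this loader
# expects: one section per engine, where 'engine' names the module file in
# this directory and every other option becomes an attribute on the module.
#
#   [wikipedia]
#   engine = wikipedia
#   categories = general
#
#   [duckduckgo]
#   engine = duckduckgo
#   categories = general
#   locale = en-us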

engines = {}

categories = {'general': []}

def load_module(filename):
    # import (or re-import) an engine module from this directory
    modname = splitext(filename)[0]
    if modname in sys.modules:
        del sys.modules[modname]
    filepath = join(engine_dir, filename)
    module = load_source(modname, filepath)
    module.name = modname
    return module


# one engine per config section; extra options become engine attributes
for section in engines_config.sections():
    engine_data = engines_config.options(section)
    engine = load_module(engines_config.get(section, 'engine')+'.py')
    engine.name = section
    for param_name in engine_data:
        if param_name == 'engine':
            continue
        if param_name == 'categories':
            engine.categories = map(str.strip, engines_config.get(section, param_name).split(','))
            continue
        setattr(engine, param_name, engines_config.get(section, param_name))
    engines[engine.name] = engine
    if hasattr(engine, 'categories'):
        for category_name in engine.categories:
            categories.setdefault(category_name, []).append(engine)
    else:
        categories['general'].append(engine)

def default_request_params():
    return {'method': 'GET', 'headers': {}, 'data': {}, 'url': '', 'cookies': {}}
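
# A minimal engine module might look like the sketch below (hypothetical
# example.com engine, shown as comments to keep this module importable):
# request() fills in the params dict above, response() turns the HTTP
# response into result dicts.
#
#   def request(query, params):
#       params['url'] = 'https://example.com/search?q=' + query
#       return params
#
#   def response(resp):
#       return [{'url': 'https://example.com/hit',
#                'title': 'Example hit',
#                'content': 'Example snippet'}]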

def make_callback(engine_name, results, callback):
    # wrap an engine's response() parser so the grequests hook stores its
    # parsed results under the engine's name
    def process_callback(response, **kwargs):
        cb_res = []
        for result in callback(response):
            result['engine'] = engine_name
            cb_res.append(result)
        results[engine_name] = cb_res
    return process_callback
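
# After grequests.map() runs in search(), `results` maps each engine name
# to its parsed hits, e.g. (hypothetical values):
#   {'wikipedia': [{'url': '...', 'title': '...', 'content': '...',
#                   'engine': 'wikipedia'}, ...]}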

def search(query, request, selected_categories):
    global engines, categories
    requests = []
    results = {}
    selected_engines = []
    user_agent = request.headers.get('User-Agent', '')

    if not len(selected_categories):
        selected_categories = ['general']

    for categ in selected_categories:
        selected_engines.extend({'category': categ, 'name': x.name}
                                for x in categories[categ])

    for selected_engine in selected_engines:
        if selected_engine['name'] not in engines:
            continue
        engine = engines[selected_engine['name']]
        request_params = default_request_params()
        request_params['headers']['User-Agent'] = user_agent
        request_params['category'] = selected_engine['category']
        request_params = engine.request(query, request_params)
        callback = make_callback(selected_engine['name'], results, engine.response)
        if request_params['method'] == 'GET':
            req = grequests.get(request_params['url'],
                                headers=request_params['headers'],
                                hooks=dict(response=callback),
                                cookies=request_params['cookies'])
        else:
            req = grequests.post(request_params['url'],
                                 data=request_params['data'],
                                 headers=request_params['headers'],
                                 hooks=dict(response=callback),
                                 cookies=request_params['cookies'])
        requests.append(req)
    # fire all engine requests concurrently; the callbacks fill `results`
    grequests.map(requests)

    # interleave the per-engine result lists so every engine's best hits
    # come first, then drop the padding Nones added by izip_longest
    flat_res = filter(None, chain.from_iterable(izip_longest(*results.values())))
    flat_len = len(flat_res)
    results = []
    # deduplication + scoring
    for i, res in enumerate(flat_res):
        res['parsed_url'] = urlparse(res['url'])
        score = (flat_len - i) * settings.weights.get(res['engine'], 1)
        duplicated = False
        for new_res in results:
            if res['parsed_url'].netloc == new_res['parsed_url'].netloc and\
               res['parsed_url'].path == new_res['parsed_url'].path and\
               res['parsed_url'].query == new_res['parsed_url'].query and\
               res.get('template') == new_res.get('template'):
                duplicated = new_res
                break
        if duplicated:
            # merge: keep the longer snippet, add up scores, join engine names
            if len(res.get('content', '')) > len(duplicated.get('content', '')):
                duplicated['content'] = res['content']
            duplicated['score'] += score
            duplicated['engine'] += ', ' + res['engine']
            # prefer the https variant of a duplicated url; ParseResult is
            # immutable, so swap the scheme via _replace
            if duplicated['parsed_url'].scheme == 'https':
                continue
            elif res['parsed_url'].scheme == 'https':
                duplicated['parsed_url'] = duplicated['parsed_url']._replace(scheme='https')
                duplicated['url'] = duplicated['parsed_url'].geturl()
        else:
            res['score'] = score
            results.append(res)
    return sorted(results, key=itemgetter('score'), reverse=True)
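
# Hedged usage sketch (hypothetical, e.g. from a Flask view, since search()
# only needs a request object with a .headers mapping):
#
#   from flask import request
#   from searx.engines import search
#
#   hits = search('free software', request, ['general'])
#   for hit in hits[:5]:
#       print hit['score'], hit['url']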