# -*- coding: utf-8 -*-
import os
import sys
import csv
import hashlib
import hmac
import re
import json

from codecs import getincrementalencoder
from imp import load_source
from numbers import Number
from os.path import splitext, join
from io import open, StringIO
from random import choice
from html.parser import HTMLParser

from lxml.etree import XPath
from babel.core import get_global
from babel.dates import format_date

from searx import settings
from searx.version import VERSION_STRING
from searx.languages import language_codes
from searx import logger


logger = logger.getChild('utils')

blocked_tags = ('script',
                'style')

ecma_unescape4_re = re.compile(r'%u([0-9a-fA-F]{4})', re.UNICODE)
ecma_unescape2_re = re.compile(r'%([0-9a-fA-F]{2})', re.UNICODE)

useragents = json.loads(open(os.path.dirname(os.path.realpath(__file__))
                             + "/data/useragents.json", 'r', encoding='utf-8').read())

xpath_cache = dict()
lang_to_lc_cache = dict()


def searx_useragent():
    return 'searx/{searx_version} {suffix}'.format(
        searx_version=VERSION_STRING,
        suffix=settings['outgoing'].get('useragent_suffix', ''))
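# searx_useragent() yields a string like 'searx/<version> <suffix>', e.g.
# 'searx/0.17.0 +https://example.org' (values here are illustrative; the real ones
# come from VERSION_STRING and the optional outgoing.useragent_suffix setting).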


def gen_useragent(os=None):
    return str(useragents['ua'].format(os=os or choice(useragents['os']), version=choice(useragents['versions'])))
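# gen_useragent() fills the 'ua' template from data/useragents.json with a random
# OS string and browser version from that same file; the exact output therefore
# depends on the shipped data, e.g. a Firefox-style User-Agent string.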


def highlight_content(content, query):

    if not content:
        return None

    # ignoring html contents
    # TODO better html content detection
    if content.find('<') != -1:
        return content

    if content.lower().find(query.lower()) > -1:
        query_regex = '({0})'.format(re.escape(query))
        content = re.sub(query_regex, '<span class="highlight">\\1</span>',
                         content, flags=re.I | re.U)
    else:
        regex_parts = []
        for chunk in query.split():
            if len(chunk) == 1:
                regex_parts.append('\\W+{0}\\W+'.format(re.escape(chunk)))
            else:
                regex_parts.append('{0}'.format(re.escape(chunk)))
        query_regex = '({0})'.format('|'.join(regex_parts))
        content = re.sub(query_regex, '<span class="highlight">\\1</span>',
                         content, flags=re.I | re.U)

    return content
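# Example: highlight_content('Hello world', 'world')
# returns 'Hello <span class="highlight">world</span>'.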


class HTMLTextExtractorException(Exception):
    pass


class HTMLTextExtractor(HTMLParser):

    def __init__(self):
        HTMLParser.__init__(self)
        self.result = []
        self.tags = []

    def handle_starttag(self, tag, attrs):
        self.tags.append(tag)

    def handle_endtag(self, tag):
        if not self.tags:
            return

        if tag != self.tags[-1]:
            raise HTMLTextExtractorException()

        self.tags.pop()

    def is_valid_tag(self):
        return not self.tags or self.tags[-1] not in blocked_tags

    def handle_data(self, d):
        if not self.is_valid_tag():
            return
        self.result.append(d)

    def handle_charref(self, number):
        if not self.is_valid_tag():
            return
        if number[0] in ('x', 'X'):
            codepoint = int(number[1:], 16)
        else:
            codepoint = int(number)
        self.result.append(chr(codepoint))

    def handle_entityref(self, name):
        if not self.is_valid_tag():
            return
        # codepoint = htmlentitydefs.name2codepoint[name]
        # self.result.append(chr(codepoint))
        self.result.append(name)

    def get_text(self):
        return ''.join(self.result).strip()


def html_to_text(html):
    html = html.replace('\n', ' ')
    html = ' '.join(html.split())
    s = HTMLTextExtractor()
    try:
        s.feed(html)
    except HTMLTextExtractorException:
        logger.debug("HTMLTextExtractor: invalid HTML\n%s", html)
    return s.get_text()
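# Example: html_to_text('<p>regular <strong>text</strong></p>') returns 'regular text';
# the contents of <script> and <style> tags are dropped entirely.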


class UnicodeWriter:
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.
    """

    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # Redirect output to a queue
        self.queue = StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        self.encoder = getincrementalencoder(encoding)()

    def writerow(self, row):
        self.writer.writerow(row)
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.strip('\x00')
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data.decode())
        # empty queue
        self.queue.truncate(0)

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)
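# Typical use (illustrative): wrap an already opened text stream and write rows, e.g.
#     writer = UnicodeWriter(output_stream)
#     writer.writerow(['title', 'url'])
#     writer.writerows(rows)
# where output_stream and rows are placeholders for the caller's objects.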


def get_resources_directory(searx_directory, subdirectory, resources_directory):
    if not resources_directory:
        resources_directory = os.path.join(searx_directory, subdirectory)
    if not os.path.isdir(resources_directory):
        raise Exception(resources_directory + " is not a directory")
    return resources_directory


def get_themes(templates_path):
    """Returns available themes list."""
    themes = os.listdir(templates_path)
    if '__common__' in themes:
        themes.remove('__common__')
    return themes


def get_static_files(static_path):
    static_files = set()
    static_path_length = len(static_path) + 1
    for directory, _, files in os.walk(static_path):
        for filename in files:
            f = os.path.join(directory[static_path_length:], filename)
            static_files.add(f)
    return static_files
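# The returned paths are relative to static_path, e.g. 'themes/oscar/css/searx.min.css'
# (the exact entries depend on what is present on disk).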


def get_result_templates(templates_path):
    result_templates = set()
    templates_path_length = len(templates_path) + 1
    for directory, _, files in os.walk(templates_path):
        if directory.endswith('result_templates'):
            for filename in files:
                f = os.path.join(directory[templates_path_length:], filename)
                result_templates.add(f)
    return result_templates


def format_date_by_locale(date, locale_string):
    # strftime works only on dates after 1900

    if date.year <= 1900:
        return date.isoformat().split('T')[0]

    if locale_string == 'all':
        locale_string = settings['ui']['default_locale'] or 'en_US'

    # to avoid crashing if locale is not supported by babel
    try:
        formatted_date = format_date(date, locale=locale_string)
    except Exception:
        formatted_date = format_date(date, "YYYY-MM-dd")

    return formatted_date


def dict_subset(d, properties):
    result = {}
    for k in properties:
        if k in d:
            result[k] = d[k]
    return result
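# Example: dict_subset({'a': 1, 'b': 2, 'c': 3}, ('a', 'c', 'z')) returns {'a': 1, 'c': 3};
# keys missing from d are silently skipped.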


def prettify_url(url, max_length=74):
    if len(url) > max_length:
        chunk_len = int(max_length / 2 + 1)
        return '{0}[...]{1}'.format(url[:chunk_len], url[-chunk_len:])
    else:
        return url
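# URLs longer than max_length keep only their first and last (max_length // 2 + 1)
# characters, joined by '[...]'; shorter URLs are returned unchanged.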


# get element in list or default value
def list_get(a_list, index, default=None):
    if len(a_list) > index:
        return a_list[index]
    else:
        return default


def get_torrent_size(filesize, filesize_multiplier):
    try:
        filesize = float(filesize)

        # decimal (SI) prefixes are 1000-based, binary (IEC) prefixes are 1024-based
        if filesize_multiplier == 'TB':
            filesize = int(filesize * 1000 * 1000 * 1000 * 1000)
        elif filesize_multiplier == 'GB':
            filesize = int(filesize * 1000 * 1000 * 1000)
        elif filesize_multiplier == 'MB':
            filesize = int(filesize * 1000 * 1000)
        elif filesize_multiplier == 'KB':
            filesize = int(filesize * 1000)
        elif filesize_multiplier == 'TiB':
            filesize = int(filesize * 1024 * 1024 * 1024 * 1024)
        elif filesize_multiplier == 'GiB':
            filesize = int(filesize * 1024 * 1024 * 1024)
        elif filesize_multiplier == 'MiB':
            filesize = int(filesize * 1024 * 1024)
        elif filesize_multiplier == 'KiB':
            filesize = int(filesize * 1024)
    except Exception:
        filesize = None

    return filesize
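# Example (with the prefix mapping above): get_torrent_size('4', 'MiB') returns 4194304
# (4 * 1024 * 1024), get_torrent_size('4', 'MB') returns 4000000; invalid input returns None.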


def convert_str_to_int(number_str):
    if number_str.isdigit():
        return int(number_str)
    else:
        return 0


# convert a variable to integer or return 0 if it's not a number
def int_or_zero(num):
    if isinstance(num, list):
        if len(num) < 1:
            return 0
        num = num[0]
    return convert_str_to_int(num)
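# Example: int_or_zero(['42']) returns 42, int_or_zero([]) returns 0 and
# int_or_zero('abc') returns 0; the argument is expected to be a string or a list of strings.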


def is_valid_lang(lang):
    if isinstance(lang, bytes):
        lang = lang.decode()
    is_abbr = (len(lang) == 2)
    lang = lang.lower()
    if is_abbr:
        for l in language_codes:
            if l[0][:2] == lang:
                return (True, l[0][:2], l[3].lower())
        return False
    else:
        for l in language_codes:
            if l[1].lower() == lang or l[3].lower() == lang:
                return (True, l[0][:2], l[3].lower())
        return False


def _get_lang_to_lc_dict(lang_list):
    key = str(lang_list)
    value = lang_to_lc_cache.get(key, None)
    if value is None:
        value = dict()
        for lc in lang_list:
            value.setdefault(lc.split('-')[0], lc)
        lang_to_lc_cache[key] = value
    return value
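# Example: _get_lang_to_lc_dict(['fr-FR', 'fr-CA', 'en-US']) returns
# {'fr': 'fr-FR', 'en': 'en-US'}; the first locale seen for a language wins,
# and the result is cached per lang_list.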


# auxiliary function to match lang_code in lang_list
def _match_language(lang_code, lang_list=[], custom_aliases={}):
    # replace language code with a custom alias if necessary
    if lang_code in custom_aliases:
        lang_code = custom_aliases[lang_code]

    if lang_code in lang_list:
        return lang_code

    # try to get the most likely country for this language
    subtags = get_global('likely_subtags').get(lang_code)
    if subtags:
        subtag_parts = subtags.split('_')
        new_code = subtag_parts[0] + '-' + subtag_parts[-1]
        if new_code in custom_aliases:
            new_code = custom_aliases[new_code]
        if new_code in lang_list:
            return new_code

    # try to get any supported country for this language
    return _get_lang_to_lc_dict(lang_list).get(lang_code, None)


# get the language code from lang_list that best matches locale_code
def match_language(locale_code, lang_list=[], custom_aliases={}, fallback='en-US'):
    # try to get language from given locale_code
    language = _match_language(locale_code, lang_list, custom_aliases)
    if language:
        return language

    locale_parts = locale_code.split('-')
    lang_code = locale_parts[0]

    # try to get language using an equivalent country code
    if len(locale_parts) > 1:
        country_alias = get_global('territory_aliases').get(locale_parts[-1])
        if country_alias:
            language = _match_language(lang_code + '-' + country_alias[0], lang_list, custom_aliases)
            if language:
                return language

    # try to get language using an equivalent language code
    alias = get_global('language_aliases').get(lang_code)
    if alias:
        language = _match_language(alias, lang_list, custom_aliases)
        if language:
            return language

    if lang_code != locale_code:
        # try to get language from given language without giving the country
        language = _match_language(lang_code, lang_list, custom_aliases)

    return language or fallback
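# Example: match_language('pt-BR', ['fr-FR', 'pt-PT', 'pt-BR']) returns 'pt-BR';
# match_language('zz', ['fr-FR', 'en-US']) falls back to 'en-US'. Results that rely
# on babel's likely_subtags / alias data depend on the installed babel version.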


def load_module(filename, module_dir):
    modname = splitext(filename)[0]
    if modname in sys.modules:
        del sys.modules[modname]
    filepath = join(module_dir, filename)
    module = load_source(modname, filepath)
    module.name = modname
    return module


def new_hmac(secret_key, url):
    try:
        secret_key_bytes = bytes(secret_key, 'utf-8')
    except TypeError as err:
        if isinstance(secret_key, bytes):
            secret_key_bytes = secret_key
        else:
            raise err
    return hmac.new(secret_key_bytes, url, hashlib.sha256).hexdigest()
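# Note: url must already be bytes, e.g. new_hmac('secret', b'https://example.org/img.png');
# passing a str raises a TypeError inside hmac.new. secret_key may be str or bytes.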


def to_string(obj):
    if isinstance(obj, str):
        return obj
    if isinstance(obj, Number):
        return str(obj)
    if hasattr(obj, '__str__'):
        return obj.__str__()
    if hasattr(obj, '__repr__'):
        return obj.__repr__()


def ecma_unescape(s):
    """
    python implementation of the unescape javascript function

    https://www.ecma-international.org/ecma-262/6.0/#sec-unescape-string
    https://developer.mozilla.org/fr/docs/Web/JavaScript/Reference/Objets_globaux/unescape
    """
    # s = unicode(s)
    # "%u5409" becomes "吉"
    s = ecma_unescape4_re.sub(lambda e: chr(int(e.group(1), 16)), s)
    # "%20" becomes " ", "%F3" becomes "ó"
    s = ecma_unescape2_re.sub(lambda e: chr(int(e.group(1), 16)), s)
    return s
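# Example: ecma_unescape('%u5409%20%F3') returns '吉 ó'.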


def get_engine_from_settings(name):
    """Return the configuration of the engine named `name` from settings.yml."""

    if 'engines' not in settings:
        return {}

    for engine in settings['engines']:
        if 'name' not in engine:
            continue
        if name == engine['name']:
            return engine

    return {}


def get_xpath(xpath_str):
    result = xpath_cache.get(xpath_str, None)
    if result is None:
        result = XPath(xpath_str)
        xpath_cache[xpath_str] = result
    return result


def eval_xpath(element, xpath_str):
    xpath = get_xpath(xpath_str)
    return xpath(element)
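# Typical use (illustrative), with an lxml document:
#     from lxml import html
#     dom = html.fromstring('<div><a href="/about">about</a></div>')
#     eval_xpath(dom, '//a/@href')   # -> ['/about']
# Compiled XPath objects are reused via xpath_cache.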