Merge pull request #296 from return42/engine-logger

one logger per engine
Alexandre Flament 2021-09-06 19:50:58 +02:00 committed by GitHub
commit 17e739a859
24 changed files with 3 additions and 67 deletions
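
In short: instead of every engine module importing `logger` from `searx` and deriving its own child logger by hand, `load_engine()` now attaches a per-engine child logger to each engine module, and the per-module boilerplate (which had drifted into copy-paste mistakes, e.g. artic.py naming its logger 'APKMirror engine' and solidtorrents naming its logger 'solidtor engine') is deleted. Because the name `logger` is injected at load time rather than imported, pylint is additionally told to treat it as a builtin for engine modules. A runnable sketch of the mechanism follows the `load_engine()` hunk below.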

manage

@@ -37,7 +37,7 @@ PYLINT_SEARX_DISABLE_OPTION="\
 I,C,R,\
 W0105,W0212,W0511,W0603,W0613,W0621,W0702,W0703,W1401,\
 E1136"
-PYLINT_ADDITIONAL_BUILTINS_FOR_ENGINES="supported_languages,language_aliases"
+PYLINT_ADDITIONAL_BUILTINS_FOR_ENGINES="supported_languages,language_aliases,logger"
 PYLINT_OPTIONS="-m pylint -j 0 --rcfile .pylintrc"

 help() {
@@ -588,6 +588,7 @@ test.pylint() {
     ( set -e
         build_msg TEST "[pylint] \$PYLINT_FILES"
         pyenv.cmd python ${PYLINT_OPTIONS} ${PYLINT_VERBOSE} \
+            --additional-builtins="${PYLINT_ADDITIONAL_BUILTINS_FOR_ENGINES}" \
             "${PYLINT_FILES[@]}"
         build_msg TEST "[pylint] searx/engines"
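
Note on the `manage` change above: engine modules now reference `logger` without importing or defining it, so pylint would otherwise flag every use as `undefined-variable` (E0602). Listing it in `PYLINT_ADDITIONAL_BUILTINS_FOR_ENGINES`, next to the already-injected `supported_languages` and `language_aliases`, feeds it to pylint's `--additional-builtins` option, which declares names assumed to exist at runtime.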


@@ -111,6 +111,7 @@ def load_engine(engine_data):
     if is_missing_required_attributes(engine):
         return None

+    engine.logger = logger.getChild(engine_name)
     return engine
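
The single added line above is the whole mechanism: `engine` is the loaded engine module, and assigning an attribute on a module object is the same as defining a global inside it, so every function in the engine sees a `logger` global without any import. A minimal, self-contained sketch of the idea (the engine module and its names here are hypothetical stand-ins, not code from this commit):

```python
import logging
import types

# A stand-in for an engine module; in searx this would be loaded
# with importlib from searx/engines/<name>.py.
engine = types.ModuleType('dummy_engine')
exec(
    "def request(query, params):\n"
    "    # `logger` is not imported here -- the loader injects it\n"
    "    logger.debug('query=%s', query)\n"
    "    return params\n",
    engine.__dict__,
)

logger = logging.getLogger('searx.engines')

# What load_engine() now does for every engine:
engine.logger = logger.getChild('dummy_engine')

logging.basicConfig(level=logging.DEBUG)
engine.request('test', {})  # logs under 'searx.engines.dummy_engine'
```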


@@ -8,15 +8,12 @@
 from urllib.parse import urlencode
 from lxml import html

-from searx import logger
 from searx.utils import (
     eval_xpath_list,
     eval_xpath_getindex,
     extract_text,
 )

-logger = logger.getChild('APKMirror engine')
-
 about = {
     "website": 'https://www.apkmirror.com',
     "wikidata_id": None,


@@ -13,9 +13,6 @@ Explore thousands of artworks from The Art Institute of Chicago.
 from json import loads
 from urllib.parse import urlencode

-from searx import logger
-
-logger = logger.getChild('APKMirror engine')
 about = {
     "website": 'https://www.artic.edu',
     "wikidata_id": 'Q239303',


@@ -6,11 +6,8 @@
 import re
 from urllib.parse import urlencode
 from lxml import html
-from searx import logger
 from searx.utils import eval_xpath, extract_text, match_language

-logger = logger.getChild('bing engine')
-
 # about
 about = {
     "website": 'https://www.bing.com',


@@ -9,11 +9,8 @@ from json import loads
 from datetime import datetime
 from urllib.parse import urlencode

-from searx import logger
 from searx.exceptions import SearxEngineAPIException

-logger = logger.getChild('CORE engine')
-
 about = {
     "website": 'https://core.ac.uk',
     "wikidata_id": 'Q22661180',
@@ -29,8 +26,6 @@ nb_per_page = 10
 api_key = 'unset'

-logger = logger.getChild('CORE engine')
-
 base_url = 'https://core.ac.uk:443/api-v2/search/'
 search_string = '{query}?page={page}&pageSize={nb_per_page}&apiKey={apikey}'


@@ -9,15 +9,12 @@ import json
 from urllib.parse import urlencode, urlparse, urljoin
 from lxml import html

-from searx import logger
 from searx.data import WIKIDATA_UNITS
 from searx.engines.duckduckgo import language_aliases
 from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url  # NOQA # pylint: disable=unused-import
 from searx.utils import extract_text, html_to_text, match_language, get_string_replaces_function
 from searx.external_urls import get_external_url, get_earth_coordinates_url, area_to_osm_zoom

-logger = logger.getChild('duckduckgo_definitions')
-
 # about
 about = {
     "website": 'https://duckduckgo.com/',


@@ -7,11 +7,8 @@ from json import loads
 from time import time
 import re
 from urllib.parse import urlencode
-from searx.engines import logger
 from searx.utils import ecma_unescape, html_to_text

-logger = logger.getChild('flickr-noapi')
-
 # about
 about = {
     "website": 'https://www.flickr.com',


@@ -9,9 +9,6 @@ from json import loads
 from urllib.parse import urlencode
 from datetime import datetime

-from searx import logger
-
-logger = logger.getChild('genius engine')
 # about
 about = {
     "website": 'https://genius.com/',


@@ -8,7 +8,6 @@
 import re
 from json import loads
 from urllib.parse import urlencode
-# from searx import logger
 from searx.network import get

 # about


@@ -29,12 +29,9 @@ The google WEB engine itself has a special setup option:
 from urllib.parse import urlencode
 from lxml import html

-from searx import logger
 from searx.utils import match_language, extract_text, eval_xpath, eval_xpath_list, eval_xpath_getindex
 from searx.exceptions import SearxEngineCaptchaException

-logger = logger.getChild('google engine')
-
 # about
 about = {
     "website": 'https://www.google.com',


@@ -16,7 +16,6 @@
 from urllib.parse import urlencode, unquote
 from lxml import html

-from searx import logger
 from searx.utils import (
     eval_xpath,
     eval_xpath_list,
@@ -37,8 +36,6 @@ from searx.engines.google import (
 )
 # pylint: enable=unused-import

-logger = logger.getChild('google images')
-
 # about
 about = {
     "website": 'https://images.google.com',


@@ -20,7 +20,6 @@ from urllib.parse import urlencode
 from base64 import b64decode
 from lxml import html

-from searx import logger
 from searx.utils import (
     eval_xpath,
     eval_xpath_list,
@@ -50,8 +49,6 @@ about = {
     "results": 'HTML',
 }

-logger = logger.getChild('google news')
-
 # compared to other google engines google-news has a different time range
 # support. The time range is included in the search term.
 time_range_dict = {


@@ -14,7 +14,6 @@ Definitions`_.
 from urllib.parse import urlencode
 from datetime import datetime
 from lxml import html
-from searx import logger

 from searx.utils import (
     eval_xpath,
@@ -53,8 +52,6 @@ use_locale_domain = True
 time_range_support = True
 safesearch = False

-logger = logger.getChild('google scholar')
-
 def time_range_url(params):
     """Returns a URL query component for a google-Scholar time range based on
     ``params['time_range']``. Google-Scholar does only support ranges in years.


@@ -20,7 +20,6 @@ import re
 from urllib.parse import urlencode
 from lxml import html

-from searx import logger
 from searx.utils import (
     eval_xpath,
     eval_xpath_list,
@@ -59,8 +58,6 @@ about = {
     "results": 'HTML',
 }

-logger = logger.getChild('google video')
-
 # engine dependent config
 categories = ['videos']


@@ -8,9 +8,6 @@
 from json import loads
 from urllib.parse import urlencode

-from searx import logger
-
-logger = logger.getChild('solidtor engine')
 about = {
     "website": 'https://www.solidtorrents.net/',


@@ -8,7 +8,6 @@ from json import loads
 from lxml import html
 from dateutil import parser
 from urllib.parse import quote_plus, urlencode
-from searx import logger
 from searx.network import get as http_get

 # about


@@ -10,11 +10,8 @@ from datetime import datetime
 from json import loads
 from urllib.parse import urlencode

-from searx import logger
 from searx.exceptions import SearxEngineAPIException

-logger = logger.getChild('Springer Nature engine')
-
 about = {
     "website": 'https://www.springernature.com/',
     "wikidata_id": 'Q21096327',


@@ -9,11 +9,6 @@
 import sqlite3
 import contextlib

-from searx import logger
-
-logger = logger.getChild('SQLite engine')
-
-
 engine_type = 'offline'
 database = ""
 query_str = ""


@@ -8,9 +8,6 @@
 from urllib.parse import urlencode, urlparse, urlunparse, parse_qsl
 from json import loads

-from searx import logger
-
-logger = logger.getChild('unsplash engine')
 # about
 about = {
     "website": 'https://unsplash.com',


@@ -10,15 +10,12 @@ from json import loads
 from dateutil.parser import isoparse
 from babel.dates import format_datetime, format_date, format_time, get_datetime_format

-from searx import logger
 from searx.data import WIKIDATA_UNITS
 from searx.network import post, get
 from searx.utils import match_language, searx_useragent, get_string_replaces_function
 from searx.external_urls import get_external_url, get_earth_coordinates_url, area_to_osm_zoom
 from searx.engines.wikipedia import _fetch_supported_languages, supported_languages_url  # NOQA # pylint: disable=unused-import

-logger = logger.getChild('wikidata')
-
 # about
 about = {
     "website": 'https://wikidata.org/',


@@ -4,12 +4,9 @@
 """
 from lxml.html import fromstring

-from searx import logger
 from searx.utils import extract_text
 from searx.network import raise_for_httperror

-logger = logger.getChild('Wordnik engine')
-
 # about
 about = {
     "website": 'https://www.wordnik.com',


@@ -23,9 +23,6 @@ from urllib.parse import urlencode
 from lxml import html
 from searx.utils import extract_text, extract_url, eval_xpath, eval_xpath_list
-from searx import logger
-
-logger = logger.getChild('XPath engine')

 search_url = None
 """


@@ -14,7 +14,6 @@ from datetime import datetime, timedelta
 from dateutil import parser
 from lxml import html
-from searx import logger
 from searx.utils import (
     eval_xpath_list,
     eval_xpath_getindex,
@@ -23,8 +22,6 @@ from searx.utils import (
 from searx.engines.yahoo import parse_url

-logger = logger.getChild('yahoo_news engine')
-
 # about
 about = {
     "website": 'https://news.yahoo.com',