# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""Google (Scholar)

For a detailed description of the *RESTful* API, see the `Query Parameter
Definitions`_.

.. _Query Parameter Definitions:
   https://developers.google.com/custom-search/docs/xml_results#WebSearch_Query_Parameter_Definitions
"""

# pylint: disable=invalid-name

from urllib.parse import urlencode
from datetime import datetime
from typing import Optional

from lxml import html

from searx.utils import (
    eval_xpath,
    eval_xpath_getindex,
    eval_xpath_list,
    extract_text,
)
from searx.engines.google import (
    get_lang_info,
    time_range_dict,
    detect_google_sorry,
)

# pylint: disable=unused-import
from searx.engines.google import (
    fetch_traits,
    supported_languages_url,
    _fetch_supported_languages,
)

# pylint: enable=unused-import

# about
about = {
    "website": 'https://scholar.google.com',
    "wikidata_id": 'Q494817',
    "official_api_documentation": 'https://developers.google.com/custom-search',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['science', 'scientific publications']
paging = True
language_support = True
use_locale_domain = True
time_range_support = True
safesearch = False
send_accept_language_header = True
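
# A hedged sketch of how this engine is typically wired up in a searx
# ``settings.yml`` (the stanza layout follows the usual engine conventions;
# the shortcut value is illustrative, not taken from this file):
#
#   - name: google scholar
#     engine: google_scholar
#     shortcut: gos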


def time_range_url(params):
    """Returns a URL query component for a Google Scholar time range based on
    ``params['time_range']``.  Google Scholar only supports ranges expressed
    in years, so all searx ranges (*day*, *week*, *month*, *year*) are mapped
    to *year*.  If no range is set, an empty string is returned.  Example::

        &as_ylo=2019
    """
    # as_ylo=2016&as_yhi=2019
    ret_val = ''
    if params['time_range'] in time_range_dict:
        # Scholar's as_ylo sets the lower bound; use last year as the cutoff.
        ret_val = '&' + urlencode({'as_ylo': datetime.now().year - 1})
    return ret_val
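
# A minimal sketch of both branches of time_range_url (illustrative values,
# assuming the current year is 2020):
#
#   time_range_url({'time_range': 'month'})  # -> '&as_ylo=2019'
#   time_range_url({'time_range': ''})       # -> ''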


def request(query, params):
    """Google Scholar search request"""

    offset = (params['pageno'] - 1) * 10
    # "supported_languages" and "language_aliases" are set on this module at
    # runtime by searx's engine loader.
    lang_info = get_lang_info(params, supported_languages, language_aliases, False)

    # subdomain is: scholar.google.xy
    lang_info['subdomain'] = lang_info['subdomain'].replace("www.", "scholar.")

    query_url = (
        'https://'
        + lang_info['subdomain']
        + '/scholar'
        + "?"
        + urlencode({'q': query, **lang_info['params'], 'ie': "utf8", 'oe': "utf8", 'start': offset})
    )

    query_url += time_range_url(params)
    params['url'] = query_url

    params['cookies']['CONSENT'] = "YES+"
    params['headers'].update(lang_info['headers'])
    params['headers']['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'

    # params['google_subdomain'] = subdomain
    return params
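
# For illustration only: a query like "quantum computing" on page 2 with an
# English UI and a time range set produces a URL roughly of this shape (the
# exact language parameters depend on get_lang_info):
#
#   https://scholar.google.com/scholar?q=quantum+computing&hl=en&ie=utf8&oe=utf8&start=10&as_ylo=2019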


def parse_gs_a(text: Optional[str]):
    """Parse the green text line (element class ``gs_a``) below a Google
    Scholar result title.

    Possible formats:
    * "{authors} - {journal}, {year} - {publisher}"
    * "{authors} - {year} - {publisher}"
    * "{authors} - {publisher}"
    """
    if text is None or text == "":
        return None, None, None, None

    s_text = text.split(' - ')
    authors = s_text[0].split(', ')
    publisher = s_text[-1]
    if len(s_text) != 3:
        return authors, None, publisher, None

    # the format is "{authors} - {journal}, {year} - {publisher}" or
    # "{authors} - {year} - {publisher}"

    # get journal and year
    journal_year = s_text[1].split(', ')
    # journal is optional and may contain commas
    if len(journal_year) > 1:
        journal = ', '.join(journal_year[0:-1])
        if journal == '…':
            journal = None
    else:
        journal = None
    # year
    year = journal_year[-1]
    try:
        publishedDate = datetime.strptime(year.strip(), '%Y')
    except ValueError:
        publishedDate = None
    return authors, journal, publisher, publishedDate
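
# Worked examples for the three formats listed in the docstring (author and
# publisher strings are made up; return values paraphrased):
#
#   parse_gs_a('J Doe, A Roe - Nature, 2019 - nature.com')
#     -> (['J Doe', 'A Roe'], 'Nature', 'nature.com', datetime(2019, 1, 1))
#   parse_gs_a('J Doe - 2019 - nature.com')
#     -> (['J Doe'], None, 'nature.com', datetime(2019, 1, 1))
#   parse_gs_a('J Doe - nature.com')
#     -> (['J Doe'], None, 'nature.com', None)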


def response(resp):  # pylint: disable=too-many-locals
    """Parse Google Scholar's response and extract the results"""
    results = []

    detect_google_sorry(resp)

    # which subdomain ?
    # subdomain = resp.search_params.get('google_subdomain')

    # convert the text to dom
    dom = html.fromstring(resp.text)

    # parse results
    for result in eval_xpath_list(dom, '//div[@data-cid]'):

        title = extract_text(eval_xpath(result, './/h3[1]//a'))

        if not title:
            # this is a [CITATION] block
            continue

        pub_type = extract_text(eval_xpath(result, './/span[@class="gs_ct1"]'))
        if pub_type:
            # strip the enclosing brackets and lowercase: "[BOOK]" --> "book"
            pub_type = pub_type[1:-1].lower()

        url = eval_xpath_getindex(result, './/h3[1]//a/@href', 0)
        content = extract_text(eval_xpath(result, './/div[@class="gs_rs"]'))
        authors, journal, publisher, publishedDate = parse_gs_a(
            extract_text(eval_xpath(result, './/div[@class="gs_a"]'))
        )
        if publisher and publisher in url:
            publisher = None

        # cited by
        comments = extract_text(eval_xpath(result, './/div[@class="gs_fl"]/a[starts-with(@href,"/scholar?cites=")]'))

        # link to the html or pdf document
        html_url = None
        pdf_url = None
        doc_url = eval_xpath_getindex(result, './/div[@class="gs_or_ggsm"]/a/@href', 0, default=None)
        doc_type = extract_text(eval_xpath(result, './/span[@class="gs_ctg2"]'))
        if doc_type == "[PDF]":
            pdf_url = doc_url
        else:
            html_url = doc_url

        results.append(
            {
                'template': 'paper.html',
                'type': pub_type,
                'url': url,
                'title': title,
                'authors': authors,
                'publisher': publisher,
                'journal': journal,
                'publishedDate': publishedDate,
                'content': content,
                'comments': comments,
                'html_url': html_url,
                'pdf_url': pdf_url,
            }
        )

    # parse suggestion
    for suggestion in eval_xpath(dom, '//div[contains(@class, "gs_qsuggest_wrap")]//li//a'):
        # append suggestion
        results.append({'suggestion': extract_text(suggestion)})

    # parse correction ("did you mean ...")
    for correction in eval_xpath(dom, '//div[@class="gs_r gs_pda"]/a'):
        results.append({'correction': extract_text(correction)})

    return results
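
# Shape of a single parsed paper entry, for illustration (field values are
# made up, consistent with the parsing above):
#
#   {'template': 'paper.html', 'type': 'pdf', 'url': 'https://example.org/doc',
#    'title': 'An example title', 'authors': ['J Doe'], 'publisher': 'Springer',
#    'journal': None, 'publishedDate': datetime(2019, 1, 1), 'content': '…',
#    'comments': 'Cited by 42', 'html_url': None,
#    'pdf_url': 'https://example.org/doc.pdf'}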