Ponysearch/searx/engines/nyaa.py

# SPDX-License-Identifier: AGPL-3.0-or-later
"""Nyaa.si (Anime Bittorrent tracker)
"""
from urllib.parse import urlencode
from lxml import html
from searx.utils import (
    eval_xpath_getindex,
    extract_text,
    get_torrent_size,
    int_or_zero,
)
# about
about = {
    "website": 'https://nyaa.si/',
    "wikidata_id": None,
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}
# engine dependent config
categories = ['files']
paging = True
# search-url
base_url = 'https://nyaa.si/'
# xpath queries
xpath_results = '//table[contains(@class, "torrent-list")]//tr[not(th)]'
xpath_category = './/td[1]/a[1]'
xpath_title = './/td[2]/a[last()]'
xpath_torrent_links = './/td[3]/a'
xpath_filesize = './/td[4]/text()'
xpath_seeds = './/td[6]/text()'
xpath_leeches = './/td[7]/text()'
xpath_downloads = './/td[8]/text()'
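# (assumed mapping, based on the selectors above: each row of nyaa.si's
# "torrent-list" table lists category, name, download links, size, date,
# seeders, leechers and completed downloads; td[5], presumably the upload
# date, is not used by this engine)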


# do search-request
def request(query, params):
    args = urlencode(
        {
            'q': query,
            'p': params['pageno'],
        }
    )
    params['url'] = base_url + '?' + args
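    # e.g. for the query "one piece" on page 1 this yields
    # 'https://nyaa.si/?q=one+piece&p=1'

    # `logger` is expected to be injected into the engine module by searx's
    # engine loader, so it is not imported here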
    logger.debug("query_url --> %s", params['url'])
    return params


# get response from search-request
def response(resp):
    results = []

    dom = html.fromstring(resp.text)

    for result in dom.xpath(xpath_results):
        # defaults
        filesize = 0
        magnet_link = ""
        torrent_link = ""
        # category in which our torrent belongs
        category = eval_xpath_getindex(result, xpath_category, 0, '')
        if category:
            category = category.attrib.get('title')
        # torrent title
        page_a = result.xpath(xpath_title)[0]
        title = extract_text(page_a)
        # link to the page
        href = base_url + page_a.attrib.get('href')
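        # the links cell of a row can hold both a .torrent download and a
        # magnet link; keep whichever of each is present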
        for link in result.xpath(xpath_torrent_links):
            url = link.attrib.get('href')
            if 'magnet' in url:
                # link to the magnet
                magnet_link = url
            else:
                # link to the torrent file
                torrent_link = url

        # seed count
        seed = int_or_zero(result.xpath(xpath_seeds))

        # leech count
        leech = int_or_zero(result.xpath(xpath_leeches))

        # torrent downloads count
        downloads = int_or_zero(result.xpath(xpath_downloads))
        # let's try to calculate the torrent size
        filesize = None
        filesize_info = eval_xpath_getindex(result, xpath_filesize, 0, '')
        if filesize_info:
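            # the size cell holds a value/unit pair such as "723.7 MiB";
            # get_torrent_size() converts it into a number of bytes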
            filesize_info = result.xpath(xpath_filesize)[0]
            filesize = get_torrent_size(*filesize_info.split())
        # content string contains all information not included into template
        content = 'Category: "{category}". Downloaded {downloads} times.'
        content = content.format(category=category, downloads=downloads)

        results.append(
            {
                'url': href,
                'title': title,
                'content': content,
                'seed': seed,
                'leech': leech,
                'filesize': filesize,
                'torrentfile': torrent_link,
                'magnetlink': magnet_link,
                'template': 'torrent.html',
            }
        )

    return results
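

# A minimal sketch, assuming the usual searx settings.yml layout, of how an
# engine module like this is typically enabled (the shortcut value is
# illustrative, not taken from this repository):
#
#   - name: nyaa
#     engine: nyaa
#     shortcut: nt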