# -*- coding: utf-8 -*-
'''
    Torrenter plugin for XBMC
    Copyright (C) 2012 Vadim Skorba
    vadim.skorba@gmail.com

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
'''

import re

import Content
from BeautifulSoup import BeautifulSoup
class KickAssSo(Content.Content):
    """Content provider for the KickassTorrents (kat.cr) tracker.

    Parses category listing pages with a regex and torrent detail pages
    with BeautifulSoup.  Category listings are pre-sorted by seeders via
    the ``?field=seeders&sorder=desc`` query string.
    """

    # category key -> (display name, first-page URL path, paging rules).
    # 'page' is a %d template for page N; paging starts at 'second_page'.
    category_dict = {
        'hot': ('Most Recent', '/new/?field=seeders&sorder=desc',
                {'page': '/new/%d/?field=seeders&sorder=desc', 'increase': 1, 'second_page': 2,
                 ' ': [{'name': ' ', 'url_after': '?field=seeders&sorder=desc'}]}),
        'anime': ('Anime', '/anime/', {'page': '/anime/%d/', 'increase': 1, 'second_page': 2,
                 ' ': [{'name': ' ', 'url_after': '?field=seeders&sorder=desc'}]}),
        'tvshows': ('TV Shows', '/tv/?field=seeders&sorder=desc',
                {'page': '/tv/%d/?field=seeders&sorder=desc', 'increase': 1, 'second_page': 2,
                 ' ': [{'name': ' ', 'url_after': '?field=seeders&sorder=desc'}]}),
        'movies': ('Movies', '/movies/?field=seeders&sorder=desc',
                {'page': '/movies/%d/?field=seeders&sorder=desc', 'increase': 1, 'second_page': 2,
                 ' ': [{'name': ' ', 'url_after': '?field=seeders&sorder=desc'}]}),
    }

    baseurl = "http://kat.cr"

    # Desktop browser UA plus gzip; the site blocks obvious bots.
    headers = [('User-Agent',
                'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.124' + \
                ' YaBrowser/14.10.2062.12061 Safari/537.36'),
               ('Referer', 'http://kickass.so/'), ('Accept-Encoding', 'gzip')]

    '''
    Weight of source with this searcher provided.
    Will be multiplied on default weight.
    Default weight is seeds number
    '''
    sourceWeight = 1

    def isTracker(self):
        """This provider is a browsable tracker."""
        return True

    def isSearcher(self):
        """Free-text search is not supported here."""
        return False

    def isScrappable(self):
        return False

    def isInfoLink(self):
        """Detail pages can be fetched via get_info()."""
        return True

    def isPages(self):
        return True

    def isSort(self):
        return True

    def isSearchOption(self):
        return False

    def get_contentList(self, category, subcategory=None, apps_property=None):
        """Fetch a category listing page and parse it into content tuples.

        Returns a (possibly empty) list produced by mode().
        """
        contentList = []
        url = self.get_url(category, subcategory, apps_property)

        response = self.makeRequest(url, headers=self.headers)

        # makeRequest may return None or '' on failure.
        if response:
            if category:
                contentList = self.mode(response)
        return contentList

    def mode(self, response):
        """Parse a listing page's HTML into content tuples.

        Each tuple is (weight, original_title, title, year, img, info)
        where weight decreases with position so earlier (better-seeded)
        rows rank higher.
        """
        contentList = []
        num = 51  # 50 rows per page; decremented per item to keep page order.
        good_forums = ['TV', 'Anime', 'Movies']
        # Groups: .torrent link, detail page link, title, forum/category,
        # size cell, age cell, seeders, leechers.
        result = re.compile(
            r'''title="Download torrent file" href="(.+?\.torrent).+?" class=".+?"><i.+?<a.+?<a.+?<a href="(.+?html)" class=".+?">(.+?)</a>.+? in <span.+?"><strong>.+?">(.+?)</a>.+?<td class="nobr center">(.+?)</td>.+?<td class="center">(\d+ .+?)</td>.+?<td class="green center">(\d+?)</td>.+?<td class="red lasttd center">(\d+?)</td>''',
            re.DOTALL).findall(response)

        for link, infolink, title, forum, size, date, seeds, leechers in result:
            if forum in good_forums:
                info = {}
                num = num - 1
                original_title = None
                year = 0
                img = ''

                info['label'] = info['title'] = self.unescape(title)
                info['link'] = link
                info['infolink'] = self.baseurl + infolink
                size = self.unescape(self.stripHtml(size))
                date = self.unescape(self.stripHtml(date))
                info['plot'] = info['title'] + '\r\n[I](%s) [S/L: %s/%s] [/I]\r\nAge: %s' % (
                    size, seeds, leechers, date)

                contentList.append((
                    int(int(self.sourceWeight) * (int(num))),
                    original_title, title, int(year), img, info,
                ))
        return contentList

    def get_info(self, url):
        """Scrape a torrent detail page into a movieInfo dict.

        Returns None when the page has no 'torrentMediaInfo' section,
        otherwise a dict with 'desc', 'poster', 'title', 'views',
        'rating', 'kinopoisk' keys (empty dict if the request failed).
        """
        movieInfo = {}
        color = '[COLOR blue]%s:[/COLOR] %s\r\n'
        response = self.makeRequest(url, headers=self.headers)

        if response:
            Soup = BeautifulSoup(response)
            result = Soup.find('div', 'torrentMediaInfo')
            if not result:
                return None
            li = result.findAll('li')
            info, movieInfo = {'Cast': ''}, {'desc': '', 'poster': '', 'title': '', 'views': '0', 'rating': '50',
                                             'kinopoisk': ''}
            try:
                img = result.find('a', {'class': 'movieCover'}).find('img').get('src')
                movieInfo['poster'] = 'http:' + img  # src is protocol-relative
            except Exception:
                pass  # best-effort: not every page has a cover
            try:
                # BUGFIX: was .match(), which anchors at the start of the
                # string; the phrase occurs mid-HTML, so the movie name was
                # never captured.  search() scans the whole string.
                movie = re.compile('View all <strong>(.+?)</strong> episodes</a>').search(str(result))
                if movie:
                    info['Movie'] = movie.group(1)
            except Exception:
                pass

            # <li><strong>Label:</strong> value</li> rows -> info dict.
            for i in li:
                name = i.find('strong').text
                if name:
                    info[name.rstrip(':')] = i.text.replace(name, '', 1)

            plot = result.find('div', {'id': 'summary'})
            if plot:
                cut = plot.find('strong').text
                info['plot'] = plot.text.replace(cut, '', 1).replace('report summary', '')

            cast = re.compile('<a href="/movies/actor/.+?">(.+?)</a>').findall(str(result))
            if cast:
                for actor in cast:
                    info['Cast'] += actor + ", "

            if 'Genres' in info:
                # Normalize comma spacing to a uniform ', '.
                info['Genres'] = info['Genres'].replace(', ', ',').replace(',', ', ')

            for key in info.keys():
                # TV show pages title the bookmark row with the show name.
                if not 'Movie' in info and info[key] == 'addto bookmarks':
                    movieInfo['title'] = self.unescape(key)
                    info['TV Show'] = self.unescape(key)
                if not 'plot' in info and 'Summary' in key:
                    info['plot'] = info[key]

            # BUGFIX: 'Original run' was listed twice, duplicating its
            # line in the description.
            for i in ['Movie', 'TV Show', 'Release date', 'Original run', 'Episode', 'Air date', 'Genres', 'Language',
                      'Director', 'Writers', 'Cast', 'IMDb rating', 'AniDB rating']:
                if info.get(i) and info.get(i) not in ['']:
                    movieInfo['desc'] += color % (i, info.get(i))
                    if i == 'Movie':
                        movieInfo['title'] = info.get(i)

            for i in ['plot', 'IMDb link', 'RottenTomatoes']:
                if info.get(i) and info.get(i) not in ['']:
                    if i == 'plot':
                        movieInfo['desc'] += '\r\n[COLOR blue]Plot:[/COLOR]\r\n' + self.unescape(info.get(i))
                    if i == 'RottenTomatoes':
                        movieInfo['rating'] = str(info.get(i).split('%')[0])
                    if i == 'IMDb link':
                        movieInfo['kinopoisk'] = 'http://imdb.snick.ru/ratefor/02/tt%s.png' % info.get(i)

        return movieInfo
|