# -*- coding: utf-8 -*-
'''
    Torrenter plugin for XBMC
    Copyright (C) 2012 Vadim Skorba
    vadim.skorba@gmail.com

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program. If not, see <http://www.gnu.org/licenses/>.
'''

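# Content source for KickAss (kat.am): category browsing, result weighting
# and details-page scraping for the Torrenter plugin.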
import re
import Content
from BeautifulSoup import BeautifulSoup


class KickAssSo(Content.Content):
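
    # Each category_dict value is (label, listing URL, paging/sort options).
    # The options dict carries the page URL template ('page'), how the page
    # counter starts and advances ('second_page', 'increase') and the sort
    # variants offered for that category.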
    category_dict = {
        'hot': ('Most Recent', '/new/?field=seeders&sorder=desc',
                {'page': '/new/%d/?field=seeders&sorder=desc', 'increase': 1, 'second_page': 2,
                 ' ': [{'name': ' ', 'url_after': '?field=seeders&sorder=desc'}]}),
        'anime': ('Anime', '/anime/', {'page': '/anime/%d/', 'increase': 1, 'second_page': 2,
                                       ' ': [{'name': ' ', 'url_after': '?field=seeders&sorder=desc'}]}),
        'tvshows': ('TV Shows', '/tv/?field=seeders&sorder=desc',
                    {'page': '/tv/%d/?field=seeders&sorder=desc', 'increase': 1, 'second_page': 2,
                     ' ': [{'name': ' ', 'url_after': '?field=seeders&sorder=desc'}]}),
        'movies': ('Movies', '/movies/?field=seeders&sorder=desc',
                   {'page': '/movies/%d/?field=seeders&sorder=desc', 'increase': 1, 'second_page': 2,
                    '': [{'name': '', 'url_after': '?field=seeders&sorder=desc'}]}),
        '3Dmovies': ('3D Movies', '/3d-movies/?field=seeders&sorder=desc',
                     {'page': '/3d-movies/%d/?field=seeders&sorder=desc', 'increase': 1, 'second_page': 2,
                      'sort ': [{'name': 'by Seeders', 'url_after': '?field=seeders&sorder=desc'},
                                {'name': 'by Date', 'url_after': '?field=time_add&sorder=desc'}]}),
        'highres movies': ('High Resolution Movies', '/highres-movies/?field=seeders&sorder=desc',
                           {'page': '/highres-movies/%d/?field=seeders&sorder=desc', 'increase': 1, 'second_page': 2,
                            ' ': [{'name': ' ', 'url_after': '?field=seeders&sorder=desc'}]}),
        'heb_movies': ('hebdub movies', '/usearch/heb%20dub%20category%3Amovies/?field=seeders&sorder=desc'),
    }

    baseurl = "http://kat.am"
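
    # Browser-like request headers sent with every request; gzip is requested,
    # so responses are presumably decompressed downstream in Content.makeRequest.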
    headers = [('User-Agent',
                'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.124'
                ' YaBrowser/14.10.2062.12061 Safari/537.36'),
               ('Referer', 'http://kickass.so/'), ('Accept-Encoding', 'gzip'), ('Accept-Language', 'he;q=0.8')]

    '''
    Weight of a source provided by this searcher.
    It is multiplied by the default weight; the default weight is the number of seeds.
    '''
    sourceWeight = 1
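
    # Capability flags, presumably queried by the Torrenter core to decide
    # which features (tracker browsing, search, paging, sorting, info pages)
    # this module supports.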
    def isTracker(self):
        return True

    def isSearcher(self):
        return True

    def isScrappable(self):
        return False

    def isInfoLink(self):
        return True

    def isPages(self):
        return True

    def isSort(self):
        return True

    def isSearchOption(self):
        return False
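
    # Builds the directory listing for a category: the listing URL comes from
    # get_url() and the raw HTML is parsed into content tuples by mode().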
    def get_contentList(self, category, subcategory=None, apps_property=None):
        self.debug = self.log
        contentList = []
        url = self.get_url(category, subcategory, apps_property)

        response = self.makeRequest(url, headers=self.headers)

        if response is not None and len(response) > 0:
            self.debug(response)
            if category:
                contentList = self.mode(response)
            self.debug(str(contentList))
        return contentList
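
    # Parses the torrent rows of a listing page: 'regex' isolates each <tr>
    # row and 'regex_tr' captures (download link, details URL, title, category,
    # size, age, seeds, leechers). Only rows from the TV, Anime and Movies
    # categories are kept; each becomes a tuple of
    # (weight, original_title, title, year, img, info).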
    def mode(self, response):
        contentList = []
        num = 51
        good_forums = ['TV', 'Anime', 'Movies']
        regex = '''<tr class=".+?" id=.+?</tr>'''
        regex_tr = r'''<a data-download .+? href="(.+?)" class=".+?"><i.+?<a.+?<a.+?<a href="(.+?html)" class=".+?">(.+?)</a>.+? in <span.+?"><strong>.+?">(.+?)</a>.+?<td class="nobr center">(.+?)</td>.+?<td class="center".+?>(\d+ .+?)</td>.+?<td class="green center">(\d+?)</td>.+?<td class="red lasttd center">(\d+?)</td>'''
        for tr in re.compile(regex, re.DOTALL).findall(response):
            result = re.compile(regex_tr, re.DOTALL).findall(tr)
            if result:
                (link, infolink, title, forum, size, date, seeds, leechers) = result[0]
                # main
                if forum in good_forums:
                    info = {}
                    num = num - 1
                    original_title = None
                    year = 0
                    img = ''
                    # info
                    info['label'] = info['title'] = self.unescape(title)
                    info['link'] = link
                    info['infolink'] = self.baseurl + infolink
                    size = self.unescape(self.stripHtml(size))
                    date = self.unescape(self.stripHtml(date))
                    info['plot'] = info['title'] + '\r\n[I](%s) [S/L: %s/%s] [/I]\r\nAge: %s' % (
                        size, seeds, leechers, date)

                    contentList.append((
                        int(self.sourceWeight) * num,
                        original_title, title, int(year), img, info,
                    ))
        return contentList
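
    # Scrapes a torrent details page (the 'torrentMediaInfo' block) into the
    # movieInfo dict returned to the caller: poster, title, plot/desc, cast,
    # genres, a RottenTomatoes-based rating and an IMDb rating image URL.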
    def get_info(self, url):
        self.debug = self.log
        movieInfo = {}
        color = '[COLOR blue]%s:[/COLOR] %s\r\n'
        response = self.makeRequest(url, headers=self.headers)

        if response is not None and len(response) > 0:
            Soup = BeautifulSoup(response)
            result = Soup.find('div', 'torrentMediaInfo')
            if not result:
                return None
            li = result.findAll('li')
            info, movieInfo = {'Cast': ''}, {'desc': '', 'poster': '', 'title': '', 'views': '0', 'rating': '50',
                                             'kinopoisk': ''}
            try:
                img = result.find('a', {'class': 'movieCover'}).find('img').get('src')
                movieInfo['poster'] = img if img.startswith('http:') else 'http:' + img
            except Exception:
                pass
            try:
                movie = re.compile('View all <strong>(.+?)</strong> episodes</a>').search(str(result))
                if movie:
                    info['Movie'] = movie.group(1)
            except Exception:
                pass
            for i in li:
                name = i.find('strong').text
                if name:
                    info[name.rstrip(':')] = i.text.replace(name, '', 1)

            plot = result.find('div', {'id': 'summary'})
            if plot:
                cut = plot.find('strong').text
                info['plot'] = plot.text.replace(cut, '', 1).replace('report summary', '')

            # print str(result)
            cast = re.compile('<a href="/movies/actor/.+?">(.+?)</a>').findall(str(result))
            if cast:
                for actor in cast:
                    info['Cast'] += actor + ", "
            if 'Genres' in info:
                info['Genres'] = info['Genres'].replace(', ', ',').replace(',', ', ')

            for key in info.keys():
                if 'Movie' not in info and info[key] == 'addto bookmarks':
                    movieInfo['title'] = self.unescape(key)
                    info['TV Show'] = self.unescape(key)
                if 'plot' not in info and 'Summary' in key:
                    info['plot'] = info[key]

            for i in ['Movie', 'TV Show', 'Release date', 'Original run', 'Episode', 'Air date', 'Genres', 'Language',
                      'Director', 'Writers', 'Cast', 'IMDb rating', 'AniDB rating']:
                if info.get(i) and info.get(i) not in ['']:
                    movieInfo['desc'] += color % (i, info.get(i))
                    if i == 'Movie':
                        movieInfo['title'] = info.get(i)

            for i in ['plot', 'IMDb link', 'RottenTomatoes']:
                if info.get(i) and info.get(i) not in ['']:
                    if i == 'plot':
                        movieInfo['desc'] += '\r\n[COLOR blue]Plot:[/COLOR]\r\n' + self.unescape(info.get(i))
                    if i == 'RottenTomatoes':
                        movieInfo['rating'] = str(info.get(i).split('%')[0])
                    if i == 'IMDb link':
                        movieInfo['kinopoisk'] = 'http://imdb.snick.ru/ratefor/02/tt%s.png' % info.get(i)

        self.debug(str(movieInfo))
        return movieInfo