id
int64
0
458k
file_name
stringlengths
4
119
file_path
stringlengths
14
227
content
stringlengths
24
9.96M
size
int64
24
9.96M
language
stringclasses
1 value
extension
stringclasses
14 values
total_lines
int64
1
219k
avg_line_length
float64
2.52
4.63M
max_line_length
int64
5
9.91M
alphanum_fraction
float64
0
1
repo_name
stringlengths
7
101
repo_stars
int64
100
139k
repo_forks
int64
0
26.4k
repo_open_issues
int64
0
2.27k
repo_license
stringclasses
12 values
repo_extraction_date
stringclasses
433 values
7,800
bitsoup.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/torrent/bitsoup.py
import traceback from bs4 import BeautifulSoup, SoupStrainer from couchpotato.core.helpers.variable import tryInt from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.base import TorrentProvider log = CPLog(__name__) class Base(TorrentProvider): urls = { 'test': 'https://www.bitsoup.me/', 'login': 'https://www.bitsoup.me/takelogin.php', 'login_check': 'https://www.bitsoup.me/my.php', 'search': 'https://www.bitsoup.me/browse.php?%s', 'baseurl': 'https://www.bitsoup.me/%s', } http_time_between_calls = 1 # Seconds login_fail_msg = 'Login failed!' only_tables_tags = SoupStrainer('table') torrent_name_cell = 1 torrent_download_cell = 2 def _searchOnTitle(self, title, movie, quality, results): url = self.urls['search'] % self.buildUrl(title, movie, quality) data = self.getHTMLData(url) if data: html = BeautifulSoup(data, 'html.parser', parse_only = self.only_tables_tags) try: result_table = html.find('table', attrs = {'class': 'koptekst'}) if not result_table or 'nothing found!' 
in data.lower(): return entries = result_table.find_all('tr') for result in entries[1:]: all_cells = result.find_all('td') torrent = all_cells[self.torrent_name_cell].find('a') download = all_cells[self.torrent_download_cell].find('a') torrent_id = torrent['href'] torrent_id = torrent_id.replace('details.php?id=', '') torrent_id = torrent_id.replace('&hit=1', '') torrent_name = torrent.getText() torrent_size = self.parseSize(all_cells[8].getText()) torrent_seeders = tryInt(all_cells[10].getText()) torrent_leechers = tryInt(all_cells[11].getText()) torrent_url = self.urls['baseurl'] % download['href'] torrent_detail_url = self.urls['baseurl'] % torrent['href'] results.append({ 'id': torrent_id, 'name': torrent_name, 'size': torrent_size, 'seeders': torrent_seeders, 'leechers': torrent_leechers, 'url': torrent_url, 'detail_url': torrent_detail_url, }) except: log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) def getLoginParams(self): return { 'username': self.conf('username'), 'password': self.conf('password'), 'ssl': 'yes', } def loginSuccess(self, output): return 'logout.php' in output.lower() loginCheckSuccess = loginSuccess config = [{ 'name': 'bitsoup', 'groups': [ { 'tab': 'searcher', 'list': 'torrent_providers', 'name': 'Bitsoup', 'description': '<a href="https://bitsoup.me" target="_blank">Bitsoup</a>', 'wizard': True, 'icon': 
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAB8ElEQVR4AbWSS2sTURiGz3euk0mswaE37HhNhIrajQheFgF3rgR/lAt/gOBCXNZlo6AbqfUWRVCxi04wqUnTRibpJLaJzdzOOZ6WUumyC5/VHOb9eN/FA91uFx0FjI4IPfgiGLTWH73tn348GKmN7ijD0d2b41fO5qJEaX24AWNIUrVQCTTJ3Llx6vbV6Vtzk7Gi9+ebi996guFDDYAQAVj4FExP5qdOZB49W62t/zH3hECcwsPnbWeMXz6Xi2K1f0ApeK3hMCHHbP5gvvoriBgFAAQJEAxhjJ4u+YWTNsVI6b1JgtPWZkoIefKy4fcii2OTw2BABs7wj3bYDlLL4rvjGWOdTser1j5Xf7c3Q/MbHQYApxItvnm31mhQQ71eX2vUB76/vsWB2hg0QuogrMwLIG8P3InM2/eVGXeDViqVwWB79vRU2lgJYmdHcgXCTAXQFJTN5HguvDCR2Hxsxe8EvT54nlcul5vNpqDIEgwRQanAhAAABgRIyiQcjpIkkTOuWyqVoN/vSylX67XXH74uV1vHRUyxxFqbLBCSmBpiXSq6xcL5QrGYzWZ3XQIAwdlOJB+/aL764ucdmncYs0WsCI7kvTnn+qyDMEnTVCn1Tz5KsBFg6fvWcmsUAcnYNC/g2hnromvvqbHvxv+39S+MX+bWkFXwAgAAAABJRU5ErkJggg==', 'options': [ { 'name': 'enabled', 'type': 'enabler', 'default': False, }, { 'name': 'username', 'default': '', }, { 'name': 'password', 'default': '', 'type': 'password', }, { 'name': 'seed_ratio', 'label': 'Seed ratio', 'type': 'float', 'default': 1, 'description': 'Will not be (re)moved until this seed ratio is met.', }, { 'name': 'seed_time', 'label': 'Seed time', 'type': 'int', 'default': 40, 'description': 'Will not be (re)moved until this seed time (in hours) is met.', }, { 'name': 'extra_score', 'advanced': True, 'label': 'Extra Score', 'type': 'int', 'default': 20, 'description': 'Starting score for each release found via this provider.', } ], }, ], }]
5,411
Python
.py
113
32.964602
763
0.539541
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,801
yts.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/torrent/yts.py
from datetime import datetime from couchpotato.core.helpers.variable import tryInt, getIdentifier from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider import random log = CPLog(__name__) class Base(TorrentMagnetProvider): # Only qualities allowed: 720p/1080p/3D - the rest will fail. # All YTS.ag torrents are verified urls = { 'detail': 'https://yts.am/api#list_movies', 'search': 'https://yts.am/api/v2/list_movies.json?query_term=%s&limit=%s&page=%s' } def _search(self, movie, quality, results): limit = 10 page = 1 data = self.getJsonData(self.urls['search'] % (getIdentifier(movie), limit, page)) if data: movie_count = tryInt(data['data']['movie_count']) if movie_count == 0: log.debug('%s - found no results', (self.getName())) else: movie_results = data['data']['movies'] for i in range(0,len(movie_results)): result = data['data']['movies'][i] name = result['title'] year = result['year'] detail_url = result['url'] for torrent in result['torrents']: t_quality = torrent['quality'] if t_quality in quality['label']: hash = torrent['hash'] size = tryInt(torrent['size_bytes'] / 1048576) seeders = tryInt(torrent['seeds']) leechers = tryInt(torrent['peers']) pubdate = torrent['date_uploaded'] # format: 2017-02-17 18:40:03 pubdate = datetime.strptime(pubdate, '%Y-%m-%d %H:%M:%S') age = (datetime.now() - pubdate).days results.append({ 'id': random.randint(100, 9999), 'name': '%s (%s) %s %s %s' % (name, year, 'YTS', t_quality, 'BR-Rip'), 'url': self.make_magnet(hash, name), 'size': size, 'seeders': seeders, 'leechers': leechers, 'age': age, 'detail_url': detail_url, 'score': 1 }) return def make_magnet(self, hash, name): url_encoded_trackers = 'udp%3A%2F%2Fopen.demonii.com%3A1337%2Fannounce&tr=%0Audp%3A%2F%2Ftracker.openbittorr' \ 'ent.com%3A80&tr=%0Audp%3A%2F%2Ftracker.coppersurfer.tk%3A6969&tr=%0Audp%3A%2F%2Fglot' \ 'orrents.pw%3A6969%2Fannounce&tr=%0Audp%3A%2F%2Ftracker.opentrackr.org%3A1337%2Fannou' 
\ 'nce&tr=%0Audp%3A%2F%2Ftorrent.gresille.org%3A80%2Fannounce&tr=%0Audp%3A%2F%2Fp4p.are' \ 'nabg.com%3A1337&tr=%0Audp%3A%2F%2Ftracker.leechers-paradise.org%3A6969]' return 'magnet:?xt=urn:btih:%s&dn=%s&tr=%s' % (hash, name.replace(' ', '+'), url_encoded_trackers) config = [{ 'name': 'yts', 'groups': [ { 'tab': 'searcher', 'list': 'torrent_providers', 'name': 'YTS', 'description': '<a href="https://yts.ag/" target="_blank">YTS</a>', 'wizard': True, 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACL0lEQVR4AS1SPW/UQBAd23fxne/Ld2dvzvHuzPocEBAKokCBqG' 'iQ6IgACYmvUKRBFEQgKKGg4BAlUoggggYUEQpSHOI7CIEoQs/fYcbLaU/efTvvvZlnA1qydoxU5kcxX0CkgmQZtPy0hCUjvK+W' 'gEByOZ5dns1O5bzna8fRVkgsxH8B0YouIvBhdD5T11NiVOoKrsttyUcpRW0InUrFnwe9HzuP2uaQZYhF2LQ76TTXw2RVMTK8mY' 'Ybjfh+zNquMVCrqn93aArLSixPxnafdGDLaz1tjY5rmNa8z5BczEQOxQfCl1GyoqoWxYRN1bkh7ELw3q/vhP6HIL4TG9Kumpjg' 'vwuyM7OsjSj98E/vszMfZ7xvPtMaWxGO5crwIumKCR5HxDtJ0AWKGG204RfUd/3smJYqwem/Q7BTS1ZGfM4LNpVwuKAz6cMeRO' 'st0S2EwNE7GjTehO2H3dxqIpdkydat15G3F8SXBi4GlpBNlSz012L/k2+W0CLLk/jbcf13rf41yJeMQ8QWUZiHCfCA9ad+81nE' 'KPtoS9mJOf9v0NmMJHgUT6xayheK9EIK7JJeU/AF4scDF7Y5SPlJrRcxJ+um4ibNEdObxLiIwJim+eT2AL5D9CIcnZ5zvSJi9e' 'IlNHVVtZ831dk5svPgvjPWTq+ktWkd/kD0qtm71x+sDQe3kt6DXnM7Ct+GajmTxKlkAokWljyAKSm5oWa2w+BH4P2UuVub7eTy' 'iGOQYapY/wEztHduSDYz5gAAAABJRU5ErkJggg==', 'options': [ { 'name': 'enabled', 'type': 'enabler', 'default': False }, { 'name': 'seed_ratio', 'label': 'Seed ratio', 'type': 'float', 'default': 1, 'description': 'Will not be (re)moved until this seed ratio is met.', }, { 'name': 'seed_time', 'label': 'Seed time', 'type': 'int', 'default': 40, 'description': 'Will not be (re)moved until this seed time (in hours) is met.', }, { 'name': 'info', 'label': 'Info', 'type':'bool', 'default':'False', 'description': 'YTS will only work if you set the minimum size for 720p to 500 and 1080p to 800', }, { 'name': 'extra_score', 'advanced': True, 'label': 'Extra Score', 'type': 'int', 'default': 0, 'description': 'Starting score for each 
release found via this provider.', } ], } ] }]
5,979
Python
.py
114
34.5
120
0.508976
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,802
torrentpotato.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/torrent/torrentpotato.py
from urlparse import urlparse import re import traceback from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.helpers.variable import splitString, tryInt, tryFloat from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.base import ResultList from couchpotato.core.media._base.providers.torrent.base import TorrentProvider log = CPLog(__name__) class Base(TorrentProvider): urls = {} limits_reached = {} http_time_between_calls = 1 # Seconds def search(self, media, quality): hosts = self.getHosts() results = ResultList(self, media, quality, imdb_results = True) for host in hosts: if self.isDisabled(host): continue self._searchOnHost(host, media, quality, results) return results def _searchOnHost(self, host, media, quality, results): torrents = self.getJsonData(self.buildUrl(media, host), cache_timeout = 1800) if torrents: try: if torrents.get('error'): log.error('%s: %s', (torrents.get('error'), host['host'])) elif torrents.get('results'): for torrent in torrents.get('results', []): results.append({ 'id': torrent.get('torrent_id'), 'protocol': 'torrent' if re.match('^(http|https|ftp)://.*$', torrent.get('download_url')) else 'torrent_magnet', 'provider_extra': urlparse(host['host']).hostname or host['host'], 'name': toUnicode(torrent.get('release_name')), 'url': torrent.get('download_url'), 'detail_url': torrent.get('details_url'), 'size': torrent.get('size'), 'score': host['extra_score'], 'seeders': torrent.get('seeders'), 'leechers': torrent.get('leechers'), 'seed_ratio': host['seed_ratio'], 'seed_time': host['seed_time'], }) except: log.error('Failed getting results from %s: %s', (host['host'], traceback.format_exc())) def getHosts(self): uses = splitString(str(self.conf('use')), clean = False) hosts = splitString(self.conf('host'), clean = False) names = splitString(self.conf('name'), clean = False) seed_times = splitString(self.conf('seed_time'), clean = False) seed_ratios = 
splitString(self.conf('seed_ratio'), clean = False) pass_keys = splitString(self.conf('pass_key'), clean = False) extra_score = splitString(self.conf('extra_score'), clean = False) host_list = [] for nr in range(len(hosts)): try: key = pass_keys[nr] except: key = '' try: host = hosts[nr] except: host = '' try: name = names[nr] except: name = '' try: ratio = seed_ratios[nr] except: ratio = '' try: seed_time = seed_times[nr] except: seed_time = '' host_list.append({ 'use': uses[nr], 'host': host, 'name': name, 'seed_ratio': tryFloat(ratio), 'seed_time': tryInt(seed_time), 'pass_key': key, 'extra_score': tryInt(extra_score[nr]) if len(extra_score) > nr else 0 }) return host_list def belongsTo(self, url, provider = None, host = None): hosts = self.getHosts() for host in hosts: result = super(Base, self).belongsTo(url, host = host['host'], provider = provider) if result: return result def isDisabled(self, host = None): return not self.isEnabled(host) def isEnabled(self, host = None): # Return true if at least one is enabled and no host is given if host is None: for host in self.getHosts(): if self.isEnabled(host): return True return False return TorrentProvider.isEnabled(self) and host['host'] and host['pass_key'] and int(host['use']) config = [{ 'name': 'torrentpotato', 'groups': [ { 'tab': 'searcher', 'list': 'torrent_providers', 'name': 'TorrentPotato', 'order': 10, 'description': 'CouchPotato torrent provider. 
Checkout <a href="https://github.com/CouchPotato/CouchPotatoServer/wiki/CouchPotato-Torrent-Provider" target="_blank">the wiki page about this provider</a> for more info.', 'wizard': True, 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAABSElEQVR4AZ2Nz0oCURTGv8t1YMpqUxt9ARFxoQ/gQtppgvUKcu/sxB5iBJkogspaBC6iVUplEC6kv+oiiKDNhAtt16roP0HQgdsMLgaxfvy4nHP4Pi48qE2g4v91JOqT1CH/UnA7w7icUlLawyEdj+ZI/7h6YluWbRiddHonHh9M70aj7VTKzuXuikUMci/EO/ACnAI15599oAk8AR/AgxBQNCzreD7bmpl+FOIVuAHqQDUcJo+AK+CZFKLt95/MpSmMt0TiW9POxse6UvYZ6zB2wFgjFiNpOGesR0rZ0PVPXf8KhUCl22CwClz4eN8weoZBb9c0bdPsOWvHx/cYu9Y0CoNoZTJrwAbn5DrnZc6XOV+igVbnsgo0IxEomlJuA1vUIYGyq3PZBChwmExCUSmVZgMBDIUCK4UCFIv5vHIhm/XUDeAf/ADbcpd5+aXSWQAAAABJRU5ErkJggg==', 'options': [ { 'name': 'enabled', 'type': 'enabler', 'default': False, }, { 'name': 'use', 'default': '' }, { 'name': 'host', 'default': '', 'description': 'The url path of your TorrentPotato provider.', }, { 'name': 'extra_score', 'advanced': True, 'label': 'Extra Score', 'default': '0', 'description': 'Starting score for each release found via this provider.', }, { 'name': 'name', 'label': 'Username', 'default': '', }, { 'name': 'seed_ratio', 'label': 'Seed ratio', 'default': '1', 'description': 'Will not be (re)moved until this seed ratio is met.', }, { 'name': 'seed_time', 'label': 'Seed time', 'default': '40', 'description': 'Will not be (re)moved until this seed time (in hours) is met.', }, { 'name': 'pass_key', 'default': ',', 'label': 'Pass Key', 'description': 'Can be found on your profile page', 'type': 'combined', 'combine': ['use', 'host', 'pass_key', 'name', 'seed_ratio', 'seed_time', 'extra_score'], }, ], }, ], }]
7,208
Python
.py
153
32.20915
563
0.520655
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,803
torrentbytes.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/torrent/torrentbytes.py
import traceback from bs4 import BeautifulSoup from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode from couchpotato.core.helpers.variable import tryInt from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.base import TorrentProvider log = CPLog(__name__) class Base(TorrentProvider): urls = { 'test': 'https://www.torrentbytes.net/', 'login': 'https://www.torrentbytes.net/takelogin.php', 'login_check': 'https://www.torrentbytes.net/inbox.php', 'detail': 'https://www.torrentbytes.net/details.php?id=%s', 'search': 'https://www.torrentbytes.net/browse.php?search=%s&cat=%d', 'download': 'https://www.torrentbytes.net/download.php?id=%s&name=%s', } cat_ids = [ ([5], ['720p', '1080p', 'bd50']), ([19], ['cam']), ([19], ['ts', 'tc']), ([19], ['r5', 'scr']), ([19], ['dvdrip']), ([19], ['brrip']), ([20], ['dvdr']), ] http_time_between_calls = 1 # Seconds login_fail_msg = 'Username or password incorrect' cat_backup_id = None def _searchOnTitle(self, title, movie, quality, results): url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['info']['year'])), self.getCatId(quality)[0]) data = self.getHTMLData(url) if data: html = BeautifulSoup(data) try: result_table = html.find('table', attrs = {'border': '1'}) if not result_table: return entries = result_table.find_all('tr') for result in entries[1:]: cells = result.find_all('td') link = cells[1].find('a', attrs = {'class': 'index'}) full_id = link['href'].replace('details.php?id=', '') torrent_id = full_id[:7] name = toUnicode(link.get('title', link.contents[0]).encode('ISO-8859-1')).strip() results.append({ 'id': torrent_id, 'name': name, 'url': self.urls['download'] % (torrent_id, name), 'detail_url': self.urls['detail'] % torrent_id, 'size': self.parseSize(cells[6].contents[0] + cells[6].contents[2]), 'seeders': tryInt(cells[8].find('span').contents[0]), 'leechers': tryInt(cells[9].find('span').contents[0]), }) except: log.error('Failed to 
parsing %s: %s', (self.getName(), traceback.format_exc())) def getLoginParams(self): return { 'username': self.conf('username'), 'password': self.conf('password'), 'login': 'submit', } def loginSuccess(self, output): return 'logout.php' in output.lower() or 'Welcome' in output.lower() loginCheckSuccess = loginSuccess config = [{ 'name': 'torrentbytes', 'groups': [ { 'tab': 'searcher', 'list': 'torrent_providers', 'name': 'TorrentBytes', 'description': '<a href="http://torrentbytes.net" target="_blank">TorrentBytes</a>', 'wizard': True, 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAAeFBMVEUAAAAAAEQAA1QAEmEAKnQALHYAMoEAOokAQpIASYsASZgAS5UATZwATosATpgAVJ0AWZwAYZ4AZKAAaZ8Ab7IAcbMAfccAgcQAgcsAhM4AiscAjMkAmt0AoOIApecAp/EAqvQAs+kAt+wA3P8A4f8A//8VAAAfDbiaAl08AAAAjUlEQVQYGQXBO04DQRAFwHqz7Z8sECIl5f73ISRD5GBs7UxTlWfg9vYXnvJRQJqOL88D6BAwJtMMumHUVCl60aa6H93IrIv0b+157f1lpk+fm87lMWrZH0vncKbXdRUQrRmrh9C6Iwkq6rg4PXZcyXmbizzeV/g+rDra0rGve8jPKLSOJNi2AQAwAGjwD7ApPkEHdtPQAAAAAElFTkSuQmCC', 'options': [ { 'name': 'enabled', 'type': 'enabler', 'default': False, }, { 'name': 'username', 'default': '', }, { 'name': 'password', 'default': '', 'type': 'password', }, { 'name': 'seed_ratio', 'label': 'Seed ratio', 'type': 'float', 'default': 1, 'description': 'Will not be (re)moved until this seed ratio is met.', }, { 'name': 'seed_time', 'label': 'Seed time', 'type': 'int', 'default': 40, 'description': 'Will not be (re)moved until this seed time (in hours) is met.', }, { 'name': 'extra_score', 'advanced': True, 'label': 'Extra Score', 'type': 'int', 'default': 20, 'description': 'Starting score for each release found via this provider.', } ], }, ], }]
5,156
Python
.py
115
30.834783
463
0.509566
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,804
thepiratebay.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/torrent/thepiratebay.py
import re import traceback from bs4 import BeautifulSoup from couchpotato.core.event import addEvent from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.helpers.variable import tryInt from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider import six log = CPLog(__name__) class Base(TorrentMagnetProvider): urls = { 'detail': '%s/torrent/%s', 'search': '%s/search/%%s/%%s/7/%%s' } cat_backup_id = 200 disable_provider = False http_time_between_calls = 0 proxy_list = [ 'https://pirateproxy.cat', 'https://pirateproxy.wf', 'https://pirateproxy.tf', 'https://urbanproxy.eu', 'https://piratebays.co', 'https://pirateproxy.yt', 'https://thepiratebay.uk.net', 'https://thebay.tv', 'https://thepirateproxy.co', 'https://theproxypirate.pw', 'https://arrr.xyz', 'https://tpb.dashitz.com' ] def __init__(self): super(Base, self).__init__() addEvent('app.test', self.doTest) def _search(self, media, quality, results): page = 0 total_pages = 1 cats = self.getCatId(quality) base_search_url = self.urls['search'] % self.getDomain() while page < total_pages: search_url = base_search_url % self.buildUrl(media, page, cats) page += 1 data = self.getHTMLData(search_url) if data: try: soup = BeautifulSoup(data) results_table = soup.find('table', attrs = {'id': 'searchResult'}) if not results_table: return try: total_pages = len(soup.find('div', attrs = {'align': 'center'}).find_all('a')) except: pass entries = results_table.find_all('tr') for result in entries[1:]: link = result.find(href = re.compile('torrent\/\d+\/')) download = result.find(href = re.compile('magnet:')) try: size = re.search('Size (?P<size>.+),', six.text_type(result.select('font.detDesc')[0])).group('size') except: continue if link and download: if self.conf('trusted_only'): if result.find('img', alt = re.compile('Trusted')) is None and \ result.find('img', alt = re.compile('VIP')) is None and \ result.find('img', alt = 
re.compile('Helpers')) is None and \ result.find('img', alt = re.compile('Moderator')) is None: log.info('Skipped torrent %s, untrusted.' % link.string) continue def extra_score(item): trusted = (0, 10)[result.find('img', alt = re.compile('Trusted')) is not None] vip = (0, 20)[result.find('img', alt = re.compile('VIP')) is not None] confirmed = (0, 30)[result.find('img', alt = re.compile('Helpers')) is not None] moderated = (0, 50)[result.find('img', alt = re.compile('Moderator')) is not None] return confirmed + trusted + vip + moderated results.append({ 'id': re.search('/(?P<id>\d+)/', link['href']).group('id'), 'name': six.text_type(link.string), 'url': download['href'], 'detail_url': self.getDomain(link['href']), 'size': self.parseSize(size), 'seeders': tryInt(result.find_all('td')[2].string), 'leechers': tryInt(result.find_all('td')[3].string), 'extra_score': extra_score, 'get_more_info': self.getMoreInfo }) except: log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) def isEnabled(self): return super(Base, self).isEnabled() and self.getDomain() def correctProxy(self, data): return 'title="Pirate Search"' in data def getMoreInfo(self, item): full_description = self.getCache('tpb.%s' % item['id'], item['detail_url'], cache_timeout = 25920000) html = BeautifulSoup(full_description) nfo_pre = html.find('div', attrs = {'class': 'nfo'}) description = '' try: description = toUnicode(nfo_pre.text) except: pass item['description'] = description return item def doTest(self): for url in self.proxy_list: try: data = self.urlopen(url + '/search/test+search') if 'value="test+search"' in data: log.info('Success %s', url) continue except: log.error('%s', traceback.format_exc(0)) config = [{ 'name': 'thepiratebay', 'groups': [ { 'tab': 'searcher', 'list': 'torrent_providers', 'name': 'ThePirateBay', 'description': 'The world\'s largest bittorrent tracker. 
<a href="https://thepiratebay.se/" target="_blank">ThePirateBay</a>', 'wizard': True, 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAAAAAA6mKC9AAAA3UlEQVQY02P4DwT/YADIZvj//7qnozMYODmtAAusZoCDELDAegYGViZhAWZmRoYoqIDupfhNN1M3dTBEggXWMZg9jZRXV77YxhAOFpjDwMAPMoCXmcHsF1SAQZ6bQY2VgUEbKHClcAYzg3mINEO8jSCD478/DPsZmvqWblu1bOmStes3Pp0ezVDF4Gif0Hfx9///74/ObRZ2YNiZ47C8XIRBxFJR0jbSSUud4f9zAQWn8NTuziAt2zy5xIMM/z8LFX0E+fD/x0MRDCeA1v7Z++Y/FDzyvAtyBxIA+h8A8ZKLeT+lJroAAAAASUVORK5CYII=', 'options': [ { 'name': 'enabled', 'type': 'enabler', 'default': False }, { 'name': 'domain', 'advanced': True, 'label': 'Proxy server', 'description': 'Domain for requests, keep empty to let CouchPotato pick.', }, { 'name': 'seed_ratio', 'label': 'Seed ratio', 'type': 'float', 'default': 1, 'description': 'Will not be (re)moved until this seed ratio is met.', }, { 'name': 'seed_time', 'label': 'Seed time', 'type': 'int', 'default': 40, 'description': 'Will not be (re)moved until this seed time (in hours) is met.', }, { 'name': 'extra_score', 'advanced': True, 'label': 'Extra Score', 'type': 'int', 'default': 0, 'description': 'Starting score for each release found via this provider.', }, { 'name': 'trusted_only', 'advanced': True, 'label': 'Trusted/VIP Only', 'type': 'bool', 'default': False, 'description': 'Only download releases marked as Trusted or VIP' } ], } ] }]
7,927
Python
.py
169
30.08284
395
0.479668
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,805
ilovetorrents.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/torrent/ilovetorrents.py
import re import traceback from bs4 import BeautifulSoup from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode from couchpotato.core.helpers.variable import tryInt, splitString from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.base import TorrentProvider log = CPLog(__name__) class Base(TorrentProvider): urls = { 'download': 'https://www.ilovetorrents.me/%s', 'detail': 'https://www.ilovetorrents.me/%s', 'search': 'https://www.ilovetorrents.me/browse.php?search=%s&page=%s&cat=%s', 'test': 'https://www.ilovetorrents.me/', 'login': 'https://www.ilovetorrents.me/takelogin.php', 'login_check': 'https://www.ilovetorrents.me' } login_fail_msg = 'Login failed!' cat_ids = [ (['80'], ['720p', '1080p']), (['41'], ['brrip']), (['19'], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr']), (['20'], ['dvdr']) ] cat_backup_id = 200 disable_provider = False http_time_between_calls = 1 def _searchOnTitle(self, title, movie, quality, results): page = 0 total_pages = 1 cats = self.getCatId(quality) while page < total_pages: movieTitle = tryUrlencode('"%s" %s' % (title, movie['info']['year'])) search_url = self.urls['search'] % (movieTitle, page, cats[0]) page += 1 data = self.getHTMLData(search_url) if data: try: results_table = None data_split = splitString(data, '<table') soup = None for x in data_split: soup = BeautifulSoup(x) results_table = soup.find('table', attrs = {'class': 'koptekst'}) if results_table: break if not results_table: return try: pagelinks = soup.findAll(href = re.compile('page')) page_numbers = [int(re.search('page=(?P<page_number>.+'')', i['href']).group('page_number')) for i in pagelinks] total_pages = max(page_numbers) except: pass entries = results_table.find_all('tr') for result in entries[1:]: prelink = result.find(href = re.compile('details.php')) link = prelink['href'] download = result.find('a', href = re.compile('download.php'))['href'] if link and download: def extra_score(item): trusted = (0, 
10)[result.find('img', alt = re.compile('Trusted')) is not None] vip = (0, 20)[result.find('img', alt = re.compile('VIP')) is not None] confirmed = (0, 30)[result.find('img', alt = re.compile('Helpers')) is not None] moderated = (0, 50)[result.find('img', alt = re.compile('Moderator')) is not None] return confirmed + trusted + vip + moderated id = re.search('id=(?P<id>\d+)&', link).group('id') url = self.urls['download'] % download fileSize = self.parseSize(result.select('td.rowhead')[8].text) results.append({ 'id': id, 'name': toUnicode(prelink.find('b').text), 'url': url, 'detail_url': self.urls['detail'] % link, 'size': fileSize, 'seeders': tryInt(result.find_all('td')[2].string), 'leechers': tryInt(result.find_all('td')[3].string), 'extra_score': extra_score, 'get_more_info': self.getMoreInfo }) except: log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) def getLoginParams(self): return { 'username': self.conf('username'), 'password': self.conf('password'), 'submit': 'Welcome to ILT', } def getMoreInfo(self, item): cache_key = 'ilt.%s' % item['id'] description = self.getCache(cache_key) if not description: try: full_description = self.getHTMLData(item['detail_url']) html = BeautifulSoup(full_description) nfo_pre = html.find('td', attrs = {'class': 'main'}).findAll('table')[1] description = toUnicode(nfo_pre.text) if nfo_pre else '' except: log.error('Failed getting more info for %s', item['name']) description = '' self.setCache(cache_key, description, timeout = 25920000) item['description'] = description return item def loginSuccess(self, output): return 'logout.php' in output.lower() loginCheckSuccess = loginSuccess config = [{ 'name': 'ilovetorrents', 'groups': [ { 'tab': 'searcher', 'list': 'torrent_providers', 'name': 'ILoveTorrents', 'description': 'Where the Love of Torrents is Born. 
<a href="https://www.ilovetorrents.me" target="_blank">ILoveTorrents</a>', 'wizard': True, 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACPUlEQVR4AYWM0U9SbxjH3+v266I/oNvWZTfd2J1d0ZqbZEFwWrUImOKs4YwtumFKZvvlJJADR2TCQQlMPKg5NmpREgaekAPnBATKgmK1LqQlx6awHnZWF1Tr2Xfvvs+7z+dB0mlO7StpAh+M4S/2jbo3w8+xvJvlnSneEt+10zwer5ujNUOoChjALWFw5XOwdCAk/P57cGvPl+Oht0W7VJHN5NC1uW1BON4hGjXbwpVWMZhsy9v7sEIXAsDNYBXgdkEoIKyWD2CF8ut/aOXTZc/fBSgLWw1BgA4BDHOV0GkT90cBQpXahU5TFomsb38XhJC5/Tbh1P8c6rJlBeGfAeyMhUFwNVcs9lxV9Ot0dwmyd+mrNvRtbJ2fSPC6Z3Vsvub2z3sDFACAAYzk0+kUyxEkyfN7PopqNBro55A+P6yPKIrL5zF1HwjdeBJJCObIsZO79bo3sHhWhglo5WMV3mazuVPb4fLvSL8/FAkB1hK6rXQPwYhMyROK8VK5LAiH/jsMt0HQjxiN4/ePdoilllcqDyt3Mkg8mRBNbIhMb8RERkowQA/p76g0/UDDdCoNmDminM0qSK5vlpE5kugCHhNPxntwWmJPYTMZtYcFR6ABHQsVRlYLukVORaaULvqKI46keFSCv77kSPS6kxrPptLNDHgz16fWBtyxe6v5h08LUy+KI8ushqTPWWIX8Sg6b45IrGtyW6zXFb/hpQf9m3oqfWuB0fpSw0uZ4WB69En69uOk2rmO2V52PXj+A/mI4ESKpb2HAAAAAElFTkSuQmCC', 'options': [ { 'name': 'enabled', 'type': 'enabler', 'default': False }, { 'name': 'username', 'label': 'Username', 'type': 'string', 'default': '', 'description': 'The user name for your ILT account', }, { 'name': 'password', 'label': 'Password', 'type': 'password', 'default': '', 'description': 'The password for your ILT account.', }, { 'name': 'seed_ratio', 'label': 'Seed ratio', 'type': 'float', 'default': 1, 'description': 'Will not be (re)moved until this seed ratio is met.', }, { 'name': 'seed_time', 'label': 'Seed time', 'type': 'int', 'default': 40, 'description': 'Will not be (re)moved until this seed time (in hours) is met.', }, { 'name': 'extra_score', 'advanced': True, 'label': 'Extra Score', 'type': 'int', 'default': 0, 'description': 'Starting score for each release found via this provider.', } ], } ] }]
8,181
Python
.py
162
33.746914
863
0.509959
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,806
torrentshack.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/torrent/torrentshack.py
import traceback from bs4 import BeautifulSoup from couchpotato.core.helpers.variable import tryInt from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.base import TorrentProvider import six log = CPLog(__name__) class Base(TorrentProvider): urls = { 'test': 'https://torrentshack.me/', 'login': 'https://torrentshack.me/login.php', 'login_check': 'https://torrentshack.me/inbox.php', 'detail': 'https://torrentshack.me/torrent/%s', 'search': 'https://torrentshack.me/torrents.php?action=advanced&searchstr=%s&scene=%s&filter_cat[%d]=1', 'download': 'https://torrentshack.me/%s', } http_time_between_calls = 1 # Seconds login_fail_msg = 'You entered an invalid' def _search(self, media, quality, results): url = self.urls['search'] % self.buildUrl(media, quality) data = self.getHTMLData(url) if data: html = BeautifulSoup(data) try: result_table = html.find('table', attrs = {'id': 'torrent_table'}) if not result_table: return entries = result_table.find_all('tr', attrs = {'class': 'torrent'}) for result in entries: link = result.find('span', attrs = {'class': 'torrent_name_link'}).parent url = result.find('td', attrs = {'class': 'torrent_td'}).find('a') size = result.find('td', attrs = {'class': 'size'}).contents[0].strip('\n ') tds = result.find_all('td') results.append({ 'id': link['href'].replace('torrents.php?torrentid=', ''), 'name': six.text_type(link.span.string).translate({ord(six.u('\xad')): None}), 'url': self.urls['download'] % url['href'], 'detail_url': self.urls['download'] % link['href'], 'size': self.parseSize(size), 'seeders': tryInt(tds[len(tds)-2].string), 'leechers': tryInt(tds[len(tds)-1].string), }) except: log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) def getLoginParams(self): return { 'username': self.conf('username'), 'password': self.conf('password'), 'keeplogged': '1', 'login': 'Login', } def loginSuccess(self, output): return 'logout.php' in output.lower() loginCheckSuccess 
= loginSuccess def getSceneOnly(self): return '1' if self.conf('scene_only') else '' config = [{ 'name': 'torrentshack', 'groups': [ { 'tab': 'searcher', 'list': 'torrent_providers', 'name': 'TorrentShack', 'description': '<a href="https://torrentshack.me/" target="_blank">TorrentShack</a>', 'wizard': True, 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAABmElEQVQoFQXBzY2cVRiE0afqvd84CQiAnxWWtyxsS6ThINBYg2Dc7mZBMEjE4mzs6e9WcY5+ePNuVFJJodQAoLo+SaWCy9rcV8cmjah3CI6iYu7oRU30kE5xxELRfamklY3k1NL19sSm7vPzP/ZdNZzKVDaY2sPZJBh9fv5ITrmG2+Vp4e1sPchVqTCQZJnVXi+/L4uuAJGly1+Pw8CprLbi8Om7tbT19/XRqJUk11JP9uHj9ulxhXbvJbI9qJvr5YkGXFG2IBT8tXczt+sfzDZCp3765f3t9tHEHGEDACma77+8o4oATKk+/PfW9YmHruRFjWoVSFsVsGu1YSKq6Oc37+n98unPZSRlY7vsKDqN+92X3yR9+PdXee3iJNKMStqdcZqoTJbUSi5JOkpfRlhSI0mSpEmCFKoU7FqSNOLAk54uGwCStMUCgLrVic62g7oDoFmmdI+P3S0pDe1xvDqb6XrZqbtzShWNoh9fv/XQHaDdM9OqrZi2M7M3UrB2vlkPS1IbdEBk7UiSoD6VlZ6aKWer4aH4f/AvKoHUTjuyAAAAAElFTkSuQmCC', 'options': [ { 'name': 'enabled', 'type': 'enabler', 'default': False, }, { 'name': 'username', 'default': '', }, { 'name': 'password', 'default': '', 'type': 'password', }, { 'name': 'seed_ratio', 'label': 'Seed ratio', 'type': 'float', 'default': 1, 'description': 'Will not be (re)moved until this seed ratio is met.', }, { 'name': 'seed_time', 'label': 'Seed time', 'type': 'int', 'default': 40, 'description': 'Will not be (re)moved until this seed time (in hours) is met.', }, { 'name': 'scene_only', 'type': 'bool', 'default': False, 'description': 'Only allow scene releases.' }, { 'name': 'extra_score', 'advanced': True, 'label': 'Extra Score', 'type': 'int', 'default': 0, 'description': 'Starting score for each release found via this provider.', } ], }, ], }]
5,256
Python
.py
113
31.867257
643
0.521968
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,807
morethantv.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/torrent/morethantv.py
import traceback from bs4 import BeautifulSoup from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode from couchpotato.core.helpers.variable import tryInt from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.base import TorrentProvider import six log = CPLog(__name__) class Base(TorrentProvider): urls = { 'test': 'https://www.morethan.tv/', 'login': 'https://www.morethan.tv/login.php', 'login_check': 'https://www.morethan.tv/inbox.php', 'detail': 'https://www.morethan.tv/torrents.php?torrentid=%s', 'search': 'https://www.morethan.tv/torrents.php?%s&filter_cat%%5B1%%5D=1&action=advanced&searchstr=%s', 'download': 'https://www.morethan.tv/%s', } http_time_between_calls = 1 # Seconds login_fail_msg = 'You entered an invalid password.' def _searchOnTitle(self, title, movie, quality, results): movieTitle = tryUrlencode('%s %s' % (title.replace(':', ''), movie['info']['year'])) url = self.urls['search'] % (self.getSceneOnly(), movieTitle) data = self.getHTMLData(url) if data: html = BeautifulSoup(data) try: result_table = html.find('table', attrs = {'id': 'torrent_table'}) if not result_table: return entries = result_table.find_all('tr', attrs = {'class': 'torrent'}) for result in entries: link = result.find('a', attrs = {'dir': 'ltr'}) url = result.find('span', attrs = {'title': 'Download'}).parent tds = result.find_all('td') size = tds[5].contents[0].strip('\n ') results.append({ 'id': link['href'].replace('torrents.php?id=', '').split('&')[0], 'name': link.contents[0], 'url': self.urls['download'] % url['href'], 'detail_url': self.urls['download'] % link['href'], 'size': self.parseSize(size), 'seeders': tryInt(tds[len(tds)-2].string), 'leechers': tryInt(tds[len(tds)-1].string), }) except: log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) def getLoginParams(self): return { 'username': self.conf('username'), 'password': self.conf('password'), 'login': 'Log in', } def loginSuccess(self, 
output): return 'logout.php' in output.lower() loginCheckSuccess = loginSuccess def getSceneOnly(self): return 'releasetype=24' if self.conf('scene_only') else '' config = [{ 'name': 'morethantv', 'groups': [ { 'tab': 'searcher', 'list': 'torrent_providers', 'name': 'MoreThanTV', 'description': '<a href="http://morethan.tv/" target="_blank">MoreThanTV</a>', 'wizard': True, 'icon': 'AAABAAEAEBAAAAEAIABoBAAAFgAAACgAAAAQAAAAIAAAAAEAIAAAAAAAQAQAABMLAAATCwAAAAAAAAAAAAAiHaEEIh2hYCIdoaEiHaGaIh2hmCIdoZgiHaGYIh2hmCIdoZgiHaGYIh2hlyIdoZUiHaHAIh2htiIdoUEAAAAAIh2hJyIdoW0iHaFsIh2hbCIdoWsiHaFrIh2hayIdoWsiHaFrIh2hayIdoWoiHaFbIh2hsyIdof8iHaH7Ih2hQSIdoQciHaEDAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAiHaG8Ih2h/yIdoZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIh2hoSIdof8iHaGeAAAAAAAAAAAAAAAAIh2hIiIdoZkiHaGZIh2hIiIdoSIiHaGZIh2hiAAAAAAAAAAAAAAAACIdoaEiHaH/Ih2hngAAAAAAAAAAAAAAACIdoaoiHaH/Ih2h/yIdoUQiHaF3Ih2h/yIdof8iHaFEAAAAAAAAAAAiHaGiIh2h/yIdoZ4AAAAAAAAAAAAAAAAiHaG7Ih2h/yIdoREAAAAAIh2h7iIdof8iHaH/Ih2hqgAAAAAAAAAAIh2hoiIdof8iHaGeAAAAAAAAAAAAAAAAIh2huyIdof8AAAAAIh2hVSIdof8iHaGZIh2hzCIdof8iHaERAAAAACIdoaEiHaH/Ih2hngAAAAAAAAAAIh2hZiIdod0iHaH/Ih2hmSIdobsiHaH/Ih2hVSIdoXciHaH/Ih2hdwAAAAAiHaGhIh2h/yIdoZ4AAAAAAAAAACIdoZkiHaH/Ih2h/yIdof8iHaH/Ih2h7gAAAAAiHaEzIh2h/yIdobsAAAAAIh2hoSIdof8iHaGeAAAAAAAAAAAAAAAAIh2huyIdof8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACIdoaEiHaH/Ih2hngAAAAAAAAAAAAAAACIdobsiHaH/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAiHaGhIh2h/yIdoZ4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIh2hoSIdof8iHaGeIh2hCyIdoQYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACIdocUiHaH/Ih2hlSIdoSMiHaFwIh2hfSIdoXEiHaF3Ih2heiIdoXkiHaF5Ih2heSIdoXoiHaFzIh2hYiIdocIiHaH/Ih2h5yIdoS4AAAAAIh2hLyIdoXoiHaGMIh2hcyIdoXMiHaFzIh2hcyIdoXMiHaFyIh2heSIdoY0iHaFsIh2hSSIdoQoAAAAAAAEgNgAAb2Q/+CA1//hTdOA4cGngGCA54hhHZeQIaW7ACG50wIgAUOf4Q0Xn+E9S//hFVj/4PTYAAFJPgAFTUw==', 'options': [ { 'name': 'enabled', 'type': 'enabler', 'default': False, }, { 
'name': 'username', 'default': '', }, { 'name': 'password', 'default': '', 'type': 'password', }, { 'name': 'seed_ratio', 'label': 'Seed ratio', 'type': 'float', 'default': 1, 'description': 'Will not be (re)moved until this seed ratio is met.', }, { 'name': 'seed_time', 'label': 'Seed time', 'type': 'int', 'default': 40, 'description': 'Will not be (re)moved until this seed time (in hours) is met.', }, { 'name': 'scene_only', 'type': 'bool', 'default': False, 'description': 'Only allow scene releases.' }, { 'name': 'extra_score', 'advanced': True, 'label': 'Extra Score', 'type': 'int', 'default': 0, 'description': 'Starting score for each release found via this provider.', } ], }, ], }]
6,235
Python
.py
114
40.210526
1,559
0.594098
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,808
sceneaccess.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/torrent/sceneaccess.py
import traceback from bs4 import BeautifulSoup from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.helpers.variable import tryInt from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.base import TorrentProvider log = CPLog(__name__) class Base(TorrentProvider): urls = { 'test': 'https://www.sceneaccess.eu/', 'login': 'https://www.sceneaccess.eu/login', 'login_check': 'https://www.sceneaccess.eu/inbox', 'detail': 'https://www.sceneaccess.eu/details?id=%s', 'search': 'https://www.sceneaccess.eu/browse?c%d=%d', 'archive': 'https://www.sceneaccess.eu/archive?&c%d=%d', 'download': 'https://www.sceneaccess.eu/%s', } http_time_between_calls = 1 # Seconds login_fail_msg = 'Username or password incorrect' def _searchOnTitle(self, title, media, quality, results): url = self.buildUrl(title, media, quality) data = self.getHTMLData(url) if data: html = BeautifulSoup(data) try: resultsTable = html.find('table', attrs = {'id': 'torrents-table'}) if resultsTable is None: return entries = resultsTable.find_all('tr', attrs = {'class': 'tt_row'}) for result in entries: link = result.find('td', attrs = {'class': 'ttr_name'}).find('a') url = result.find('td', attrs = {'class': 'td_dl'}).find('a') seeders = result.find('td', attrs = {'class': 'ttr_seeders'}).find('a') leechers = result.find('td', attrs = {'class': 'ttr_leechers'}).find('a') torrent_id = link['href'].replace('details?id=', '') results.append({ 'id': torrent_id, 'name': link['title'], 'url': self.urls['download'] % url['href'], 'detail_url': self.urls['detail'] % torrent_id, 'size': self.parseSize(result.find('td', attrs = {'class': 'ttr_size'}).contents[0]), 'seeders': tryInt(seeders.string) if seeders else 0, 'leechers': tryInt(leechers.string) if leechers else 0, 'get_more_info': self.getMoreInfo, }) except: log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) def getMoreInfo(self, item): full_description = 
self.getCache('sceneaccess.%s' % item['id'], item['detail_url'], cache_timeout = 25920000) html = BeautifulSoup(full_description) nfo_pre = html.find('div', attrs = {'id': 'details_table'}) description = toUnicode(nfo_pre.text) if nfo_pre else '' item['description'] = description return item # Login def getLoginParams(self): return { 'username': self.conf('username'), 'password': self.conf('password'), 'submit': 'come on in', } def loginSuccess(self, output): return '/inbox' in output.lower() loginCheckSuccess = loginSuccess config = [{ 'name': 'sceneaccess', 'groups': [ { 'tab': 'searcher', 'list': 'torrent_providers', 'name': 'SceneAccess', 'description': '<a href="https://sceneaccess.eu/" target="_blank">SceneAccess</a>', 'wizard': True, 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAACT0lEQVR4AYVQS0sbURidO3OTmajJ5FElTTOkPmZ01GhHrIq0aoWAj1Vc+A/cuRMXbl24V9SlCGqrLhVFCrooEhCp2BAx0mobTY2kaR7qmOm87EXL1EWxh29xL+c7nPMdgGHYO5bF/gdbefnr6WlbWRnxluMwAB4Z0uEgXa7nwaDL7+/RNPzxbYvb/XJ0FBYVfd/ayh0fQ4qCGEHcm0KLRZUk7Pb2YRJPRwcsKMidnKD3t9VVT3s7BDh+z5FOZ3Vfn3h+Hltfx00mRRSRWFcUmmVNhYVqPn8dj3va2oh+txvcQRVF9ebm1fi4k+dRFbosY5rm4Hk7xxULQnJnx93S4g0EIEEQRoDLo6PrWEw8Pc0eHLwYGopMTDirqlJ7eyhYYGHhfgfHCcKYksZGVB/NcXI2mw6HhZERqrjYTNPHi4tFPh8aJIYIhgPlcCRDoZLW1s75+Z/7+59nZ/OJhLWigqAoKZX6Mjf3dXkZ3pydGYLc4aEoCCkInzQ1fRobS2xuvllaonkedfArnY5OTdGVldBkOADgqq2Nr6z8CIWaJietDHOhKB+HhwFKC6Gnq4ukKJvP9zcSbjYDXbeVlkKzuZBhnnV3e3t6UOmaJO0ODibW1hB1GYkg8R/gup7Z3TVZLJ5AILW9LcZiVpYtYBhw16O3t7cauckyeF9Tgz0ATpL2+nopmWycmbnY2LiKRjFk6/d7+/vRJfl4HGzV1T0UIM43MGBvaIBWK/YvwM5w+IMgGH8tkyEgvIpE7M3Nt6qqZrNyOq1kMmouh455Ggz+BhKY4GEc2CfwAAAAAElFTkSuQmCC', 'options': [ { 'name': 'enabled', 'type': 'enabler', 'default': False, }, { 'name': 'username', 'default': '', }, { 'name': 'password', 'default': '', 'type': 'password', }, { 'name': 'seed_ratio', 'label': 'Seed ratio', 'type': 'float', 'default': 1, 'description': 'Will not be (re)moved until this seed ratio is met.', }, { 'name': 'seed_time', 
'label': 'Seed time', 'type': 'int', 'default': 40, 'description': 'Will not be (re)moved until this seed time (in hours) is met.', }, { 'name': 'extra_score', 'advanced': True, 'label': 'Extra Score', 'type': 'int', 'default': 20, 'description': 'Starting score for each release found via this provider.', } ], }, ], }]
5,825
Python
.py
115
36.486957
911
0.559951
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,809
rarbg.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/torrent/rarbg.py
import re import traceback import random from datetime import datetime from couchpotato import fireEvent from couchpotato.core.helpers.variable import tryInt, getIdentifier from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider log = CPLog(__name__) class Base(TorrentMagnetProvider): urls = { 'test': 'https://torrentapi.org/pubapi_v2.php?app_id=couchpotato', 'token': 'https://torrentapi.org/pubapi_v2.php?get_token=get_token&app_id=couchpotato', 'search': 'https://torrentapi.org/pubapi_v2.php?token=%s&mode=search&search_imdb=%s&min_seeders=%s&min_leechers' '=%s&ranked=%s&category=movies&format=json_extended&app_id=couchpotato', } http_time_between_calls = 2 # Seconds _token = 0 def _search(self, movie, quality, results): hasresults = 0 curryear = datetime.now().year movieid = getIdentifier(movie) try: movieyear = movie['info']['year'] except: log.error('RARBG: Couldn\'t get movie year') movieyear = 0 self.getToken() if (self._token != 0) and (movieyear == 0 or movieyear <= curryear): data = self.getJsonData(self.urls['search'] % (self._token, movieid, self.conf('min_seeders'), self.conf('min_leechers'), self.conf('ranked_only')), headers = self.getRequestHeaders()) if data: if 'error_code' in data: if data['error'] == 'No results found': log.debug('RARBG: No results returned from Rarbg') else: if data['error_code'] == 10: log.error(data['error'], movieid) else: log.error('RARBG: There is an error in the returned JSON: %s', data['error']) else: hasresults = 1 try: if hasresults: for result in data['torrent_results']: name = result['title'] titlesplit = re.split('-', name) releasegroup = titlesplit[len(titlesplit)-1] xtrainfo = self.find_info(name) encoding = xtrainfo[0] resolution = xtrainfo[1] # source = xtrainfo[2] pubdate = result['pubdate'] # .strip(' +0000') try: pubdate = datetime.strptime(pubdate, '%Y-%m-%d %H:%M:%S +0000') now = datetime.utcnow() age = (now - pubdate).days except 
ValueError: log.debug('RARBG: Bad pubdate') age = 0 torrentscore = self.conf('extra_score') seeders = tryInt(result['seeders']) torrent_desc = '/ %s / %s / %s / %s seeders' % (releasegroup, resolution, encoding, seeders) if seeders == 0: torrentscore = 0 sliceyear = result['pubdate'][0:4] year = tryInt(sliceyear) results.append({ 'id': random.randint(100, 9999), 'name': re.sub('[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) %s' % (name, year, torrent_desc)), 'url': result['download'], 'detail_url': result['info_page'], 'size': tryInt(result['size']/1048576), # rarbg sends in bytes 'seeders': tryInt(result['seeders']), 'leechers': tryInt(result['leechers']), 'age': tryInt(age), 'score': torrentscore }) except RuntimeError: log.error('RARBG: Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) def getToken(self): tokendata = self.getJsonData(self.urls['token'], cache_timeout = 900, headers = self.getRequestHeaders()) if tokendata: try: token = tokendata['token'] if self._token != token: log.debug('RARBG: GOT TOKEN: %s', token) self._token = token except: log.error('RARBG: Failed getting token from Rarbg: %s', traceback.format_exc()) self._token = 0 def getRequestHeaders(self): return { 'User-Agent': fireEvent('app.version', single = True) } @staticmethod def find_info(filename): # CODEC # codec = 'x264' v = re.search('(?i)(x265|h265|h\.265)', filename) if v: codec = 'x265' v = re.search('(?i)(xvid)', filename) if v: codec = 'xvid' # RESOLUTION # resolution = 'SD' a = re.search('(?i)(720p)', filename) if a: resolution = '720p' a = re.search('(?i)(1080p)', filename) if a: resolution = '1080p' a = re.search('(?i)(2160p)', filename) if a: resolution = '2160p' # SOURCE # source = 'HD-Rip' s = re.search('(?i)(WEB-DL|WEB_DL|WEB\.DL)', filename) if s: source = 'WEB-DL' s = re.search('(?i)(WEBRIP)', filename) if s: source = 'WEBRIP' s = re.search('(?i)(DVDR|DVDRip|DVD-Rip)', filename) if s: source = 'DVD-R' s = re.search('(?i)(BRRIP|BDRIP|BluRay)', filename) 
if s: source = 'BR-Rip' s = re.search('(?i)BluRay(.*)REMUX', filename) if s: source = 'BluRay-Remux' s = re.search('(?i)BluRay(.*)\.(AVC|VC-1)\.', filename) if s: source = 'BluRay-Full' return_info = [codec, resolution, source] return return_info config = [{ 'name': 'rarbg', 'groups': [ { 'tab': 'searcher', 'list': 'torrent_providers', 'name': 'RARBG', 'wizard': True, 'description': '<a href="https://rarbg.to/torrents.php" target="_blank">RARBG</a>', 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAB+UlEQVQ4jYXTP2hcRxDH8c8JJZjbYNy8V7gIr0qhg5AiFnETX' '+PmVAtSmKDaUhUiFyGxjXFlp0hhHy5cqFd9lSGcU55cBU6EEMIj5dsmMewSjNGmOJ3852wysMyww37n94OdXimlh49xDR/hxGr' '8hZ/xx0qnlHK5lPKk/H/8U0r5oZTyQSmltzzr+AKfT+ed8UFLeHNAH1UVbA2r88NBfQcX8O2yv74sUqKNWT+T01sy2+zpUbS/w' '/awvo7H+O0NQEA/LPKlQWXrSgUmR9HxcZQwmbZGw/pc4MsVAIT+IjcNw80aTjaaem1vPCNlGakj1C6uWFiqeDtyTvoyqAKhBn+' '+E7CkxC6Zzjop57XpUSenpIuMhpXAc/zyHkAicRSjw6fHZ1ewPdqwszWAB2hXACln8+NWSlld9zX9YN7GhajQXz5+joPXR66de' 'U1J27Zi7FzaqE0OdmwNGzF2Ymzt3j+E8/gJH64AFlozKS4+Be7tjwyaIKVsOpnavX0II9x8ByDLKco5SwvjL0MI/z64tyOcwsf' 'jQw8PJvAdvsb6GSBlxI7UyTnD37i7OWhe3NrflvOit3djbDKdwR181SulXMXdrkubbdvKaOpK09S/4jP8iG9m8zmJjCoEg0HzO' '77vna7zp7ju1TqfYIyZxT7dwCd4eWr7BR7h2X8S6gShJlbKYQAAAABJRU5ErkJggg==', 'options': [ { 'name': 'enabled', 'type': 'enabler', 'default': False, }, { 'name': 'ranked_only', 'advanced': True, 'label': 'Ranked Only', 'type': 'int', 'default': 1, 'description': 'Only ranked torrents (internal), scene releases, rarbg releases. 
' 'Enter 1 (true) or 0 (false)', }, { 'name': 'min_seeders', 'advanced': True, 'label': 'Minimum Seeders', 'type': 'int', 'default': 10, 'description': 'Minium amount of seeders the release must have.', }, { 'name': 'min_leechers', 'advanced': True, 'label': 'Minimum leechers', 'type': 'int', 'default': 0, 'description': 'Minium amount of leechers the release must have.', }, { 'name': 'extra_score', 'advanced': True, 'label': 'Extra Score', 'type': 'int', 'default': 0, 'description': 'Starting score for each release found via this provider.', } ], }, ], }]
9,172
Python
.py
198
29.545455
148
0.481324
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,810
hd4free.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/torrent/hd4free.py
import re import json import traceback from couchpotato.core.helpers.variable import tryInt, getIdentifier from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.base import TorrentProvider log = CPLog(__name__) class Base(TorrentProvider): urls = { 'test': 'https://hd4free.xyz/', 'detail': 'https://hd4free.xyz/details.php?id=%s', 'search': 'https://hd4free.xyz/searchapi.php?apikey=%s&username=%s&imdbid=%s&internal=%s', 'download': 'https://hd4free.xyz/download.php?torrent=%s&torrent_pass=%s', } http_time_between_calls = 1 # Seconds login_fail_msg = 'Your apikey is not valid! Go to HD4Free and reset your apikey.' def _search(self, movie, quality, results): data = self.getJsonData(self.urls['search'] % (self.conf('apikey'), self.conf('username'), getIdentifier(movie), self.conf('internal_only'))) if data: if 'error' in data: if self.login_fail_msg in data['error']: # Check for login failure self.disableAccount() else: log.error('%s returned an error (possible rate limit): %s', (self.getName(), data['error'])) return try: #for result in data[]: for key, result in data.iteritems(): if tryInt(result['total_results']) == 0: return torrentscore = self.conf('extra_score') releasegroup = result['releasegroup'] resolution = result['resolution'] encoding = result['encoding'] freeleech = tryInt(result['freeleech']) seeders = tryInt(result['seeders']) torrent_desc = '/ %s / %s / %s / %s seeders' % (releasegroup, resolution, encoding, seeders) if freeleech > 0 and self.conf('prefer_internal'): torrent_desc += '/ Internal' torrentscore += 200 if seeders == 0: torrentscore = 0 name = result['release_name'] year = tryInt(result['year']) results.append({ 'id': tryInt(result['torrentid']), 'name': re.sub('[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) %s' % (name, year, torrent_desc)), 'url': self.urls['download'] % (result['torrentid'], result['torrentpass']), 'detail_url': self.urls['detail'] % result['torrentid'], 'size': tryInt(result['size']), 
'seeders': tryInt(result['seeders']), 'leechers': tryInt(result['leechers']), 'age': tryInt(result['age']), 'score': torrentscore }) except: log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) config = [{ 'name': 'hd4free', 'groups': [ { 'tab': 'searcher', 'list': 'torrent_providers', 'name': 'HD4Free', 'wizard': True, 'description': '<a href="https://hd4free.xyz" target="_blank">HD4Free</a>', 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAABX1BMVEUF6nsH33cJ03EJ1XIJ1nMKzXIKz28Lym4MxGsMxWsMx2wNvmgNv2kNwGkNwWwOuGgOuWYOuWcOumcOu2cOvmgPtWQPtmUPt2UPt2YQr2IQsGIQsGMQsmMQs2QRqmARq2ARrmERrmISpV4SpmASp14SqF8ToFsToFwToVwTo10TpV0UnFoUn1sVllcVmFgWkFUWklYXjVQXjlMXkFUYh1EYilIYi1MZhlEafk0af04agE4agU4beEobeUsbe0wcdUkeaUQebUYfZEMfZ0QgX0AgYEAgYUEhWj4iVz0iWD0jTzkkSzcmQTMmQzQnPTInPjInPzIoNy8oOC8oODAoOTAoOjApMi0pNC4pNS4qLCoqLSsqLisqMCwrJygrKCgrKCkrKSkrKikrKiorKyosIyYsIycsJCcsJScsJigtHyUuGCIuGiMuGyMuHCMuHCQvEyAvFSEvFiEvFyE0ABU0ABY5lYz4AAAA3ElEQVR4AWNIQAMMiYmJCYkIkMCQnpKWkZ4KBGlARlpaLEOor194kI+Pj6+PT0CET0AYg46Alr22NDeHkBinnq6SkitDrolDgYtaapajdpGppoFfGkMhv2GxE0uuPwNfsk6mhHMOQ54isxmbUJKCtWx+tIZQcDpDtqSol7qIMqsRu3dIhJxxFkOBoF2JG5O7lSqjh5S/tkkWQ5SBTbqnfkymv2WGLa95YCSDhZiMvKIwj4GJCpesuDivK0N6VFRUYlRyfHJUchQQJDMkxsfHJcTHAxEIxMVj+BZDAACjwkqhYgsTAAAAAABJRU5ErkJggg==', 'options': [ { 'name': 'enabled', 'type': 'enabler', 'default': False, }, { 'name': 'username', 'default': '', 'description': 'Enter your site username.', }, { 'name': 'apikey', 'default': '', 'label': 'API Key', 'description': 'Enter your site api key. This can be found on <a href="https://hd4free.xyz/usercp.php?action=security" target="_blank">Profile Security</a>', }, { 'name': 'seed_ratio', 'label': 'Seed ratio', 'type': 'float', 'default': 0, 'description': 'Will not be (re)moved until this seed ratio is met. 
HD4Free minimum is 1:1.', }, { 'name': 'seed_time', 'label': 'Seed time', 'type': 'int', 'default': 0, 'description': 'Will not be (re)moved until this seed time (in hours) is met. HD4Free minimum is 72 hours.', }, { 'name': 'prefer_internal', 'advanced': True, 'type': 'bool', 'default': 1, 'description': 'Favors internal releases over non-internal releases.', }, { 'name': 'internal_only', 'advanced': True, 'label': 'Internal Only', 'type': 'bool', 'default': False, 'description': 'Only download releases marked as HD4Free internal', }, { 'name': 'extra_score', 'advanced': True, 'label': 'Extra Score', 'type': 'int', 'default': 0, 'description': 'Starting score for each release found via this provider.', } ], }, ], }]
6,504
Python
.py
125
35.368
870
0.525613
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,811
hdbits.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/torrent/hdbits.py
import re import json import traceback from couchpotato.core.helpers.variable import tryInt, getIdentifier from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.base import TorrentProvider log = CPLog(__name__) class Base(TorrentProvider): urls = { 'test': 'https://hdbits.org/', 'detail': 'https://hdbits.org/details.php?id=%s', 'download': 'https://hdbits.org/download.php?id=%s&passkey=%s', 'api': 'https://hdbits.org/api/torrents' } http_time_between_calls = 1 # Seconds login_fail_msg = 'Invalid authentication credentials' def _post_query(self, **params): post_data = { 'username': self.conf('username'), 'passkey': self.conf('passkey') } post_data.update(params) if self.conf('internal_only'): post_data.update({'origin': [1]}) try: result = self.getJsonData(self.urls['api'], data = json.dumps(post_data)) if result: if result['status'] != 0: if self.login_fail_msg in result['message']: # Check for login failure self.disableAccount() return log.error('Error searching hdbits: %s' % result['message']) else: return result['data'] except: pass return None def _search(self, movie, quality, results): match = re.match(r'tt(\d{7})', getIdentifier(movie)) data = self._post_query(imdb = {'id': match.group(1)}) if data: try: for result in data: results.append({ 'id': result['id'], 'name': result['name'], 'url': self.urls['download'] % (result['id'], self.conf('passkey')), 'detail_url': self.urls['detail'] % result['id'], 'size': tryInt(result['size']) / 1024 / 1024, 'seeders': tryInt(result['seeders']), 'leechers': tryInt(result['leechers']) }) except: log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) config = [{ 'name': 'hdbits', 'groups': [ { 'tab': 'searcher', 'list': 'torrent_providers', 'name': 'HDBits', 'wizard': True, 'description': '<a href="http://hdbits.org" target="_blank">HDBits</a>', 'icon': 
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAABi0lEQVR4AZWSzUsbQRjGdyabTcvSNPTSHlpQQeMHJApC8CJRvHgQQU969+LJP8G7f4N3DwpeFRQvRr0EKaUl0ATSpkigUNFsMl/r9NmZLCEHA/nNO5PfvMPDm0DI6fV3ZxiolEICe1oZCBVCCmBPKwOh2ErKBHGE4KYEXBpSLkUlqO4LcM7f+6nVhRnOhSkOz/hexk+tL+YL0yPF2YmN4tynD++4gTLGkNNac9YFLoREBR1+cnF3dFY6v/m6PD+FaXiNJtgA4xYbABxiGrz6+6HWaI5/+Qh37YS0/3Znc8UxwNGBIIBX22z+/ZdJ+4wzyjpR4PEpODg8tgUXBv2iWUzSpa12B0IR6n6lvt8Aek2lZHb084+fdRNgrwY8z81PjhVy2d2ttUrtV/lbBa+JXGEpDMPnoF2tN1QYRqVUtf6nFbThb7wk7le395elcqhASLb39okDiHY00VCtCTEHwSiH4AI0lkOiT1dwMeSfT3SRxiQWNO7Zwj1egkoVIQFMKvSiC3bcjXq9Jf8DcDIRT3hh10kAAAAASUVORK5CYII=', 'options': [ { 'name': 'enabled', 'type': 'enabler', 'default': False, }, { 'name': 'username', 'default': '', }, { 'name': 'passkey', 'default': '', }, { 'name': 'seed_ratio', 'label': 'Seed ratio', 'type': 'float', 'default': 1, 'description': 'Will not be (re)moved until this seed ratio is met.', }, { 'name': 'seed_time', 'label': 'Seed time', 'type': 'int', 'default': 40, 'description': 'Will not be (re)moved until this seed time (in hours) is met.', }, { 'name': 'extra_score', 'advanced': True, 'label': 'Extra Score', 'type': 'int', 'default': 0, 'description': 'Starting score for each release found via this provider.', }, { 'name': 'internal_only', 'advanced': True, 'label': 'Internal Only', 'type': 'bool', 'default': False, 'description': 'Only download releases marked as HDBits internal' } ], }, ], }]
4,890
Python
.py
112
28.741071
627
0.504518
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,812
passthepopcorn.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/torrent/passthepopcorn.py
import htmlentitydefs
import json
import re
import time
import traceback

from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import getTitle, tryInt, mergeDicts, getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider
from dateutil.parser import parse
import six

log = CPLog(__name__)


class Base(TorrentProvider):
    """Search provider for the private PassThePopcorn tracker (JSON API)."""

    urls = {
        'domain': 'https://passthepopcorn.me',
        'detail': 'https://passthepopcorn.me/torrents.php?torrentid=%s',
        'torrent': 'https://passthepopcorn.me/torrents.php',
        'login': 'https://passthepopcorn.me/ajax.php?action=login',
        'login_check': 'https://passthepopcorn.me/ajax.php?action=login',
        'search': 'https://passthepopcorn.me/search/%s/0/7/%d'
    }

    login_errors = 0
    http_time_between_calls = 2

    def _search(self, media, quality, results):
        """Search PTP by IMDb id and append scored release dicts to `results`.

        Scoring knobs (prefer_golden/prefer_freeleech/prefer_scene/no_scene)
        bias ranking; `quality_search_params` / `post_search_filters` are
        defined on the concrete provider, not in this base block.
        """
        quality_id = quality['identifier']

        params = mergeDicts(self.quality_search_params[quality_id].copy(), {
            'order_by': 'relevance',
            'order_way': 'descending',
            'searchstr': getIdentifier(media)
        })

        url = '%s?json=noredirect&%s' % (self.urls['torrent'], tryUrlencode(params))
        res = self.getJsonData(url)

        try:
            # Fix: guard against a failed request (res is None) before the
            # membership test, which previously raised TypeError.
            if not res or 'Movies' not in res:
                return

            authkey = res['AuthKey']
            passkey = res['PassKey']

            for ptpmovie in res['Movies']:
                if 'Torrents' not in ptpmovie:
                    log.debug('Movie %s (%s) has NO torrents', (ptpmovie['Title'], ptpmovie['Year']))
                    continue

                log.debug('Movie %s (%s) has %d torrents', (ptpmovie['Title'], ptpmovie['Year'], len(ptpmovie['Torrents'])))
                for torrent in ptpmovie['Torrents']:
                    torrent_id = tryInt(torrent['Id'])
                    torrentdesc = ''
                    torrentscore = 0

                    if 'GoldenPopcorn' in torrent and torrent['GoldenPopcorn']:
                        torrentdesc += ' HQ'
                        if self.conf('prefer_golden'):
                            torrentscore += 5000
                    if 'FreeleechType' in torrent:
                        torrentdesc += ' Freeleech'
                        if self.conf('prefer_freeleech'):
                            torrentscore += 7000
                    if 'Scene' in torrent and torrent['Scene']:
                        torrentdesc += ' Scene'
                        if self.conf('prefer_scene'):
                            torrentscore += 2000
                        if self.conf('no_scene'):
                            torrentscore -= 2000
                    if 'RemasterTitle' in torrent and torrent['RemasterTitle']:
                        torrentdesc += self.htmlToASCII(' %s' % torrent['RemasterTitle'])

                    torrent_name = torrent['ReleaseName'] + ' - %s' % torrentdesc

                    def extra_check(item):
                        return self.torrentMeetsQualitySpec(item, quality_id)

                    results.append({
                        'id': torrent_id,
                        'name': torrent_name,
                        'Source': torrent['Source'],
                        'Checked': 'true' if torrent['Checked'] else 'false',
                        'Resolution': torrent['Resolution'],
                        'url': '%s?action=download&id=%d&authkey=%s&torrent_pass=%s' % (self.urls['torrent'], torrent_id, authkey, passkey),
                        'detail_url': self.urls['detail'] % torrent_id,
                        'date': tryInt(time.mktime(parse(torrent['UploadTime']).timetuple())),
                        'size': tryInt(torrent['Size']) / (1024 * 1024),  # bytes -> MB
                        'seeders': tryInt(torrent['Seeders']),
                        'leechers': tryInt(torrent['Leechers']),
                        'score': torrentscore,
                        'extra_check': extra_check,
                    })

        except:
            log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))

    def torrentMeetsQualitySpec(self, torrent, quality):
        """Apply the per-quality post-search filters to one result dict.

        A spec value starting with '!' is a veto (match -> reject); other
        values are "must match at least one" per field.
        """
        if quality not in self.post_search_filters:
            return True

        reqs = self.post_search_filters[quality].copy()

        if self.conf('require_approval'):
            log.debug('Config: Require staff-approval activated')
            reqs['Checked'] = ['true']

        for field, specs in reqs.items():
            matches_one = False
            seen_one = False

            if field not in torrent:
                log.debug('Torrent with ID %s has no field "%s"; cannot apply post-search-filter for quality "%s"', (torrent['id'], field, quality))
                continue

            for spec in specs:
                if len(spec) > 0 and spec[0] == '!':
                    # a negative rule; if the field matches, return False
                    if torrent[field] == spec[1:]:
                        return False
                else:
                    # a positive rule; if any of the possible positive values match the field, return True
                    log.debug('Checking if torrents field %s equals %s' % (field, spec))
                    seen_one = True
                    if torrent[field] == spec:
                        log.debug('Torrent satisfied %s == %s' % (field, spec))
                        matches_one = True

            if seen_one and not matches_one:
                log.debug('Torrent did not satisfy requirements, ignoring')
                return False

        return True

    def htmlToUnicode(self, text):
        """Replace HTML character/named entities with their unicode chars.

        NOTE: uses Python-2-only `unichr`/`htmlentitydefs`, matching the
        rest of this codebase.
        """
        def fixup(m):
            txt = m.group(0)
            if txt[:2] == "&#":
                # character reference
                try:
                    if txt[:3] == "&#x":
                        return unichr(int(txt[3:-1], 16))
                    else:
                        return unichr(int(txt[2:-1]))
                except ValueError:
                    pass
            else:
                # named entity
                try:
                    txt = unichr(htmlentitydefs.name2codepoint[txt[1:-1]])
                except KeyError:
                    pass
            return txt  # leave as is
        return re.sub(r"&#?\w+;", fixup, six.u('%s') % text)

    def unicodeToASCII(self, text):
        import unicodedata
        # Strip combining marks after NFKD decomposition -> plain ASCII-ish text
        return ''.join(c for c in unicodedata.normalize('NFKD', text) if unicodedata.category(c) != 'Mn')

    def htmlToASCII(self, text):
        return self.unicodeToASCII(self.htmlToUnicode(text))

    def getLoginParams(self):
        return {
            'username': self.conf('username'),
            'password': self.conf('password'),
            'passkey': self.conf('passkey'),
            'keeplogged': '1',
            'login': 'Login'
        }

    def loginSuccess(self, output):
        """Check the ajax login response; disable after 3 consecutive failures
        to avoid triggering PTP's IP ban on repeated bad logins."""
        try:
            if json.loads(output).get('Result', '').lower() == 'ok':
                self.login_errors = 0
                return True
        except:
            pass

        self.login_errors += 1
        if self.login_errors >= 3:
            log.error('Disabling PTP provider after repeated failed logins. '
                      'Please check your configuration. Re-enabling without '
                      'solving the problem may cause an IP ban. response=%s',
                      output)
            self.conf('enabled', value=False)
            self.login_errors = 0

        return False

    loginCheckSuccess = loginSuccess


config = [{
    'name': 'passthepopcorn',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'PassThePopcorn',
            'description': '<a href="https://passthepopcorn.me" target="_blank">PassThePopcorn.me</a>',
            'wizard': True,
            'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAARklEQVQoz2NgIAP8BwMiGWRpIN1JNWn/t6T9f5'
                    '32+W8GkNt7vzz9UkfarZVpb68BuWlbnqW1nU7L2DMx7eCoBlpqGOppCQB83zIgIg+wWQAAAABJRU5ErkJggg==',
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': False
                },
                {
                    'name': 'domain',
                    'advanced': True,
                    'label': 'Proxy server',
                    'description': 'Domain for requests (HTTPS only!), keep empty to use default (passthepopcorn.me).',
                },
                {
                    'name': 'username',
                    'default': '',
                },
                {
                    'name': 'password',
                    'default': '',
                    'type': 'password',
                },
                {
                    'name': 'passkey',
                    'default': '',
                },
                {
                    'name': 'prefer_golden',
                    'advanced': True,
                    'type': 'bool',
                    'label': 'Prefer golden',
                    'default': 1,
                    'description': 'Favors Golden Popcorn-releases over all other releases.'
                },
                {
                    'name': 'prefer_freeleech',
                    'advanced': True,
                    'type': 'bool',
                    'label': 'Prefer Freeleech',
                    'default': 1,
                    'description': 'Favors torrents marked as freeleech over all other releases.'
                },
                {
                    'name': 'prefer_scene',
                    'advanced': True,
                    'type': 'bool',
                    'label': 'Prefer scene',
                    'default': 0,
                    'description': 'Favors scene-releases over non-scene releases.'
                },
                {
                    'name': 'no_scene',
                    'advanced': True,
                    'type': 'bool',
                    'label': 'Reject scene',
                    'default': 0,
                    'description': 'Reject scene-releases over non-scene releases.'
                },
                {
                    'name': 'require_approval',
                    'advanced': True,
                    'type': 'bool',
                    'label': 'Require approval',
                    'default': 0,
                    'description': 'Require staff-approval for releases to be accepted.'
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 2,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 96,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 20,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        }
    ]
}]
11,598
Python
.py
264
27.594697
148
0.471802
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,813
magnetdl.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/torrent/magnetdl.py
import re
import traceback

from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider
import six

log = CPLog(__name__)


class Base(TorrentMagnetProvider):
    """Magnet-link search provider for MagnetDL (HTML scraping, paginated)."""

    urls = {
        'search': 'http://www.magnetdl.com/%s/%s/se/desc/%s/',
        'detail': 'http://www.magnetdl.com/%s'
    }

    http_time_between_calls = 1  # Seconds

    def _searchOnTitle(self, title, movie, quality, results):
        """Scrape the paginated result list for '<title>-<year>'.

        Pages are fetched until max_pages is reached, the site stops
        advertising a next page, or the app is shutting down.
        """
        search_term = tryUrlencode('%s-%s' % (title.replace(':', '').replace(' ', '-'), movie['info']['year']))

        page = 1
        page_limit = self.conf('max_pages')
        has_next_page = True

        while has_next_page and page <= page_limit and not self.shuttingDown():
            has_next_page = False

            # Search URL is keyed by the term's first character, then the term.
            data = self.getHTMLData(self.urls['search'] % (search_term[:1], search_term, page))

            if data:
                soup = BeautifulSoup(data)

                try:
                    table = soup.find('table', attrs = {'class': 'download'})
                    if not table:
                        # No result table at all: stop paginating entirely.
                        return

                    for row in table.find_all('tr'):
                        name_cell = row.find('td', attrs = {'class': 'n'})

                        if name_cell:
                            # A torrent row: 'n' cell holds the title link,
                            # 'm' cell holds the magnet link.
                            name_link = name_cell.find('a')
                            magnet_link = row.find('td', attrs = {'class': 'm'}).find('a')
                            cells = row.find_all('td')

                            results.append({
                                'id': name_link['href'].split('/')[2],
                                'name': name_link['title'],
                                'url': magnet_link['href'],
                                'detail_url': self.urls['detail'] % name_link['href'],
                                'size': self.parseSize(cells[5].contents[0].strip('\n ')),
                                'age' : self.ageToDays(cells[2].contents[0].strip('\n ')),
                                'seeders': tryInt(cells[-2].string),
                                'leechers': tryInt(cells[-1].string),
                            })
                        else:
                            pager_cell = row.find('td', attrs = {'id': 'pages'})
                            if pager_cell:
                                # Pager row: a link titled for page+1 means more results.
                                wanted_title = 'Downloads | Page %s' % (page + 1)
                                if pager_cell.find('a', attrs = {'title': wanted_title}):
                                    has_next_page = True

                except:
                    log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))

            page += 1

    def ageToDays(self, age_str):
        """Convert an age string like '3 months' into a whole number of days."""
        age_str = age_str.replace('&nbsp;', ' ')

        unit_days = {'week': 7, 'month': 30.5, 'year': 365}
        total = 0

        for amount, unit in re.findall(r'(\d*.?\d+).(sec|hour|day|week|month|year)+', age_str):
            # sec/hour/day all count as (at most) a single day
            total += tryInt(amount) * unit_days.get(unit, 1)

        return tryInt(total)


config = [{
    'name': 'magnetdl',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'MagnetDL',
            'description': '<a href="http://www.magnetdl.com/" target="_blank">MagnetDL</a>',
            'wizard': True,
            'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAhBJREFUOBGFU89rE0EYfbObpk2qbpqY0ItV1NarFy1CqIeeehF68p6bP1Dx4Mn/QEQ8aDWHnEW8CLZo0ZMopQQtHiyWoqAgCdGNaxP3x8zOOjPJDBUW+2D4vtnvfW/mfcwSDNFoNO6L9MJwu1Sr1S7qmo7/5dTr9aTX66klc920O6ZxMprAGEO73VZbmachjWMEKKXwPE/1yTwNaRwjkFt/i1dRpPqcjWZaP3LNtUhwsrLofHinyEagtLqChfy2alxf3UoVKL14hoXxL+AxR/P5pi9JRiAGAQsH3mWehjghWRaE4NyG5hgBJubOooGAzNOgOEEETkagOUZAKtK9bjDkcELMDSx9UgzE1KdgAQW3LDwGbF2TUeyziW2rOouoEBjACNAErcBnysZY5SB2SoVzQ44KXtFZzE1WVD3oi4MEXxaMAE+s5e6OmIOwcfzsLMQ0rj4oOucfTkxMyZjY1qNjc6dU3fViMQeyLAXMuO8VCidz+0ffz0wC+UNHYJ04ja2Xr9H/6WK8VMT0fBV8cw29b1/x6TsHjaPpS53f28bnShC05jMjB/6EOJMPu7B9D4fnqjhanUV5qgJ/4w36ovlzJ4Efxjcv//Ce/nMDuZG4WyzcHs1Y18v7Ejhj4qEIk4wDv8Sz6fQJQpbcuuZ2bwzYuyzoDzLeEXZAiPy1F8UqC58tofEkQ8jSFdf9KDkafwGzPw7miJh+wQAAAABJRU5ErkJggg==',
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': False,
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 1,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 40,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                {
                    'name': 'max_pages',
                    'label': 'Max Pages',
                    'type': 'int',
                    'default': 3,
                    'description': 'Maximum number of pages to scan.',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 0,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        },
    ],
}]
5,984
Python
.py
121
32.933884
823
0.502482
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,814
kickasstorrents.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/torrent/kickasstorrents.py
import re
import traceback

from bs4 import BeautifulSoup
from couchpotato.core.helpers.variable import tryInt, getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider

log = CPLog(__name__)


class Base(TorrentMagnetProvider):
    """Magnet-link search provider for KickAssTorrents (via proxy mirrors)."""

    urls = {
        'detail': '%s/%s',
        'search': '%s/%s-i%s/',
    }

    cat_ids = [
        (['cam'], ['cam']),
        (['telesync'], ['ts', 'tc']),
        (['screener', 'tvrip'], ['screener']),
        (['x264', '720p', '1080p', 'blu-ray', 'hdrip'], ['bd50', '1080p', '720p', 'brrip']),
        (['dvdrip'], ['dvdrip']),
        (['dvd'], ['dvdr']),
    ]

    http_time_between_calls = 1  # Seconds
    cat_backup_id = None

    proxy_list = [
        'http://flowtorrent.com',
        'http://katcr.to/span',
        'http://dx-torrente.com',
        'https://kickass.unblocked.vip',
        'https://katcr.co',
        'https://kat.how',
        'https://kickass.cd',
        'https://kickass.unlockproject.online',
        'https://kickasstorrents.video',
        'https://kat.al',
        'https://katproxy.al',
        'https://kattor.xyz',
        'https://kickass.unblocked.video',
        'https://kickass.unblocked.rocks',
        'https://kickass.immunicity.live',
        'https://kickass.immunicity.red',
        'https://kickass.immunicity.video',
        'https://kickass.bypassed.live',
        'https://kickass.bypassed.video',
        'https://kickass.bypassed.red',
        'https://kickass.unblocked.pw',
        'https://katproxy.com'
    ]

    def _search(self, media, quality, results):
        """Scrape the IMDb-id search page and append result dicts.

        Fixes over the original:
          - header/filter row test used `is` (identity) on strings, which is
            always False for the class attribute and relied on CPython string
            interning for the column names; all comparisons now use `==`/`in`.
          - `temp['class']` raised KeyError for rows without a class attribute,
            which aborted parsing of ALL remaining rows via the broad except;
            `temp.get('class')` is used instead.
        """
        data = self.getHTMLData(self.urls['search'] % (self.getDomain(), 'm', getIdentifier(media).replace('tt', '')))

        if data:
            cat_ids = self.getCatId(quality)
            # Column layout of the result table; None = ignored column.
            table_order = ['name', 'size', None, 'age', 'seeds', 'leechers']

            try:
                html = BeautifulSoup(data)
                resultdiv = html.find('div', attrs = {'class': 'tabs'})
                for result in resultdiv.find_all('div', recursive = False):
                    if result.get('id').lower().strip('tab-') not in cat_ids:
                        continue

                    try:
                        for temp in result.find_all('tr'):
                            # Skip the header ('firstr') row and rows without a
                            # torrent id. bs4 returns the class attr as a list,
                            # old BeautifulSoup as a string; `in` handles both.
                            if 'firstr' in (temp.get('class') or '') or not temp.get('id'):
                                continue

                            new = {}

                            for nr, td in enumerate(temp.find_all('td')):
                                column_name = table_order[nr]
                                if not column_name:
                                    continue

                                if column_name == 'name':
                                    link = td.find('div', {'class': 'torrentname'}).find_all('a')[2]
                                    new['id'] = temp.get('id')[-7:]
                                    new['name'] = link.text
                                    new['url'] = td.find('a', {'href': re.compile('magnet:*')})['href']
                                    new['detail_url'] = self.urls['detail'] % (self.getDomain(), link['href'][1:])
                                    new['verified'] = True if td.find('i', {'class': re.compile('verify')}) else False
                                    new['score'] = 100 if new['verified'] else 0
                                elif column_name == 'size':
                                    new['size'] = self.parseSize(td.text)
                                elif column_name == 'age':
                                    new['age'] = self.ageToDays(td.text)
                                elif column_name == 'seeds':
                                    new['seeders'] = tryInt(td.text)
                                elif column_name == 'leechers':
                                    new['leechers'] = tryInt(td.text)

                            # Only store verified torrents
                            if self.conf('only_verified') and not new['verified']:
                                continue

                            results.append(new)
                    except:
                        log.error('Failed parsing KickAssTorrents: %s', traceback.format_exc())

            except AttributeError:
                log.debug('No search results found.')

    def ageToDays(self, age_str):
        """Convert an age string like '2 months' into a whole number of days."""
        age = 0
        age_str = age_str.replace('&nbsp;', ' ')

        regex = r'(\d*.?\d+).(sec|hour|day|week|month|year)+'
        matches = re.findall(regex, age_str)
        for match in matches:
            nr, size = match
            mult = 1
            if size == 'week':
                mult = 7
            elif size == 'month':
                mult = 30.5
            elif size == 'year':
                mult = 365

            age += tryInt(nr) * mult

        return tryInt(age)

    def isEnabled(self):
        # Only usable when at least one proxy domain is reachable.
        return super(Base, self).isEnabled() and self.getDomain()

    def correctProxy(self, data):
        return 'search query' in data.lower()


config = [{
    'name': 'kickasstorrents',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'KickAssTorrents',
            'description': '<a href="https://kat.ph/" target="_blank">KickAssTorrents</a>',
            'wizard': True,
            'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACD0lEQVR42pXK20uTcRjA8d/fsJsuap0orBuFlm3hir3JJvQOVmuwllN20Lb2isI2nVHKjBqrCWYaNnNuBrkSWxglhDVJOkBdSWUOq5FgoiOrMdRJ2xPPxW+8OUf1ge/FcyCUSVe2qedK5U/OxNTTXRNXEQ52Glb4O6dNEfK1auJkvRY7+/zxnQbA/D596laXcY3OWOiaIX2393SGznUmxkUo/YkDgqHemuzobQ7+NV+reo5Q1mqp68GABdY3+/EloO+JeN4tEqiFU8f3CwhyWo9E7wfMgI0ELTDx0AvjIxcgvZoC9P7NMN7yMmrFeoKa68rfDfmrARsNN0Ihr55cx59ctZWSiwS5bLKpwW4dYJH+M/B6/CYszE0BFZ+egG+Ln+HRoBN/cpl1pV6COIMkOnBVA/w+fXgGKJVM4LxhumMleoL06hJ3wKcCfl+/TAKKx17gnFePRwkqxR4BQSpFkbCrrQJueI7mWpyfATQ9OQY43+uv/+PutBycJ3y2qn2x7jY50GJvnwLKZjOwspyE5I8F4N+1yr1uwqcs3ym63Hwo29EiAyzUWQVr6WVAS4lZCPutQG/2GtES2YiW3d3XflYKtL72kzAcdEDHeSa3czeIMyyz/TApRKvcFfE0isHbJMnrHCf6xTLb1ORvWNlWo91cvHrJUQo0o6ZoRi7dIiT/g2WEDi27Iyov21xMCvgNfXvtwIACfHwAAAAASUVORK5CYII=',
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': True,
                },
                {
                    'name': 'domain',
                    'advanced': True,
                    'label': 'Proxy server',
                    'description': 'Domain for requests, keep empty to let CouchPotato pick.',
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 1,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 40,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                {
                    'name': 'only_verified',
                    'advanced': True,
                    'type': 'bool',
                    'default': False,
                    'description': 'Only search for verified releases.'
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 0,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        },
    ],
}]
7,900
Python
.py
166
30.843373
803
0.483515
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,815
awesomehd.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/torrent/awesomehd.py
import re
import traceback

from bs4 import BeautifulSoup
from couchpotato.core.helpers.variable import tryInt, getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider

log = CPLog(__name__)


class Base(TorrentProvider):
    """Search provider for the private Awesome-HD tracker (XML search API)."""

    urls = {
        'test': 'https://awesome-hd.me/',
        'detail': 'https://awesome-hd.me/torrents.php?torrentid=%s',
        'search': 'https://awesome-hd.me/searchapi.php?action=imdbsearch&passkey=%s&imdb=%s&internal=%s',
        'download': 'https://awesome-hd.me/torrents.php?action=download&id=%s&authkey=%s&torrent_pass=%s',
    }

    http_time_between_calls = 1

    login_fail_msg = 'Please check that you provided a valid API Key, username, and action.'

    def _search(self, movie, quality, results):
        """Query the AHD search API by IMDb id and append scored results."""
        data = self.getHTMLData(self.urls['search'] % (self.conf('passkey'), getIdentifier(movie), self.conf('only_internal')))

        if not data:
            return

        if self.login_fail_msg in data:
            # Bad API key / credentials: stop hammering the tracker.
            self.disableAccount()
            return

        try:
            page = BeautifulSoup(data)

            error_node = page.find('error')
            if error_node:
                log.info(error_node.get_text())
                return

            authkey = page.find('authkey').get_text()

            for node in page.find_all('torrent'):

                def grab(tag):
                    # Text content of a child element of the current <torrent>.
                    return node.find(tag).get_text()

                score = 0
                torrent_id = grab('id')
                release_name = grab('name')
                year = grab('year')
                group = grab('releasegroup')
                resolution = grab('resolution')
                encoding = grab('encoding')
                freeleech = grab('freeleech')
                source_media = grab('media')
                audio = grab('audioformat')

                # skip audio channel only releases
                if resolution == '':
                    continue

                desc = '%s.%s.%s.%s-%s' % (resolution, source_media, audio, encoding, group)

                if self.conf('prefer_internal') and freeleech in ['0.25', '0.50']:
                    score += 200

                # Boost encodes and/or remuxes depending on the 'favor' setting.
                if encoding == 'x264' and self.conf('favor') in ['encode', 'both']:
                    score += 200
                elif re.search('Remux', encoding) and self.conf('favor') in ['remux', 'both']:
                    score += 200

                # Normalize the release name to dot-separated tokens.
                release_name = re.sub(r'\.+', '.', re.sub(r'\W', '.', release_name))

                results.append({
                    'id': torrent_id,
                    'name': '%s.%s.%s' % (release_name, year, desc),
                    'url': self.urls['download'] % (torrent_id, authkey, self.conf('passkey')),
                    'detail_url': self.urls['detail'] % torrent_id,
                    'size': tryInt(grab('size')) / 1048576,
                    'seeders': tryInt(grab('seeders')),
                    'leechers': tryInt(grab('leechers')),
                    'score': score
                })

        except:
            log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))


config = [{
    'name': 'awesomehd',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'Awesome-HD',
            'description': '<a href="https://awesome-hd.net" target="_blank">AHD</a>',
            'wizard': True,
            'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAC+UlEQVR4AV1SO0y6dxQ9H4g8CoIoohZ5NA0aR2UgkYpNB5uocTSaLlrDblMH09Gt8d90r3YpJkanxjA4GGkbO7RNxSABq8jDGnkpD+UD5NV7Bxvbk9wvv+/3uPece66A/yEWi42FQqHVfD7/cbPZtIEglUpjOp3uZHR0dBvAn3gDIRqNgjE4OKj0+Xzf3NzcfD4wMCCjf5TLZbTbbajVatzf3+Pu7q5uNpt35ufnvwBQAScQRREEldfr9RWLxan+/n5YrVa+jFarhVfQQyQSCU4EhULhX15engEgSrjC0dHRVqlUmjQYDBgaGgKtuTqz4mTgIoVCASaTCX19fajVapOHh4dbFJBks9mxcDi8qtFoJEajkfVyJWi1WkxMTMDhcIAT8x6D7/Dd6+vr1fHx8TGp2+3+iqo5+YCzBwIBToK5ubl/mQwPDyMSibAs2Gw2UHNRrValz8/PDUk8Hv9EqVRCr9fj4uICTNflcqFer+Pg4AB7e3uoVCq8x9Rxfn6O7u5uqFQq8FspZXxHTekggByA3W4Hr9PpNDeRL3I1cMhkMrBrnZ2dyGQyvNYIs7OzVbJNPjIyAraLwYdcjR8wXl5eIJfLwRIFQQDLYkm3t7c1CdGPPT4+cpOImp4PODMeaK+n10As2jBbrHifHOjS6qAguVFimkqlwAMmIQnHV1dX4NDQhVwuhyZTV6pgIktzDzkkk0lEwhEEzs7ASQr5Ai4vL1nuccfCwsLO/v6+p9FoyJhF6ekJro/cPCzIZLNQa7rQoK77/SdgWWpKkCaJ5EB9aWnpe6nH40nRMBnJV4f5gw+FX3/5GX/8/htXRZdOzzqhJWn6nl6YbTZqqhrhULD16fT0d8FgcFtYW1vD5uamfGVl5cd4IjldKhZACdkJvKfWUANrxEaJV4hiGVaL1b+7653hXzwRZQr2X76xsfG1xWIRaZzbNPv/CdrjEL9cX/+WXFBSgEPgzxuwG3Yans9OT0+naBZMIJDNfzudzp8WFxd/APAX3uAf9WOTxOPLdosAAAAASUVORK5CYII=',
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': False,
                },
                {
                    'name': 'passkey',
                    'default': '',
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 1,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 40,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                {
                    'name': 'only_internal',
                    'advanced': True,
                    'type': 'bool',
                    'default': 1,
                    'description': 'Only search for internal releases.'
                },
                {
                    'name': 'prefer_internal',
                    'advanced': True,
                    'type': 'bool',
                    'default': 1,
                    'description': 'Favors internal releases over non-internal releases.'
                },
                {
                    'name': 'favor',
                    'advanced': True,
                    'default': 'both',
                    'type': 'dropdown',
                    'values': [('Encodes & Remuxes', 'both'), ('Encodes', 'encode'), ('Remuxes', 'remux'), ('None', 'none')],
                    'description': 'Give extra scoring to encodes or remuxes.'
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'type': 'int',
                    'default': 20,
                    'description': 'Starting score for each release found via this provider.',
                },
            ],
        },
    ],
}]
7,064
Python
.py
131
37.022901
1,115
0.531983
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,816
torrentday.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/torrent/torrentday.py
import re
import traceback

from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider

log = CPLog(__name__)


class Base(TorrentProvider):
    """Search provider for the private TorrentDay tracker (JSON endpoint,
    cookie-based session)."""

    urls = {
        'test': 'https://www.torrentday.com/',
        'login': 'https://www.torrentday.com/t',
        'login_check': 'https://www.torrentday.com/userdetails.php',
        'detail': 'https://www.torrentday.com/details.php?id=%s',
        'search': 'https://www.torrentday.com/t.json?q=%s',
        'download': 'https://www.torrentday.com/download.php/%s/%s.torrent',
    }

    http_time_between_calls = 1  # Seconds

    def loginDownload(self, url = '', nzb_id = ''):
        """Download a release, logging in first.

        Fix: the except handler used `traceback.format_exc()` but `traceback`
        was never imported, turning every download failure into a NameError.
        """
        try:
            if not self.login():
                log.error('Failed downloading from %s', self.getName())
                return
            return self.urlopen(url, headers = self.getRequestHeaders())
        except:
            log.error('Failed downloading from %s: %s', (self.getName(), traceback.format_exc()))

    def _searchOnTitle(self, title, media, quality, results):
        """Query the t.json search endpoint and append result dicts."""
        query = '"%s" %s' % (title, media['info']['year'])

        # NOTE(review): query is interpolated without URL-encoding, matching
        # the original behavior — confirm the endpoint tolerates raw quotes.
        data = self.getJsonData(self.urls['search'] % query, headers = self.getRequestHeaders())

        # Fix: a failed request returns None, which previously raised
        # TypeError when iterated.
        if not data:
            return

        for torrent in data:
            results.append({
                'id': torrent['t'],
                'name': torrent['name'],
                'url': self.urls['download'] % (torrent['t'], torrent['t']),
                'detail_url': self.urls['detail'] % torrent['t'],
                'size': tryInt(torrent['size']) / (1024 * 1024),  # bytes -> MB
                'seeders': torrent['seeders'],
                'leechers': torrent['leechers'],
            })

    def getRequestHeaders(self):
        # Session is carried entirely by the user-supplied cookie string.
        return {
            'Cookie': self.conf('cookiesetting') or ''
        }

    def getLoginParams(self):
        return {
            'username': self.conf('username'),
            'password': self.conf('password'),
            'submit.x': 18,
            'submit.y': 11,
            'submit': 'submit',
        }

    def loginSuccess(self, output):
        # Surface the tracker's rate-limit message as the failure reason.
        often = re.search('You tried too often, please wait .*</div>', output)
        if often:
            raise Exception(often.group(0)[:-6].strip())

        return 'Password not correct' not in output

    def loginCheckSuccess(self, output):
        return 'logout.php' in output.lower()


config = [{
    'name': 'torrentday',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'TorrentDay',
            'description': '<a href="https://www.torrentday.com/" target="_blank">TorrentDay</a>',
            'wizard': True,
            'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAC5ElEQVQ4y12TXUgUURTH//fO7Di7foeQJH6gEEEIZZllVohfSG/6UA+RSFAQQj74VA8+Bj30lmAlRVSEvZRfhNhaka5ZUG1paKaW39tq5O6Ou+PM3M4o6m6X+XPPzD3zm/+dcy474r115WfIW8CZBM4YAA5Gc/aQC3yd7oXYEONcsISE5dTDh91HS0t7FEWhBUAeN9ynV/d9qJAgE4AECURAcVsGlCCnly26LMA0IQwTa52dje3d3e3hcPi8qqrrMjcVYI3EHCQZlkFOHBwR2QHh2ASAAIJxWGAQEDxjePhs3527XjJwnb37OHBq0T+Tyyjh+9KnEzNJ7nouc1Q/3A3HGsOvnJy+PSUlj81w2Lny9WuJ6+3AmTjD4HOcrdR2dWXLRQePvyaSLfQOPMPC8mC9iHCsOxSyzJCelzdSXlNzD5ujpb25Wbfc/XXJemTXF4+nnCNq+AMLe50uFfEJTiw4GXSFtiHL0SnIq66+p0kSArqO+eH3RdsAv9+f5vW7L7GICq6rmM8XBCAXlBw90rOyxibn5yzfkg/L09M52/jxqdESaIrBXHYZZbB1GX8cEpySxKIB8S5XcOnvqpli1zuwmrTtoLjw5LOK/eeuWsE4JH5IRPaPZKiKigmPp+5pa+u1aEjIMhEgrRkmi9mgxGUhM7LNJSzOzsE3+cOeExovXOjdytE0LV4zqNZUtV0uZzAGoGkhDH/2YHZiErmv4uyWQnZZWc+hoqL3WzlTExN5hhA8IEwkZWZOxwB++30YG/9GkYCPvqAaHAW5uWPROW86OmqCprUR7z1yZDAGQNuCvkoB/baIKUBWMTYymv+gra3eJNvjXu+B562tFyXqTJ6YuHK8rKwvBmC3vR7cOCPQLWFz8LnfXWUrJo9U19BwMyUlJRjTSMJ2ENxUiGxq9KXQfwqYlnWstvbR5aamG9g0uzM8Q4OFt++3NNixQ2NgYmeN03FOTUv7XVpV9aKisvLl1vN/WVhNc/Fi1NEAAAAASUVORK5CYII=',
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': False,
                },
                {
                    'name': 'username',
                    'default': '',
                },
                {
                    'name': 'password',
                    'default': '',
                    'type': 'password',
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 1,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 40,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                {
                    'name': 'cookiesetting',
                    'label': 'Cookies',
                    'default': '',
                    'description': 'Cookies',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 0,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        },
    ],
}]
5,409
Python
.py
114
34.04386
1,087
0.56263
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,817
bithdtv.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/torrent/bithdtv.py
import traceback

from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.torrent.base import TorrentProvider

log = CPLog(__name__)


class Base(TorrentProvider):
    """Search provider for the private BiT-HDTV tracker (HTML scraping,
    cookie-based session)."""

    urls = {
        'detail': 'https://www.bit-hdtv.com/details.php?id=%s',
        'search': 'https://www.bit-hdtv.com/torrents.php?',
        'download': 'https://www.bit-hdtv.com/download.php?id=%s',
    }

    # Searches for movies only - BiT-HDTV's subcategory and resolution search filters appear to be broken
    http_time_between_calls = 1  # Seconds

    login_fail_msg = 'Username or password incorrect.'

    def _search(self, media, quality, results):
        """Scrape the torrent listing page and append result dicts."""
        query = self.buildUrl(media, quality)
        url = "%s&%s" % (self.urls['search'], query)

        data = self.getHTMLData(url, headers = self.getRequestHeaders())

        if data:
            # Remove BiT-HDTV's output garbage so outdated BS4 versions successfully parse the HTML
            split_data = data.partition('-->')
            if '## SELECT COUNT(' in split_data[0]:
                data = split_data[2]

            html = BeautifulSoup(data, 'html.parser')

            try:
                result_tables = html.find_all('table', attrs = {'width': '800', 'class': ''})
                # Fix: find_all returns a (possibly empty) list, never None,
                # so the original `is None` checks could not trigger and an
                # empty result raised IndexError on result_tables[0].
                if not result_tables:
                    return

                # Take first result
                result_table = result_tables[0]

                entries = result_table.find_all('tr')
                for result in entries[1:]:
                    cells = result.find_all('td')
                    link = cells[2].find('a')
                    torrent_id = link['href'].split('id=')[1]

                    results.append({
                        'id': torrent_id,
                        'name': link.contents[0].get_text(),
                        'url': self.urls['download'] % torrent_id,
                        'detail_url': self.urls['detail'] % torrent_id,
                        'size': self.parseSize(cells[6].get_text()),
                        'seeders': tryInt(cells[8].string),
                        'leechers': tryInt(cells[9].string),
                        'get_more_info': self.getMoreInfo,
                    })

            except:
                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))

    def getRequestHeaders(self):
        # Session is carried by the three user-supplied h_s* cookies.
        cookies = 'h_sl={};h_sp={};h_su={}'.format(self.conf('cookiesettingsl') or '', self.conf('cookiesettingsp') or '', self.conf('cookiesettingsu') or '')
        return {
            'Cookie': cookies
        }

    def getMoreInfo(self, item):
        """Fetch and cache the detail page to fill in the description."""
        full_description = self.getCache('bithdtv.%s' % item['id'], item['detail_url'], cache_timeout = 25920000)
        # Use the same explicit parser as _search for consistent results.
        html = BeautifulSoup(full_description, 'html.parser')
        nfo_pre = html.find('table', attrs = {'class': 'detail'})
        description = toUnicode(nfo_pre.text) if nfo_pre else ''

        item['description'] = description
        return item

    def download(self, url = '', nzb_id = ''):
        try:
            return self.urlopen(url, headers=self.getRequestHeaders())
        except:
            log.error('Failed getting release from %s: %s', (self.getName(), traceback.format_exc()))

        return 'try_next'


config = [{
    'name': 'bithdtv',
    'groups': [
        {
            'tab': 'searcher',
            'list': 'torrent_providers',
            'name': 'BiT-HDTV',
            'description': '<a href="https://bit-hdtv.com" target="_blank">BiT-HDTV</a>',
            'wizard': True,
            'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAABMklEQVR4AZ3Qu0ojcQCF8W9MJcQbJNgEEQUbQVIqWgnaWfkIvoCgggixEAmIhRtY2GV3w7KwU61B0EYIxmiw0YCik84ipaCuc0nmP5dcjIUgOjqDvxf4OAdf9mnMLcUJyPyGSCP+YRdC+Kp8iagJKhuS+InYRhTGgDbeV2uEMand4ZRxizjXHQEimxhraAnUr73BNqQxMiNeV2SwcjTLEVtb4Zl10mXutvOWm2otw5Sxz6TGTbdd6ncuYvVLXAXrvM+ruyBpy1S3JLGDfUQ1O6jn5vTsrJXvqSt4UNfj6vxTRPxBHER5QeSirhLGk/5rWN+ffB1XZuxjnDy1q87m7TS+xOGA+Iv4gfkbaw+nOMXHDHnITGEk0VfRFnn4Po4vNYm6RGukmggR0L08+l+e4HMeASo/i6AJUjLgAAAAAElFTkSuQmCC',
            'options': [
                {
                    'name': 'enabled',
                    'type': 'enabler',
                    'default': False,
                },
                {
                    'name': 'cookiesettingsl',
                    'label': 'Cookies (h_sl)',
                    'default': '',
                    'description': 'Cookie h_sl from session',
                },
                {
                    'name': 'cookiesettingsp',
                    'label': 'Cookies (h_sp)',
                    'default': '',
                    'description': 'Cookie h_sp from session',
                },
                {
                    'name': 'cookiesettingsu',
                    'label': 'Cookies (h_su)',
                    'default': '',
                    'description': 'Cookie h_su from session',
                },
                {
                    'name': 'seed_ratio',
                    'label': 'Seed ratio',
                    'type': 'float',
                    'default': 1,
                    'description': 'Will not be (re)moved until this seed ratio is met.',
                },
                {
                    'name': 'seed_time',
                    'label': 'Seed time',
                    'type': 'int',
                    'default': 40,
                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
                },
                {
                    'name': 'extra_score',
                    'advanced': True,
                    'label': 'Extra Score',
                    'type': 'int',
                    'default': 20,
                    'description': 'Starting score for each release found via this provider.',
                }
            ],
        },
    ],
}]
6,086
Python
.py
129
32.488372
531
0.522846
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,818
torrentleech.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/torrent/torrentleech.py
import traceback import json from bs4 import BeautifulSoup from couchpotato.core.helpers.variable import tryInt from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.base import TorrentProvider import six log = CPLog(__name__) class Base(TorrentProvider): urls = { 'test': 'https://www.torrentleech.org/', 'login': 'https://www.torrentleech.org/user/account/login/', 'login_check': 'https://torrentleech.org/user/messages', 'detail': 'https://www.torrentleech.org/torrent/%s', 'search': 'https://www.torrentleech.org/torrents/browse/list/categories/%s/query/%s', 'download': 'https://www.torrentleech.org/download/%s/%s', } http_time_between_calls = 1 # Seconds login_fail_msg = 'Invalid Username/password combination!' cat_backup_id = None def _searchOnTitle(self, title, media, quality, results): urlParms = self.buildUrl(title, media, quality) url = self.urls['search'] % (urlParms[1], urlParms[0]) data = self.getHTMLData(url) jsonResults = json.loads(data) if jsonResults: try: for torrent in jsonResults['torrentList']: link = self.urls['detail'] % torrent['fid'] url = self.urls['download'] % (torrent['fid'], torrent['filename']) currentResult = { 'id': torrent['fid'], 'name': six.text_type(torrent['name']), 'url': url, 'detail_url': link, 'size': torrent['size']/1024/1024, 'seeders': torrent['seeders'], 'leechers': torrent['leechers'], } results.append(currentResult) except: log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) def getLoginParams(self): return { 'username': self.conf('username'), 'password': self.conf('password'), 'remember_me': 'on', 'login': 'submit', } def loginSuccess(self, output): return '/user/account/logout' in output.lower() or 'welcome back' in output.lower() loginCheckSuccess = loginSuccess config = [{ 'name': 'torrentleech', 'groups': [ { 'tab': 'searcher', 'list': 'torrent_providers', 'name': 'TorrentLeech', 'description': '<a href="http://torrentleech.org" 
target="_blank">TorrentLeech</a>', 'wizard': True, 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACHUlEQVR4AZVSO48SYRSdGTCBEMKzILLAWiybkKAGMZRUUJEoDZX7B9zsbuQPYEEjNLTQkYgJDwsoSaxspEBsCITXjjNAIKi8AkzceXgmbHQ1NJ5iMufmO9/9zrmXlCSJ+B8o75J8Pp/NZj0eTzweBy0Wi4PBYD6f12o1r9ebTCZx+22HcrnMsuxms7m6urTZ7LPZDMVYLBZ8ZV3yo8aq9Pq0wzCMTqe77dDv9y8uLyAWBH6xWOyL0K/56fcb+rrPgPZ6PZfLRe1fsl6vCUmGKIqoqNXqdDr9Dbjps9znUV0uTqdTjuPkDoVCIfcuJ4gizjMMm8u9vW+1nr04czqdK56c37CbKY9j2+1WEARZ0Gq1RFHAz2q1qlQqXxoN69HRcDjUarW8ZD6QUigUOnY8uKYH8N1sNkul9yiGw+F6vS4Rxn8EsodEIqHRaOSnq9T7ajQazWQycEIR1AEBYDabSZJyHDucJyegwWBQr9ebTCaKvHd4cCQANUU9evwQ1Ofz4YvUKUI43GE8HouSiFiNRhOowWBIpVLyHITJkuW3PwgAEf3pgIwxF5r+OplMEsk3CPT5szCMnY7EwUdhwUh/CXiej0Qi3idPz89fdrpdbsfBzH7S3Q9K5pP4c0sAKpVKoVAQGO1ut+t0OoFAQHkH2Da/3/+but3uarWK0ZMQoNdyucRutdttmqZxMTzY7XaYxsrgtUjEZrNhkSwWyy/0NCatZumrNQAAAABJRU5ErkJggg==', 'options': [ { 'name': 'enabled', 'type': 'enabler', 'default': False, }, { 'name': 'username', 'default': '', }, { 'name': 'password', 'default': '', 'type': 'password', }, { 'name': 'seed_ratio', 'label': 'Seed ratio', 'type': 'float', 'default': 1, 'description': 'Will not be (re)moved until this seed ratio is met.', }, { 'name': 'seed_time', 'label': 'Seed time', 'type': 'int', 'default': 40, 'description': 'Will not be (re)moved until this seed time (in hours) is met.', }, { 'name': 'extra_score', 'advanced': True, 'label': 'Extra Score', 'type': 'int', 'default': 20, 'description': 'Starting score for each release found via this provider.', } ], }, ], }]
4,828
Python
.py
103
33.07767
823
0.561266
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,819
torrentz.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/torrent/torrentz.py
import re import traceback from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.helpers.rss import RSS from couchpotato.core.helpers.variable import tryInt, splitString from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider import six log = CPLog(__name__) class Base(TorrentMagnetProvider, RSS): urls = { 'detail': 'https://torrentz2.eu/%s', 'search': 'https://torrentz2.eu/feed?f=%s' } http_time_between_calls = 0 def _searchOnTitle(self, title, media, quality, results): search_url = self.urls['search'] # Create search parameters search_params = self.buildUrl(title, media, quality) min_seeds = tryInt(self.conf('minimal_seeds')) if min_seeds: search_params += ' seed > %s' % (min_seeds - 1) rss_data = self.getRSSData(search_url % search_params) if rss_data: try: for result in rss_data: name = self.getTextElement(result, 'title') detail_url = self.getTextElement(result, 'link') description = self.getTextElement(result, 'description') magnet = splitString(detail_url, '/')[-1] magnet_url = 'magnet:?xt=urn:btih:%s&dn=%s&tr=%s' % (magnet.upper(), tryUrlencode(name), tryUrlencode('udp://tracker.openbittorrent.com/announce')) reg = re.search('Size: (?P<size>\d+) (?P<unit>[KMG]B) Seeds: (?P<seeds>[\d,]+) Peers: (?P<peers>[\d,]+)', six.text_type(description)) size = reg.group('size') unit = reg.group('unit') seeds = reg.group('seeds').replace(',', '') peers = reg.group('peers').replace(',', '') multiplier = 1 if unit == 'GB': multiplier = 1000 elif unit == 'KB': multiplier = 0 results.append({ 'id': magnet, 'name': six.text_type(name), 'url': magnet_url, 'detail_url': detail_url, 'size': tryInt(size)*multiplier, 'seeders': tryInt(seeds), 'leechers': tryInt(peers), }) except: log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) config = [{ 'name': 'torrentz', 'groups': [ { 'tab': 'searcher', 'list': 'torrent_providers', 'name': 'Torrentz', 
'description': 'Torrentz.eu was a free, fast and powerful meta-search engine combining results from dozens of search engines, Torrentz2.eu is trying to replace it. <a href="https://torrentz2.eu/" target="_blank">Torrentz2</a>', 'wizard': True, 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAQklEQVQ4y2NgAALjtJn/ycEMlGiGG0IVAxiwAKzOxaKGARcgxgC8YNSAwWoAzuRMjgsIugqfAUR5CZcBRIcHsWEAADSA96Ig020yAAAAAElFTkSuQmCC', 'options': [ { 'name': 'enabled', 'type': 'enabler', 'default': True }, { 'name': 'minimal_seeds', 'type': 'int', 'default': 1, 'advanced': True, 'description': 'Only return releases with minimal X seeds', }, { 'name': 'seed_ratio', 'label': 'Seed ratio', 'type': 'float', 'default': 1, 'description': 'Will not be (re)moved until this seed ratio is met.', }, { 'name': 'seed_time', 'label': 'Seed time', 'type': 'int', 'default': 40, 'description': 'Will not be (re)moved until this seed time (in hours) is met.', }, { 'name': 'extra_score', 'advanced': True, 'label': 'Extra Score', 'type': 'int', 'default': 0, 'description': 'Starting score for each release found via this provider.', } ], } ] }]
4,577
Python
.py
101
29.861386
239
0.492366
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,820
__init__.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/torrent/__init__.py
config = [{ 'name': 'torrent_providers', 'groups': [ { 'label': 'Torrent Providers', 'description': 'Providers searching torrent sites for new releases', 'wizard': True, 'type': 'list', 'name': 'torrent_providers', 'tab': 'searcher', 'options': [], }, ], }]
370
Python
.py
14
17.428571
80
0.460674
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,821
base.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/torrent/base.py
import time import traceback from couchpotato.core.helpers.variable import getImdb, md5, cleanHost from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.base import YarrProvider from couchpotato.environment import Env log = CPLog(__name__) class TorrentProvider(YarrProvider): protocol = 'torrent' proxy_domain = None proxy_list = [] def imdbMatch(self, url, imdbId): if getImdb(url) == imdbId: return True if url[:4] == 'http': try: cache_key = md5(url) data = self.getCache(cache_key, url) except IOError: log.error('Failed to open %s.', url) return False return getImdb(data) == imdbId return False def getDomain(self, url = ''): forced_domain = self.conf('domain') if forced_domain: return cleanHost(forced_domain).rstrip('/') + url if not self.proxy_domain: for proxy in self.proxy_list: prop_name = 'proxy.%s' % proxy last_check = float(Env.prop(prop_name, default = 0)) if last_check > time.time() - 86400: continue data = '' try: data = self.urlopen(proxy, timeout = 3, show_error = False) except: log.debug('Failed %s proxy %s: %s', (self.getName(), proxy, traceback.format_exc())) if self.correctProxy(data): log.debug('Using proxy for %s: %s', (self.getName(), proxy)) self.proxy_domain = proxy break Env.prop(prop_name, time.time()) if not self.proxy_domain: log.error('No %s proxies left, please add one in settings, or let us know which one to add on the forum.', self.getName()) return None return cleanHost(self.proxy_domain).rstrip('/') + url def correctProxy(self, data): return True class TorrentMagnetProvider(TorrentProvider): protocol = 'torrent_magnet' download = None
2,162
Python
.py
52
30.230769
134
0.574376
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,822
scenetime.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/torrent/scenetime.py
import traceback from bs4 import BeautifulSoup from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode from couchpotato.core.helpers.variable import tryInt from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.base import TorrentProvider log = CPLog(__name__) class Base(TorrentProvider): urls = { 'test': 'https://www.scenetime.com/', 'login': 'https://www.scenetime.com/takelogin.php', 'login_check': 'https://www.scenetime.com/inbox.php', 'detail': 'https://www.scenetime.com/details.php?id=%s', 'search': 'https://www.scenetime.com/browse.php?search=%s&cat=%d', 'download': 'https://www.scenetime.com/download.php/%s/%s', } cat_ids = [ ([59], ['720p', '1080p']), ([81], ['brrip']), ([102], ['bd50']), ([3], ['dvdrip']), ] http_time_between_calls = 1 # Seconds login_fail_msg = 'Username or password incorrect' cat_backup_id = None def _searchOnTitle(self, title, movie, quality, results): url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['info']['year'])), self.getCatId(quality)[0]) data = self.getHTMLData(url) if data: html = BeautifulSoup(data) try: result_table = html.find(attrs = {'id': 'torrenttable'}) if not result_table: log.error('failed to generate result_table') return entries = result_table.find_all('tr') for result in entries[1:]: cells = result.find_all('td') link = result.find('a', attrs = {'class': 'index'}) torrent_id = link['href'].replace('download.php/','').split('/')[0] torrent_file = link['href'].replace('download.php/','').split('/')[1] size = self.parseSize(cells[5].contents[0] + cells[5].contents[2]) name_row = cells[1].contents[0] name = name_row.getText() seeders_row = cells[6].contents[0] seeders = seeders_row.getText() results.append({ 'id': torrent_id, 'name': name, 'url': self.urls['download'] % (torrent_id,torrent_file), 'detail_url': self.urls['detail'] % torrent_id, 'size': size, 'seeders': seeders, }) except: log.error('Failed to parsing %s: %s', 
(self.getName(), traceback.format_exc())) def getLoginParams(self): return { 'login': 'submit', 'username': self.conf('username'), 'password': self.conf('password'), } def loginSuccess(self, output): return 'logout.php' in output.lower() or 'Welcome' in output.lower() loginCheckSuccess = loginSuccess config = [{ 'name': 'scenetime', 'groups': [ { 'tab': 'searcher', 'list': 'torrent_providers', 'name': 'SceneTime', 'description': '<a href="https://www.scenetime.com" target="_blank">SceneTime</a>', 'wizard': True, 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAAAYdEVYdFNvZnR3YXJlAHBhaW50Lm5ldCA0LjAuNWWFMmUAAAIwSURBVDhPZZFbSBRRGMePs7Mzjma7+9AWWxpeYrXLkrcIfUwIpIeK3tO1hWhfltKwhyJMFIqgCz2EpdHWRun2oGG02O2hlYyypY21CygrlbhRIYHizO6/mdk5szPtB785hzm//zeXj7Q89q4I4QaQBx6ZHQY84Efq4Rrbg4rxVmx61AJ2pFY/twzvhP1hU4ZwIQ8K7mw1wdzdhrrxQ7g8E0Q09R6flubw+mcM7tHWPJcwt91ghuTQUDWYW8rejbrRA3i1OA0xLYGWJO8bxw6q50YIc70CRoQbNbj2MQgpkwsrpTYI7ze5CoS5UgYjpTd3YWphWg1l1CuwLC4jufQNtaG9JleBWM67YKR6oBlzf+bVoPIOUiaNwVgIzcF9sF3aknMvZFfCnnNCp9eJqqsNSKQ+qw2USssNzrzoh9Dnynmaq6yEPe2AkfX9lXjy5akWz9ZkcgqVFz0mj0KsJ0tgROh2oCfSJ3/3ihaHPA0Rh+/7UNhtN7kKhAsI+J+a3u2If49r8WxFZiawtsuR5xLumBUU3s/B2bkOm0+V4V3yrTwFOgcg8SMBe8CmuxTC+SygFB3l8TzxDLOpWYiSqEWzFf0ahc2/RncphPcSUIqPWPFhPqZFcrUqraLzXkA+Z3WXQvh2eaNR3MHmNVB+YPjNMMqPb9Q9I6YGRR0WTMQj6hOV+f/++wuDLwfg7iqH4GVMQQrh28w3Nvgd2H22Hk09jag6UYoSH4/C9gKTo9NG8A8MPUM4DJp74gAAAABJRU5ErkJggg==', 'options': [ { 'name': 'enabled', 'type': 'enabler', 'default': False, }, { 'name': 'username', 'default': '', }, { 'name': 'password', 'default': '', 'type': 'password', }, { 'name': 'seed_ratio', 'label': 'Seed ratio', 'type': 'float', 'default': 1, 'description': 'Will not be (re)moved until this seed ratio is met.', }, { 'name': 'seed_time', 'label': 'Seed time', 'type': 'int', 'default': 40, 'description': 'Will not be (re)moved until this seed time (in hours) is met.', }, { 'name': 'extra_score', 'advanced': True, 
'label': 'Extra Score', 'type': 'int', 'default': 20, 'description': 'Starting score for each release found via this provider.', } ], }, ], }]
5,679
Python
.py
116
34.62069
963
0.549819
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,823
alpharatio.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/torrent/alpharatio.py
import traceback from bs4 import BeautifulSoup from couchpotato.core.helpers.variable import tryInt from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.base import TorrentProvider import six log = CPLog(__name__) class Base(TorrentProvider): urls = { 'test': 'https://alpharatio.cc/', 'login': 'https://alpharatio.cc/login.php', 'login_check': 'https://alpharatio.cc/inbox.php', 'detail': 'https://alpharatio.cc/torrents.php?torrentid=%s', 'search': 'https://alpharatio.cc/torrents.php?action=advanced&searchstr=%s&scene=%s&filter_cat[%d]=1', 'download': 'https://alpharatio.cc/%s', } http_time_between_calls = 1 # Seconds login_fail_msg = '</span> attempts remaining.' def _search(self, media, quality, results): url = self.urls['search'] % self.buildUrl(media, quality) cleaned_url = url.replace('%3A', '') data = self.getHTMLData(cleaned_url) if data: html = BeautifulSoup(data) try: result_table = html.find('table', attrs = {'id': 'torrent_table'}) if not result_table: return entries = result_table.find_all('tr', attrs = {'class': 'torrent'}) for result in entries: link = result.find('a', attrs = {'dir': 'ltr'}) url = result.find('a', attrs = {'title': 'Download'}) tds = result.find_all('td') results.append({ 'id': link['href'].replace('torrents.php?id=', '').split('&')[0], 'name': link.contents[0], 'url': self.urls['download'] % url['href'], 'detail_url': self.urls['download'] % link['href'], 'size': self.parseSize(tds[len(tds)-4].string), 'seeders': tryInt(tds[len(tds)-2].string), 'leechers': tryInt(tds[len(tds)-1].string), }) except: log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) def getLoginParams(self): return { 'username': self.conf('username'), 'password': self.conf('password'), 'keeplogged': '1', 'login': 'Login', } def loginSuccess(self, output): return 'logout.php' in output.lower() loginCheckSuccess = loginSuccess def getSceneOnly(self): return '1' if self.conf('scene_only') else '' config 
= [{ 'name': 'alpharatio', 'groups': [ { 'tab': 'searcher', 'list': 'torrent_providers', 'name': 'AlphaRatio', 'description': '<a href="http://alpharatio.cc/" target="_blank">AlphaRatio</a>', 'wizard': True, 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACX0lEQVQ4jbWTX0hTURzHv+fu3umdV9GtOZ3pcllGBomJ9RCmkiWIEJUQET2EMqF86aFeegqLHgoio1ICScoieugPiBlFFmpROUjNIub+NKeba2rqvdvuPKeXDIcsgugHB378fj8+X37fcw5hjOFfgvtTc8o7mdveHWv0+YJ5iWb45SQWi2kc7olCnteoHCGUMqbpejBkO99rPDlW5rjV3FjZkmXU+3SiKK8EkOUVxj2+9bZOe8ebhZxSRTCIQmAES1oLQADKp4EIc8gRFr3t+/SNe0oLelatYM0zO56dqS3fmh4eXkoxIrWvAwXegLta8bymYyak9lyGR7d57eHHtOt7aNaQ0AORU8OEqlg0HURTnXi96cCaK0AYEW0l+MAoQoIp48PHke0JAYwyBkYhameUQ3vz7lTt3NRdKH0ajxgqQMJzAMdBkRVdYgAAEA71G2Z6MnOyvSmSJB/bFblN5DHEsosghf3zZduK+1fdQhyEcKitr+r0B2dMAyPOcmd02oxiC2jUjJaSwbPZpoLJhAA1Ci3hGURRlO0Of8nN9/MNUUXSkrQsFQ4meNORG6/G2O/jGXdZ044OKzg3z3r77TUre81tL1pxirLMWnsoMB00LtfjPLh67/OJH3xRMgiHb96JOCVbxbobRONBQNqScffJ6JE4E2VZFvv6BirbXpkboGcA4eGaDOV73G4LAFBKSWRhNsmqfnHCosG159Lxt++GdgC/XuLD3sH60/fdFxjJBNMDAAVZ8CNfVJxPLzbs/uqa2Lj/0stHkWSDFlwS4FIhRKei3a3VNeS//sa/iZ/B6hMIr7Fq4QAAAABJRU5ErkJggg==', 'options': [ { 'name': 'enabled', 'type': 'enabler', 'default': False, }, { 'name': 'username', 'default': '', }, { 'name': 'password', 'default': '', 'type': 'password', }, { 'name': 'seed_ratio', 'label': 'Seed ratio', 'type': 'float', 'default': 1, 'description': 'Will not be (re)moved until this seed ratio is met.', }, { 'name': 'seed_time', 'label': 'Seed time', 'type': 'int', 'default': 40, 'description': 'Will not be (re)moved until this seed time (in hours) is met.', }, { 'name': 'scene_only', 'type': 'bool', 'default': False, 'description': 'Only allow scene releases.' }, { 'name': 'extra_score', 'advanced': True, 'label': 'Extra Score', 'type': 'int', 'default': 0, 'description': 'Starting score for each release found via this provider.', } ], }, ], }]
5,411
Python
.py
113
33.353982
911
0.540838
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,824
iptorrents.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/torrent/iptorrents.py
import traceback from bs4 import BeautifulSoup from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.helpers.variable import tryInt from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.base import TorrentProvider import six log = CPLog(__name__) class Base(TorrentProvider): urls = { 'test': 'https://iptorrents.com/', 'base_url': 'https://iptorrents.com', 'search': 'https://iptorrents.com/t?%s%%s&q=%s&qf=ti#torrents&p=%%d', } http_time_between_calls = 1 # Seconds login_fail_msg = 'Invalid username and cookie combination' cat_backup_id = None def buildUrl(self, title, media, quality): return self._buildUrl(title.replace(':', ''), quality) def _buildUrl(self, query, quality): cat_ids = self.getCatId(quality) if not cat_ids: log.warning('Unable to find category ids for identifier "%s"', quality.get('identifier')) return None query = query.replace('"', '') return self.urls['search'] % ("&".join(("%d=" % x) for x in cat_ids), tryUrlencode(query).replace('%', '%%')) def _searchOnTitle(self, title, media, quality, results): freeleech = '' if not self.conf('freeleech') else '&free=on' base_url = self.buildUrl(title, media, quality) if not base_url: return pages = 1 current_page = 1 while current_page <= pages and not self.shuttingDown(): data = self.getHTMLData(base_url % (freeleech, current_page), headers = self.getRequestHeaders()) if data: html = BeautifulSoup(data) try: page_nav = html.find('span', attrs = {'class': 'page_nav'}) if page_nav: next_link = page_nav.find("a", text = "Next") if next_link: final_page_link = next_link.previous_sibling.previous_sibling pages = int(final_page_link.string) result_table = html.find('table', id="torrents") if not result_table or 'nothing found!' 
in data.lower(): return entries = result_table.find_all('tr') for result in entries[1:]: torrent = result.find_all('td') if len(torrent) <= 1: break torrent = torrent[1].find('a') torrent_id = torrent['href'].replace('/details.php?id=', '') torrent_name = six.text_type(torrent.string) torrent_download_url = self.urls['base_url'] + (result.find_all('td')[3].find('a'))['href'].replace(' ', '.') torrent_details_url = self.urls['base_url'] + torrent['href'] torrent_size = self.parseSize(result.find_all('td')[5].string) torrent_seeders = tryInt(result.find('td', attrs = {'class': 'ac t_seeders'}).string) torrent_leechers = tryInt(result.find('td', attrs = {'class': 'ac t_leechers'}).string) results.append({ 'id': torrent_id, 'name': torrent_name, 'url': torrent_download_url, 'detail_url': torrent_details_url, 'size': torrent_size, 'seeders': torrent_seeders, 'leechers': torrent_leechers, }) except: log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) break current_page += 1 def getRequestHeaders(self): return { 'Cookie': self.conf('cookiesetting') or '' } def download(self, url = '', nzb_id = ''): try: return self.urlopen(url, headers=self.getRequestHeaders()) except: log.error('Failed getting release from %s: %s', (self.getName(), traceback.format_exc())) return 'try_next' config = [{ 'name': 'iptorrents', 'groups': [ { 'tab': 'searcher', 'list': 'torrent_providers', 'name': 'IPTorrents', 'description': '<a href="https://iptorrents.com" target="_blank">IPTorrents</a>', 'wizard': True, 'icon': 
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAABRklEQVR42qWQO0vDUBiG8zeKY3EqQUtNO7g0J6ZJ1+ifKIIFQXAqDYKCyaaYxM3udrZLHdRFhXrZ6liCW6mubfk874EESgqaeOCF7/Y8hEh41aq6yZi2nyZgBGya9XKtZs4No05pAkZV2YbEmyMMsoSxLQeC46wCTdPPY4HruPQyGIhF97qLWsS78Miydn4XdK46NJ9OsQAYBzMIMf8MQ9wtCnTdWCaIDx/u7uljOIQEe0hiIWPamSTLay3+RxOCSPI9+RJAo7Er9r2bnqjBFAqyK+VyK4f5/Cr5ni8OFKVCz49PFI5GdNvvU7ttE1M1zMU+8AMqFksEhrMnQsBDzqmDAwzx2ehRLwT7yyCI+vSC99c3mozH1NxrJgWWtR1BOECfEJSVCm6WCzJGCA7+IWhBsM4zywDPwEp4vCjx2DzBH2ODAfsDb33Ps6dQwJgAAAAASUVORK5CYII=', 'options': [ { 'name': 'enabled', 'type': 'enabler', 'default': False, }, { 'name': 'username', 'default': '', }, { 'name': 'freeleech', 'default': 0, 'type': 'bool', 'description': 'Only search for [FreeLeech] torrents.', }, { 'name': 'seed_ratio', 'label': 'Seed ratio', 'type': 'float', 'default': 1, 'description': 'Will not be (re)moved until this seed ratio is met.', }, { 'name': 'seed_time', 'label': 'Seed time', 'type': 'int', 'default': 40, 'description': 'Will not be (re)moved until this seed time (in hours) is met.', }, { 'name': 'cookiesetting', 'label': 'Cookies', 'default': 'uid=1234;pass=567845439634987', 'description': 'Use DevTools or Firebug to get these values after logging in on your browser', }, { 'name': 'extra_score', 'advanced': True, 'label': 'Extra Score', 'type': 'int', 'default': 0, 'description': 'Starting score for each release found via this provider.', } ], }, ], }]
6,867
Python
.py
140
32.878571
535
0.512096
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,825
base.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/automation/base.py
from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.base import Provider log = CPLog(__name__) class AutomationBase(Provider): pass
173
Python
.py
5
32.2
64
0.812121
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,826
omgwtfnzbs.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/nzb/omgwtfnzbs.py
from couchpotato.core.event import fireEvent from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode from couchpotato.core.helpers.rss import RSS from couchpotato.core.helpers.variable import tryInt from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.nzb.base import NZBProvider log = CPLog(__name__) class Base(NZBProvider, RSS): urls = { 'search': 'https://api.omgwtfnzbs.me/json/?%s', } http_time_between_calls = 1 # Seconds cat_ids = [ ([15], ['dvdrip', 'scr', 'r5', 'tc', 'ts', 'cam']), ([15, 16], ['brrip']), ([16], ['720p', '1080p', 'bd50']), ([17], ['dvdr']), ] cat_backup_id = 'movie' def _searchOnTitle(self, title, movie, quality, results): q = '%s %s' % (title, movie['info']['year']) params = tryUrlencode({ 'search': q, 'catid': ','.join([str(x) for x in self.getCatId(quality)]), 'user': self.conf('username', default = ''), 'api': self.conf('api_key', default = ''), }) if len(self.conf('custom_tag')) > 0: params = '%s&%s' % (params, self.conf('custom_tag')) nzbs = self.getJsonData(self.urls['search'] % params) if isinstance(nzbs, list): for nzb in nzbs: results.append({ 'id': nzb.get('nzbid'), 'name': toUnicode(nzb.get('release')), 'age': self.calculateAge(tryInt(nzb.get('usenetage'))), 'size': tryInt(nzb.get('sizebytes')) / 1024 / 1024, 'url': nzb.get('getnzb'), 'detail_url': nzb.get('details'), 'description': nzb.get('weblink') }) config = [{ 'name': 'omgwtfnzbs', 'groups': [ { 'tab': 'searcher', 'list': 'nzb_providers', 'name': 'OMGWTFNZBs', 'description': 'See <a href="https://omgwtfnzbs.me/" target="_blank">OMGWTFNZBs</a>', 'wizard': True, 'icon': 
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQEAIAAADAAbR1AAADbElEQVR4AZ2UW0ybZRiAy/OvdHaLYvB0YTRIFi7GkM44zRLmIfNixkWdiRMyYoxRE8/TC7MYvXCGEBmr3mxLwVMwY0wYA7e6Wso4lB6h/U9taSlMGIfBXLYlJMyo0S///2dJI5lxN8/F2/f9nu9737e/jYmXr6KTbN9BGG9HE/NotQ76UWziNzrXFiETk/5ARUNH+7+0kW7fSgTl0VKGOLZzidOkmuuIo7q2oTArNLPIzhdIkqXkerFOm2CaD/5bcKrjIL2c3fkhPxOq93Kcb91v46fV9TQKF4TgV/TbUsQtzfCaK6jMOd5DJrguSIIhexmqqVxN0FXbRR8/ND/LYTTj6J7nl2gnL47OkDW4KJhnQHCa6JpKVNJGA3OC58nwBJoZ//ebbIyKpBxjrr0o1q1FMRkrKXZnHWF85VvxMrJxibwhGyd0f5bLnKzqJs1k0Sfo+EU8hdAUvkbcwKEgs2D0OiV4jmmD1zb+Tp6er0JMMvDxPo5xev9zTBF683NS+N56n1YiB95B5crr93KRuKhKI0tb0Kw2mgLLqTjLEWO8424i9IvURaYeOckwf3+/yCC9e3bQQ/MuD+Monk0k+XFXMUfx7z5EEP+XlXi5tLlMxH8zLppw7idJrugcus30kC86gc7UrQqjLIukM8zWHOACeU+TiMxXN6ExVOkgz4lvPEzice1GIVhxhG4CrZvpl6TH55giKWqXGLy9hZh5aUtgDSew/msSyCKpl+DDNfxJc8NBIsxUxUnz14O/oONu+IIIvso9TLBQ1SY5rUhuSzUhAqJ2mRXBLDOCeUtgUZXsaObT8BffhUJPqWgiV+3zKKzYH0ClvTRLhD77HIqVkyh5jThnivehoG+qJctIRSPn6bxvO4FCgTl9c1DmbpjLajbQFE8aW5SU3rg+zOPGUjTUF9NFpLEbH2c/KmGYlY69/GQJVtGMSUcEp9eCbB1nctbxHTLRdTUkGDf+B02uGWRG3OvpJ/zSMwzif+oxVBID3cQKBavLCiPmB2PM2UuSCUPgrX4VDb97AwEG67bh4+KTOlncvu3M31BwA5rLHbCfEjwkNDky9e/SSbSxnD46Pg0RJtpXRvhmBSZHpRjWtKwFybjuQeXaKxto4WjLZZZvVmC17pZLJFkwxm5++PS2Mrwc7nyIMYZe/IzoP5d6QgEybqTXAAAAAElFTkSuQmCC', 'options': [ { 'name': 'enabled', 'type': 'enabler', }, { 'name': 'username', 'default': '', }, { 'name': 'api_key', 'label': 'Api Key', 'default': '', }, { 'name': 'custom_tag', 'advanced': True, 'label': 'Custom tag', 'default': '', 'description': 'Add custom parameters, for example add catid=18 to get foreign (non-english) movies', }, { 'name': 'extra_score', 'advanced': True, 'label': 'Extra Score', 'default': 20, 'type': 'int', 'description': 'Starting score for each release found via this provider.', } ], }, ], }]
4,434
Python
.py
84
39.559524
1,267
0.59926
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,827
nzbclub.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/nzb/nzbclub.py
import time from bs4 import BeautifulSoup from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.helpers.rss import RSS from couchpotato.core.helpers.variable import tryInt from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.nzb.base import NZBProvider from dateutil.parser import parse log = CPLog(__name__) class Base(NZBProvider, RSS): urls = { 'search': 'https://www.nzbclub.com/nzbrss.aspx?%s', } http_time_between_calls = 4 # seconds def _search(self, media, quality, results): nzbs = self.getRSSData(self.urls['search'] % self.buildUrl(media)) for nzb in nzbs: nzbclub_id = tryInt(self.getTextElement(nzb, "link").split('/nzb_view/')[1].split('/')[0]) enclosure = self.getElement(nzb, "enclosure").attrib size = enclosure['length'] date = self.getTextElement(nzb, "pubDate") def extra_check(item): full_description = self.getCache('nzbclub.%s' % nzbclub_id, item['detail_url'], cache_timeout = 25920000) for ignored in ['ARCHIVE inside ARCHIVE', 'Incomplete', 'repair impossible']: if ignored in full_description: log.info('Wrong: Seems to be passworded or corrupted files: %s', item['name']) return False return True results.append({ 'id': nzbclub_id, 'name': toUnicode(self.getTextElement(nzb, "title")), 'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))), 'size': tryInt(size) / 1024 / 1024, 'url': enclosure['url'].replace(' ', '_'), 'detail_url': self.getTextElement(nzb, "link"), 'get_more_info': self.getMoreInfo, 'extra_check': extra_check }) def getMoreInfo(self, item): full_description = self.getCache('nzbclub.%s' % item['id'], item['detail_url'], cache_timeout = 25920000) html = BeautifulSoup(full_description) nfo_pre = html.find('pre', attrs = {'class': 'nfo'}) description = toUnicode(nfo_pre.text) if nfo_pre else '' item['description'] = description return item def extraCheck(self, item): full_description = self.getCache('nzbclub.%s' % item['id'], item['detail_url'], cache_timeout = 25920000) if 
'ARCHIVE inside ARCHIVE' in full_description: log.info('Wrong: Seems to be passworded files: %s', item['name']) return False return True config = [{ 'name': 'nzbclub', 'groups': [ { 'tab': 'searcher', 'list': 'nzb_providers', 'name': 'NZBClub', 'description': 'Free provider, less accurate. See <a href="https://www.nzbclub.com/" target="_blank">NZBClub</a>', 'wizard': True, 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACEUlEQVQ4y3VSMWgUQRR9/8/s7OzeJSdnTsVGghLEYBNQjBpQiRBFhIB2EcHG1kbs0murhZAmVocExEZQ0c7CxkLINYcJJpoYj9wZcnu72fF21uJSXMzuhyne58/j/fcf4b+KokgBIOSU53lxP5b9oNVqDT36dH+5UjoiKvIwPFEEgWBshGZ3E7/NOupL9fMjx0e+ZhKsrq+c/FPZKJi0w4FsQXMBDEJsd7BNW9h2tuyP9vfTALIJkMIu1hYRtINM+dpzcWc0sbkreK4fUEogyraAmKGF3+7vcT/wtR9QwkCabSAzQQuvk0uglAo5YaQ5DASGYjfMXcHVOqKu6NmR7iehlKAdHWUqWPv1c3i+9uwVdRlEBGaGEAJCCrDo9ShhvF6qPq8tL57bp+DbRn2sHtUuCY9YphLMu5921VhrwYJ5tbt0tt6sjQP4vEfB2Ikz7/ytwbeR6ljHkXCUA6UcOLtPOg4MYhtH8ZcLw5er+xQMDAwEURRNl96X596Y6oxFwsw9fmtTOAr2Ik19nL365FZpsLSdnQPPM8aYewc+lDcX4rkHqbQMAGTJXulOLzycmr1bKBTi3DOGYagajcahiaOT89fbM0/dxEsUu3aidfPljWO3HzebzYNBELi5Z5RSJlrrHd/3w8lT114MrVTWOn875fHRiYVisRhorWMpZXdvNnLKGCOstb0AMlulVJI19w/+nceU4D0aCwAAAABJRU5ErkJggg==', 'options': [ { 'name': 'enabled', 'type': 'enabler', }, { 'name': 'extra_score', 'advanced': True, 'label': 'Extra Score', 'type': 'int', 'default': 0, 'description': 'Starting score for each release found via this provider.', } ], }, ], }]
4,255
Python
.py
78
42.75641
807
0.63201
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,828
__init__.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/nzb/__init__.py
config = [{ 'name': 'nzb_providers', 'groups': [ { 'label': 'Usenet Providers', 'description': 'Providers searching usenet for new releases', 'wizard': True, 'type': 'list', 'name': 'nzb_providers', 'tab': 'searcher', 'options': [], }, ], }]
354
Python
.py
14
16.285714
73
0.438235
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,829
newznab.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/nzb/newznab.py
from urlparse import urlparse import time import traceback import re from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode from couchpotato.core.helpers.rss import RSS from couchpotato.core.helpers.variable import cleanHost, splitString, tryInt from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.base import ResultList from couchpotato.core.media._base.providers.nzb.base import NZBProvider from couchpotato.environment import Env from dateutil.parser import parse from requests import HTTPError log = CPLog(__name__) class Base(NZBProvider, RSS): urls = { 'detail': 'details/%s', 'download': 't=get&id=%s' } passwords_regex = 'password|wachtwoord' limits_reached = {} http_time_between_calls = 2 # Seconds def search(self, media, quality): hosts = self.getHosts() results = ResultList(self, media, quality, imdb_results = True) for host in hosts: if self.isDisabled(host): continue self._searchOnHost(host, media, quality, results) return results def _searchOnHost(self, host, media, quality, results): query = self.buildUrl(media, host) url = '%s%s' % (self.getUrl(host['host']), query) nzbs = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()}) for nzb in nzbs: date = None spotter = None for item in nzb: if date and spotter: break if item.attrib.get('name') == 'usenetdate': date = item.attrib.get('value') break # Get the name of the person who posts the spot if item.attrib.get('name') == 'poster': if "@spot.net" in item.attrib.get('value'): spotter = item.attrib.get('value').split("@")[0] continue if not date: date = self.getTextElement(nzb, 'pubDate') name = self.getTextElement(nzb, 'title') detail_url = self.getTextElement(nzb, 'guid') nzb_id = detail_url.split('/')[-1:].pop() try: link = self.getElement(nzb, 'enclosure').attrib['url'] except: link = self.getTextElement(nzb, 'link') if '://' not in detail_url: detail_url = (cleanHost(host['host']) + self.urls['detail']) % 
tryUrlencode(nzb_id) if not link: link = ((self.getUrl(host['host']) + self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host) if not name: continue name_extra = '' if spotter: name_extra = spotter description = '' if "@spot.net" in nzb_id: try: # Get details for extended description to retrieve passwords query = self.buildDetailsUrl(nzb_id, host['api_key']) url = '%s%s' % (self.getUrl(host['host']), query) nzb_details = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})[0] description = self.getTextElement(nzb_details, 'description') # Extract a password from the description password = re.search('(?:' + self.passwords_regex + ')(?: *)(?:\:|\=)(?: *)(.*?)\<br\>|\n|$', description, flags = re.I).group(1) if password: name += ' {{%s}}' % password.strip() except: log.debug('Error getting details of "%s": %s', (name, traceback.format_exc())) results.append({ 'id': nzb_id, 'provider_extra': urlparse(host['host']).hostname or host['host'], 'name': toUnicode(name), 'name_extra': name_extra, 'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))), 'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024, 'url': link, 'detail_url': detail_url, 'content': self.getTextElement(nzb, 'description'), 'description': description, 'score': host['extra_score'], }) def getHosts(self): uses = splitString(str(self.conf('use')), clean = False) hosts = splitString(self.conf('host'), clean = False) api_keys = splitString(self.conf('api_key'), clean = False) extra_score = splitString(self.conf('extra_score'), clean = False) custom_tags = splitString(self.conf('custom_tag'), clean = False) custom_categories = splitString(self.conf('custom_categories'), clean = False) list = [] for nr in range(len(hosts)): try: key = api_keys[nr] except: key = '' try: host = hosts[nr] except: host = '' try: score = tryInt(extra_score[nr]) except: score = 0 try: custom_tag = custom_tags[nr] except: custom_tag = '' try: 
custom_category = custom_categories[nr].replace(" ", ",") except: custom_category = '' list.append({ 'use': uses[nr], 'host': host, 'api_key': key, 'extra_score': score, 'custom_tag': custom_tag, 'custom_category' : custom_category }) return list def belongsTo(self, url, provider = None, host = None): hosts = self.getHosts() for host in hosts: result = super(Base, self).belongsTo(url, host = host['host'], provider = provider) if result: return result def getUrl(self, host): if '?page=newznabapi' in host: return cleanHost(host)[:-1] + '&' return cleanHost(host) + 'api?' def isDisabled(self, host = None): return not self.isEnabled(host) def isEnabled(self, host = None): # Return true if at least one is enabled and no host is given if host is None: for host in self.getHosts(): if self.isEnabled(host): return True return False return NZBProvider.isEnabled(self) and host['host'] and host['api_key'] and int(host['use']) def getApiExt(self, host): return '&apikey=%s' % host['api_key'] def download(self, url = '', nzb_id = ''): host = urlparse(url).hostname if self.limits_reached.get(host): # Try again in 3 hours if self.limits_reached[host] > time.time() - 10800: return 'try_next' try: data = self.urlopen(url, show_error = False, headers = {'User-Agent': Env.getIdentifier()}) self.limits_reached[host] = False return data except HTTPError as e: sc = e.response.status_code if sc in [503, 429]: response = e.read().lower() if sc == 429 or 'maximum api' in response or 'download limit' in response: if not self.limits_reached.get(host): log.error('Limit reached / to many requests for newznab provider: %s', host) self.limits_reached[host] = time.time() return 'try_next' log.error('Failed download from %s: %s', (host, traceback.format_exc())) return 'try_next' def buildDetailsUrl(self, nzb_id, api_key): query = tryUrlencode({ 't': 'details', 'id': nzb_id, 'apikey': api_key, }) return query config = [{ 'name': 'newznab', 'groups': [ { 'tab': 'searcher', 'list': 'nzb_providers', 
'name': 'newznab', 'order': 10, 'description': 'Enable <a href="http://newznab.com/" target="_blank">NewzNab</a> such as <a href="https://nzb.su" target="_blank">NZB.su</a>, \ <a href="https://nzbs.org" target="_blank">NZBs.org</a>, <a href="http://dognzb.cr/login" target="_blank">DOGnzb.cr</a>, \ <a href="https://github.com/spotweb/spotweb" target="_blank">Spotweb</a>, <a href="https://nzbgeek.info/" target="_blank">NZBGeek</a>, \ <a href="https://www.nzbfinder.ws" target="_blank">NZBFinder</a>, <a href="https://www.usenet-crawler.com" target="_blank">Usenet-Crawler</a>', 'wizard': True, 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQAgMAAABinRfyAAAACVBMVEVjhwD///86aRovd/sBAAAAMklEQVQI12NgAIPQUCCRmQkjssDEShiRuRIqwZqZGcDAGBrqANUhGgIkWAOABKMDxCAA24UK50b26SAAAAAASUVORK5CYII=', 'options': [ { 'name': 'enabled', 'type': 'enabler', 'default': True, }, { 'name': 'use', 'default': '0,0,0,0,0,0' }, { 'name': 'host', 'default': 'api.nzb.su,api.dognzb.cr,nzbs.org,https://api.nzbgeek.info,https://www.nzbfinder.ws,https://www.usenet-crawler.com', 'description': 'The hostname of your newznab provider', }, { 'name': 'extra_score', 'advanced': True, 'label': 'Extra Score', 'default': '0,0,0,0,0,0', 'description': 'Starting score for each release found via this provider.', }, { 'name': 'custom_tag', 'advanced': True, 'label': 'Custom tag', 'default': ',,,,,', 'description': 'Add custom tags, for example add rls=1 to get only scene releases from nzbs.org', }, { 'name': 'custom_categories', 'advanced': True, 'label': 'Custom Categories', 'default': '2000,2000,2000,2000,2000,2000', 'description': 'Specify categories to search in seperated by a single space, defaults to all movies. EG: "2030 2040 2060" would only search in HD, SD, and 3D movie categories', }, { 'name': 'api_key', 'default': ',,,,,', 'label': 'Api Key', 'description': 'Can be found on your profile page', 'type': 'combined', 'combine': ['use', 'host', 'api_key', 'extra_score', 'custom_tag'], }, ], }, ], }]
10,933
Python
.py
233
33.124464
196
0.522176
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,830
base.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/nzb/base.py
import time from couchpotato.core.media._base.providers.base import YarrProvider class NZBProvider(YarrProvider): protocol = 'nzb' def calculateAge(self, unix): return int(time.time() - unix) / 24 / 60 / 60
228
Python
.py
6
33.5
68
0.728111
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,831
binsearch.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/providers/nzb/binsearch.py
import re import traceback from bs4 import BeautifulSoup from couchpotato.core.helpers.variable import tryInt, simplifyString from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.nzb.base import NZBProvider log = CPLog(__name__) class Base(NZBProvider): urls = { 'download': 'https://www.binsearch.info/fcgi/nzb.fcgi?q=%s', 'detail': 'https://www.binsearch.info%s', 'search': 'https://www.binsearch.info/index.php?%s', } http_time_between_calls = 4 # Seconds def _search(self, media, quality, results): data = self.getHTMLData(self.urls['search'] % self.buildUrl(media, quality)) if data: try: html = BeautifulSoup(data) main_table = html.find('table', attrs = {'id': 'r2'}) if not main_table: return items = main_table.find_all('tr') for row in items: title = row.find('span', attrs = {'class': 's'}) if not title: continue nzb_id = row.find('input', attrs = {'type': 'checkbox'})['name'] info = row.find('span', attrs = {'class':'d'}) size_match = re.search('size:.(?P<size>[0-9\.]+.[GMB]+)', info.text) age = 0 try: age = re.search('(?P<size>\d+d)', row.find_all('td')[-1:][0].text).group('size')[:-1] except: pass def extra_check(item): parts = re.search('available:.(?P<parts>\d+)./.(?P<total>\d+)', info.text) total = float(tryInt(parts.group('total'))) parts = float(tryInt(parts.group('parts'))) if (total / parts) < 1 and ((total / parts) < 0.95 or ((total / parts) >= 0.95 and not ('par2' in info.text.lower() or 'pa3' in info.text.lower()))): log.info2('Wrong: \'%s\', not complete: %s out of %s', (item['name'], parts, total)) return False if 'requires password' in info.text.lower(): log.info2('Wrong: \'%s\', passworded', (item['name'])) return False return True results.append({ 'id': nzb_id, 'name': simplifyString(title.text), 'age': tryInt(age), 'size': self.parseSize(size_match.group('size')), 'url': self.urls['download'] % nzb_id, 'detail_url': self.urls['detail'] % info.find('a')['href'], 'extra_check': extra_check }) except: 
log.error('Failed to parse HTML response from BinSearch: %s', traceback.format_exc()) def download(self, url = '', nzb_id = ''): data = { 'action': 'nzb', nzb_id: 'on' } try: return self.urlopen(url, data = data, show_error = False) except: log.error('Failed getting nzb from %s: %s', (self.getName(), traceback.format_exc())) return 'try_next' config = [{ 'name': 'binsearch', 'groups': [ { 'tab': 'searcher', 'list': 'nzb_providers', 'name': 'binsearch', 'description': 'Free provider, less accurate. See <a href="https://www.binsearch.info/" target="_blank">BinSearch</a>', 'wizard': True, 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAAAAAA6mKC9AAAATklEQVQY02NwQAMMWAXOnz+PKvD//3/CAvM//z+fgiwAAs+RBab4PP//vwbFjPlAffgEChzOo2r5fBuIfRAC5w8D+QUofkkp8MHjOWQAAM3Sbogztg2wAAAAAElFTkSuQmCC', 'options': [ { 'name': 'enabled', 'type': 'enabler', }, { 'name': 'extra_score', 'advanced': True, 'label': 'Extra Score', 'type': 'int', 'default': 0, 'description': 'Starting score for each release found via this provider.', } ], }, ], }]
4,320
Python
.py
91
32.087912
203
0.485476
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,832
__init__.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/library/__init__.py
from .main import Library def autoload(): return Library() config = []
78
Python
.py
4
16.75
25
0.71831
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,833
base.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/library/base.py
from couchpotato.core.event import addEvent from couchpotato.core.plugins.base import Plugin class LibraryBase(Plugin): _type = None def initType(self): addEvent('library.types', self.getType) def getType(self): return self._type
263
Python
.py
8
27.75
48
0.736
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,834
main.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/library/main.py
from couchpotato import get_db from couchpotato.api import addApiView from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.logger import CPLog from couchpotato.core.media._base.library.base import LibraryBase log = CPLog(__name__) class Library(LibraryBase): def __init__(self): addEvent('library.title', self.title) addEvent('library.related', self.related) addEvent('library.tree', self.tree) addEvent('library.root', self.root) addApiView('library.query', self.queryView) addApiView('library.related', self.relatedView) addApiView('library.tree', self.treeView) def queryView(self, media_id, **kwargs): db = get_db() media = db.get('id', media_id) return { 'result': fireEvent('library.query', media, single = True) } def relatedView(self, media_id, **kwargs): db = get_db() media = db.get('id', media_id) return { 'result': fireEvent('library.related', media, single = True) } def treeView(self, media_id, **kwargs): db = get_db() media = db.get('id', media_id) return { 'result': fireEvent('library.tree', media, single = True) } def title(self, library): return fireEvent( 'library.query', library, condense = False, include_year = False, include_identifier = False, single = True ) def related(self, media): result = {self.key(media['type']): media} db = get_db() cur = media while cur and cur.get('parent_id'): cur = db.get('id', cur['parent_id']) result[self.key(cur['type'])] = cur children = db.get_many('media_children', media['_id'], with_doc = True) for item in children: key = self.key(item['doc']['type']) + 's' if key not in result: result[key] = [] result[key].append(item['doc']) return result def root(self, media): db = get_db() cur = media while cur and cur.get('parent_id'): cur = db.get('id', cur['parent_id']) return cur def tree(self, media = None, media_id = None): db = get_db() if media: result = media elif media_id: result = db.get('id', media_id, with_doc = True) else: return None # Find children items = db.get_many('media_children', 
result['_id'], with_doc = True) keys = [] # Build children arrays for item in items: key = self.key(item['doc']['type']) + 's' if key not in result: result[key] = {} elif type(result[key]) is not dict: result[key] = {} if key not in keys: keys.append(key) result[key][item['_id']] = fireEvent('library.tree', item['doc'], single = True) # Unique children for key in keys: result[key] = result[key].values() # Include releases result['releases'] = fireEvent('release.for_media', result['_id'], single = True) return result def key(self, media_type): parts = media_type.split('.') return parts[-1]
3,396
Python
.py
92
27.130435
92
0.552632
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,835
main.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/_base/search/main.py
from couchpotato.api import addApiView from couchpotato.core.event import fireEvent, addEvent from couchpotato.core.helpers.variable import mergeDicts, getImdb from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin log = CPLog(__name__) class Search(Plugin): def __init__(self): addApiView('search', self.search, docs = { 'desc': 'Search the info in providers for a movie', 'params': { 'q': {'desc': 'The (partial) movie name you want to search for'}, 'type': {'desc': 'Search for a specific media type. Leave empty to search all.'}, }, 'return': {'type': 'object', 'example': """{ 'success': True, 'movies': array, 'show': array, etc }"""} }) addEvent('app.load', self.addSingleSearches) def search(self, q = '', types = None, **kwargs): # Make sure types is the correct instance if isinstance(types, (str, unicode)): types = [types] elif isinstance(types, (list, tuple, set)): types = list(types) imdb_identifier = getImdb(q) if not types: if imdb_identifier: result = fireEvent('movie.info', identifier = imdb_identifier, merge = True) result = {result['type']: [result]} else: result = fireEvent('info.search', q = q, merge = True) else: result = {} for media_type in types: if imdb_identifier: result[media_type] = fireEvent('%s.info' % media_type, identifier = imdb_identifier) else: result[media_type] = fireEvent('%s.search' % media_type, q = q) return mergeDicts({ 'success': True, }, result) def createSingleSearch(self, media_type): def singleSearch(q, **kwargs): return self.search(q, type = media_type, **kwargs) return singleSearch def addSingleSearches(self): for media_type in fireEvent('media.types', merge = True): addApiView('%s.search' % media_type, self.createSingleSearch(media_type))
2,211
Python
.py
52
32.519231
104
0.583294
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,836
suggestion.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/suggestion.py
import time from couchpotato.api import addApiView from couchpotato.core.event import fireEvent, addEvent from couchpotato.core.helpers.variable import splitString, removeDuplicate, getIdentifier, getTitle from couchpotato.core.plugins.base import Plugin from couchpotato.environment import Env autoload = 'Suggestion' class Suggestion(Plugin): def __init__(self): addApiView('suggestion.view', self.suggestView) addApiView('suggestion.ignore', self.ignoreView) def test(): time.sleep(1) self.suggestView() addEvent('app.load', test) def suggestView(self, limit = 6, **kwargs): if self.isDisabled(): return { 'success': True, 'movies': [] } movies = splitString(kwargs.get('movies', '')) ignored = splitString(kwargs.get('ignored', '')) seen = splitString(kwargs.get('seen', '')) cached_suggestion = self.getCache('suggestion_cached') if cached_suggestion: suggestions = cached_suggestion else: if not movies or len(movies) == 0: active_movies = fireEvent('media.with_status', ['active', 'done'], types = 'movie', single = True) movies = [getIdentifier(x) for x in active_movies] if not ignored or len(ignored) == 0: ignored = splitString(Env.prop('suggest_ignore', default = '')) if not seen or len(seen) == 0: movies.extend(splitString(Env.prop('suggest_seen', default = ''))) suggestions = fireEvent('movie.suggest', movies = movies, ignore = ignored, single = True) self.setCache('suggestion_cached', suggestions, timeout = 6048000) # Cache for 10 weeks medias = [] for suggestion in suggestions[:int(limit)]: # Cache poster posters = suggestion.get('images', {}).get('poster', []) poster = [x for x in posters if 'tmdb' in x] posters = poster if len(poster) > 0 else posters cached_poster = fireEvent('file.download', url = posters[0], single = True) if len(posters) > 0 else False files = {'image_poster': [cached_poster] } if cached_poster else {} medias.append({ 'status': 'suggested', 'title': getTitle(suggestion), 'type': 'movie', 'info': suggestion, 'files': files, 'identifiers': { 
'imdb': suggestion.get('imdb') } }) return { 'success': True, 'movies': medias } def ignoreView(self, imdb = None, limit = 6, remove_only = False, mark_seen = False, **kwargs): ignored = splitString(Env.prop('suggest_ignore', default = '')) seen = splitString(Env.prop('suggest_seen', default = '')) new_suggestions = [] if imdb: if mark_seen: seen.append(imdb) Env.prop('suggest_seen', ','.join(set(seen))) elif not remove_only: ignored.append(imdb) Env.prop('suggest_ignore', ','.join(set(ignored))) new_suggestions = self.updateSuggestionCache(ignore_imdb = imdb, limit = limit, ignored = ignored, seen = seen) if len(new_suggestions) <= limit: return { 'result': False } # Only return new (last) item media = { 'status': 'suggested', 'title': getTitle(new_suggestions[limit]), 'type': 'movie', 'info': new_suggestions[limit], 'identifiers': { 'imdb': new_suggestions[limit].get('imdb') } } return { 'result': True, 'movie': media } def updateSuggestionCache(self, ignore_imdb = None, limit = 6, ignored = None, seen = None): # Combine with previous suggestion_cache cached_suggestion = self.getCache('suggestion_cached') or [] new_suggestions = [] ignored = [] if not ignored else ignored seen = [] if not seen else seen if ignore_imdb: suggested_imdbs = [] for cs in cached_suggestion: if cs.get('imdb') != ignore_imdb and cs.get('imdb') not in suggested_imdbs: suggested_imdbs.append(cs.get('imdb')) new_suggestions.append(cs) # Get new results and add them if len(new_suggestions) - 1 < limit: active_movies = fireEvent('media.with_status', ['active', 'done'], single = True) movies = [getIdentifier(x) for x in active_movies] movies.extend(seen) ignored.extend([x.get('imdb') for x in cached_suggestion]) suggestions = fireEvent('movie.suggest', movies = movies, ignore = removeDuplicate(ignored), single = True) if suggestions: new_suggestions.extend(suggestions) self.setCache('suggestion_cached', new_suggestions, timeout = 3024000) return new_suggestions config = [{ 'name': 
'suggestion', 'groups': [ { 'label': 'Suggestions', 'description': 'Displays suggestions on the home page', 'name': 'suggestions', 'tab': 'display', 'options': [ { 'name': 'enabled', 'default': True, 'type': 'enabler', }, ], }, ], }]
5,628
Python
.py
130
31.438462
123
0.551528
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,837
searcher.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/searcher.py
from datetime import date import random import re import time import traceback from couchpotato import get_db from couchpotato.api import addApiView from couchpotato.core.event import addEvent, fireEvent, fireEventAsync from couchpotato.core.helpers.encoding import simplifyString from couchpotato.core.helpers.variable import getTitle, possibleTitles, getImdb, getIdentifier, tryInt from couchpotato.core.logger import CPLog from couchpotato.core.media._base.searcher.base import SearcherBase from couchpotato.core.media.movie import MovieTypeBase from couchpotato.environment import Env log = CPLog(__name__) autoload = 'MovieSearcher' class MovieSearcher(SearcherBase, MovieTypeBase): in_progress = False def __init__(self): super(MovieSearcher, self).__init__() addEvent('movie.searcher.all', self.searchAll) addEvent('movie.searcher.all_view', self.searchAllView) addEvent('movie.searcher.single', self.single) addEvent('movie.searcher.try_next_release', self.tryNextRelease) addEvent('movie.searcher.could_be_released', self.couldBeReleased) addEvent('searcher.correct_release', self.correctRelease) addEvent('searcher.get_search_title', self.getSearchTitle) addApiView('movie.searcher.try_next', self.tryNextReleaseView, docs = { 'desc': 'Marks the snatched results as ignored and try the next best release', 'params': { 'media_id': {'desc': 'The id of the media'}, }, }) addApiView('movie.searcher.full_search', self.searchAllView, docs = { 'desc': 'Starts a full search for all wanted movies', }) addApiView('movie.searcher.progress', self.getProgress, docs = { 'desc': 'Get the progress of current full search', 'return': {'type': 'object', 'example': """{ 'progress': False || object, total & to_go, }"""}, }) if self.conf('run_on_launch'): addEvent('app.load', self.searchAll) def searchAllView(self, **kwargs): fireEventAsync('movie.searcher.all', manual = True) return { 'success': not self.in_progress } def searchAll(self, manual = False): if self.in_progress: log.info('Search 
already in progress') fireEvent('notify.frontend', type = 'movie.searcher.already_started', data = True, message = 'Full search already in progress') return self.in_progress = True fireEvent('notify.frontend', type = 'movie.searcher.started', data = True, message = 'Full search started') medias = [x['_id'] for x in fireEvent('media.with_status', 'active', types = 'movie', with_doc = False, single = True)] random.shuffle(medias) total = len(medias) self.in_progress = { 'total': total, 'to_go': total, } try: search_protocols = fireEvent('searcher.protocols', single = True) for media_id in medias: media = fireEvent('media.get', media_id, single = True) if not media: continue try: self.single(media, search_protocols, manual = manual) except IndexError: log.error('Forcing library update for %s, if you see this often, please report: %s', (getIdentifier(media), traceback.format_exc())) fireEvent('movie.update', media_id) except: log.error('Search failed for %s: %s', (getIdentifier(media), traceback.format_exc())) self.in_progress['to_go'] -= 1 # Break if CP wants to shut down if self.shuttingDown(): break except SearchSetupError: pass self.in_progress = False def single(self, movie, search_protocols = None, manual = False, force_download = False): # Find out search type try: if not search_protocols: search_protocols = fireEvent('searcher.protocols', single = True) except SearchSetupError: return if not movie['profile_id'] or (movie['status'] == 'done' and not manual): log.debug('Movie doesn\'t have a profile or already done, assuming in manage tab.') fireEvent('media.restatus', movie['_id'], single = True) return default_title = getTitle(movie) if not default_title: log.error('No proper info found for movie, removing it from library to stop it from causing more issues.') fireEvent('media.delete', movie['_id'], single = True) return # Update media status and check if it is still not done (due to the stop searching after feature if fireEvent('media.restatus', movie['_id'], 
single = True) == 'done': log.debug('No better quality found, marking movie %s as done.', default_title) pre_releases = fireEvent('quality.pre_releases', single = True) release_dates = fireEvent('movie.update_release_dates', movie['_id'], merge = True) found_releases = [] previous_releases = movie.get('releases', []) too_early_to_search = [] outside_eta_results = 0 always_search = self.conf('always_search') ignore_eta = manual total_result_count = 0 fireEvent('notify.frontend', type = 'movie.searcher.started', data = {'_id': movie['_id']}, message = 'Searching for "%s"' % default_title) # Ignore eta once every 7 days if not always_search: prop_name = 'last_ignored_eta.%s' % movie['_id'] last_ignored_eta = float(Env.prop(prop_name, default = 0)) if last_ignored_eta < time.time() - 604800: ignore_eta = True Env.prop(prop_name, value = time.time()) db = get_db() profile = db.get('id', movie['profile_id']) ret = False for index, q_identifier in enumerate(profile.get('qualities', [])): quality_custom = { 'index': index, 'quality': q_identifier, 'finish': profile['finish'][index], 'wait_for': tryInt(profile['wait_for'][index]), '3d': profile['3d'][index] if profile.get('3d') else False, 'minimum_score': profile.get('minimum_score', 1), } could_not_be_released = not self.couldBeReleased(q_identifier in pre_releases, release_dates, movie['info']['year']) if not always_search and could_not_be_released: too_early_to_search.append(q_identifier) # Skip release, if ETA isn't ignored if not ignore_eta: continue has_better_quality = 0 # See if better quality is available for release in movie.get('releases', []): if release['status'] not in ['available', 'ignored', 'failed']: is_higher = fireEvent('quality.ishigher', \ {'identifier': q_identifier, 'is_3d': quality_custom.get('3d', 0)}, \ {'identifier': release['quality'], 'is_3d': release.get('is_3d', 0)}, \ profile, single = True) if is_higher != 'higher': has_better_quality += 1 # Don't search for quality lower then already 
available. if has_better_quality > 0: log.info('Better quality (%s) already available or snatched for %s', (q_identifier, default_title)) fireEvent('media.restatus', movie['_id'], single = True) break quality = fireEvent('quality.single', identifier = q_identifier, single = True) log.info('Search for %s in %s%s', (default_title, quality['label'], ' ignoring ETA' if always_search or ignore_eta else '')) # Extend quality with profile customs quality['custom'] = quality_custom results = fireEvent('searcher.search', search_protocols, movie, quality, single = True) or [] # Check if movie isn't deleted while searching if not fireEvent('media.get', movie.get('_id'), single = True): break # Add them to this movie releases list found_releases += fireEvent('release.create_from_search', results, movie, quality, single = True) results_count = len(found_releases) total_result_count += results_count if results_count == 0: log.debug('Nothing found for %s in %s', (default_title, quality['label'])) # Keep track of releases found outside ETA window outside_eta_results += results_count if could_not_be_released else 0 # Don't trigger download, but notify user of available releases if could_not_be_released and results_count > 0: log.debug('Found %s releases for "%s", but ETA isn\'t correct yet.', (results_count, default_title)) # Try find a valid result and download it if (force_download or not could_not_be_released or always_search) and fireEvent('release.try_download_result', results, movie, quality_custom, single = True): ret = True # Remove releases that aren't found anymore temp_previous_releases = [] for release in previous_releases: if release.get('status') == 'available' and release.get('identifier') not in found_releases: fireEvent('release.delete', release.get('_id'), single = True) else: temp_previous_releases.append(release) previous_releases = temp_previous_releases del temp_previous_releases # Break if CP wants to shut down if self.shuttingDown() or ret: break if 
total_result_count > 0: fireEvent('media.tag', movie['_id'], 'recent', update_edited = True, single = True) if len(too_early_to_search) > 0: log.info2('Too early to search for %s, %s', (too_early_to_search, default_title)) if outside_eta_results > 0: message = 'Found %s releases for "%s" before ETA. Select and download via the dashboard.' % (outside_eta_results, default_title) log.info(message) if not manual: fireEvent('media.available', message = message, data = {}) fireEvent('notify.frontend', type = 'movie.searcher.ended', data = {'_id': movie['_id']}) return ret def correctRelease(self, nzb = None, media = None, quality = None, **kwargs): if media.get('type') != 'movie': return media_title = fireEvent('searcher.get_search_title', media, single = True) imdb_results = kwargs.get('imdb_results', False) retention = Env.setting('retention', section = 'nzb') if nzb.get('seeders') is None and 0 < retention < nzb.get('age', 0): log.info2('Wrong: Outside retention, age is %s, needs %s or lower: %s', (nzb['age'], retention, nzb['name'])) return False # Check for required and ignored words if not fireEvent('searcher.correct_words', nzb['name'], media, single = True): return False preferred_quality = quality if quality else fireEvent('quality.single', identifier = quality['identifier'], single = True) # Contains lower quality string contains_other = fireEvent('searcher.contains_other_quality', nzb, movie_year = media['info']['year'], preferred_quality = preferred_quality, single = True) if contains_other and isinstance(contains_other, dict): log.info2('Wrong: %s, looking for %s, found %s', (nzb['name'], quality['label'], [x for x in contains_other] if contains_other else 'no quality')) return False # Contains lower quality string if not fireEvent('searcher.correct_3d', nzb, preferred_quality = preferred_quality, single = True): log.info2('Wrong: %s, %slooking for %s in 3D', (nzb['name'], ('' if preferred_quality['custom'].get('3d') else 'NOT '), quality['label'])) return 
False # File to small if nzb['size'] and tryInt(preferred_quality['size_min']) > tryInt(nzb['size']): log.info2('Wrong: "%s" is too small to be %s. %sMB instead of the minimal of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_min'])) return False # File to large if nzb['size'] and tryInt(preferred_quality['size_max']) < tryInt(nzb['size']): log.info2('Wrong: "%s" is too large to be %s. %sMB instead of the maximum of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_max'])) return False # Provider specific functions get_more = nzb.get('get_more_info') if get_more: get_more(nzb) extra_check = nzb.get('extra_check') if extra_check and not extra_check(nzb): return False if imdb_results: return True # Check if nzb contains imdb link if getImdb(nzb.get('description', '')) == getIdentifier(media): return True for raw_title in media['info']['titles']: for movie_title in possibleTitles(raw_title): movie_words = re.split('\W+', simplifyString(movie_title)) if fireEvent('searcher.correct_name', nzb['name'], movie_title, single = True): # if no IMDB link, at least check year range 1 if len(movie_words) > 2 and fireEvent('searcher.correct_year', nzb['name'], media['info']['year'], 1, single = True): return True # if no IMDB link, at least check year if len(movie_words) <= 2 and fireEvent('searcher.correct_year', nzb['name'], media['info']['year'], 0, single = True): return True log.info("Wrong: %s, undetermined naming. 
Looking for '%s (%s)'", (nzb['name'], media_title, media['info']['year'])) return False def couldBeReleased(self, is_pre_release, dates, year = None): now = int(time.time()) now_year = date.today().year now_month = date.today().month if (year is None or year < now_year - 1 or (year <= now_year - 1 and now_month > 4)) and (not dates or (dates.get('theater', 0) == 0 and dates.get('dvd', 0) == 0)): return True else: # Don't allow movies with years to far in the future add_year = 1 if now_month > 10 else 0 # Only allow +1 year if end of the year if year is not None and year > (now_year + add_year): return False # For movies before 1972 if not dates or dates.get('theater', 0) < 0 or dates.get('dvd', 0) < 0: return True if is_pre_release: # Prerelease 1 week before theaters if dates.get('theater') - 604800 < now: return True else: # 12 weeks after theater release if dates.get('theater') > 0 and dates.get('theater') + 7257600 < now: return True if dates.get('dvd') > 0: # 4 weeks before dvd release if dates.get('dvd') - 2419200 < now: return True # Dvd should be released if dates.get('dvd') < now: return True return False def tryNextReleaseView(self, media_id = None, **kwargs): trynext = self.tryNextRelease(media_id, manual = True, force_download = True) return { 'success': trynext } def tryNextRelease(self, media_id, manual = False, force_download = False): try: rels = fireEvent('release.for_media', media_id, single = True) for rel in rels: if rel.get('status') in ['snatched', 'done']: fireEvent('release.update_status', rel.get('_id'), status = 'ignored') media = fireEvent('media.get', media_id, single = True) if media: log.info('Trying next release for: %s', getTitle(media)) self.single(media, manual = manual, force_download = force_download) return True return False except: log.error('Failed searching for next release: %s', traceback.format_exc()) return False def getSearchTitle(self, media): if media['type'] == 'movie': return getTitle(media) class 
SearchSetupError(Exception): pass config = [{ 'name': 'moviesearcher', 'order': 20, 'groups': [ { 'tab': 'searcher', 'name': 'movie_searcher', 'label': 'Movie search', 'description': 'Search options for movies', 'advanced': True, 'options': [ { 'name': 'always_search', 'default': False, 'migrate_from': 'searcher', 'type': 'bool', 'label': 'Always search', 'description': 'Search for movies even before there is a ETA. Enabling this will probably get you a lot of fakes.', }, { 'name': 'run_on_launch', 'migrate_from': 'searcher', 'label': 'Run on launch', 'advanced': True, 'default': 0, 'type': 'bool', 'description': 'Force run the searcher after (re)start.', }, { 'name': 'search_on_add', 'label': 'Search after add', 'advanced': True, 'default': 1, 'type': 'bool', 'description': 'Disable this to only search for movies on cron.', }, { 'name': 'cron_day', 'migrate_from': 'searcher', 'label': 'Day', 'advanced': True, 'default': '*', 'type': 'string', 'description': '<strong>*</strong>: Every day, <strong>*/2</strong>: Every 2 days, <strong>1</strong>: Every first of the month. See <a href="https://apscheduler.readthedocs.org/en/latest/modules/triggers/cron.html" target="_blank">APScheduler</a> for details.', }, { 'name': 'cron_hour', 'migrate_from': 'searcher', 'label': 'Hour', 'advanced': True, 'default': random.randint(0, 23), 'type': 'string', 'description': '<strong>*</strong>: Every hour, <strong>*/8</strong>: Every 8 hours, <strong>3</strong>: At 3, midnight.', }, { 'name': 'cron_minute', 'migrate_from': 'searcher', 'label': 'Minute', 'advanced': True, 'default': random.randint(0, 59), 'type': 'string', 'description': "Just keep it random, so the providers don't get DDOSed by every CP user on a 'full' hour." }, ], }, ], }]
19,854
Python
.py
371
40.010782
282
0.562074
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,838
library.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/library.py
from couchpotato.core.event import addEvent from couchpotato.core.helpers.variable import getTitle from couchpotato.core.logger import CPLog from couchpotato.core.media._base.library.base import LibraryBase log = CPLog(__name__) autoload = 'MovieLibraryPlugin' class MovieLibraryPlugin(LibraryBase): def __init__(self): addEvent('library.query', self.query) def query(self, media, first = True, include_year = True, **kwargs): if media.get('type') != 'movie': return default_title = getTitle(media) titles = media['info'].get('titles', []) titles.insert(0, default_title) # Add year identifier to titles if include_year: titles = [title + (' %s' % str(media['info']['year'])) for title in titles] if first: return titles[0] if titles else None return titles
885
Python
.py
21
35.095238
87
0.66354
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,839
couchpotatoapi.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/info/couchpotatoapi.py
import base64 import time from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.helpers.encoding import tryUrlencode, ss from couchpotato.core.logger import CPLog from couchpotato.core.media.movie.providers.base import MovieProvider from couchpotato.environment import Env log = CPLog(__name__) autoload = 'CouchPotatoApi' class CouchPotatoApi(MovieProvider): urls = { 'validate': 'https://api.couchpota.to/validate/%s/', 'search': 'https://api.couchpota.to/search/%s/', 'info': 'https://api.couchpota.to/info/%s/', 'is_movie': 'https://api.couchpota.to/ismovie/%s/', 'eta': 'https://api.couchpota.to/eta/%s/', 'suggest': 'https://api.couchpota.to/suggest/', 'updater': 'https://api.couchpota.to/updater/?%s', 'messages': 'https://api.couchpota.to/messages/?%s', } http_time_between_calls = 0 api_version = 1 def __init__(self): addEvent('movie.info', self.getInfo, priority = 2) addEvent('movie.info.release_date', self.getReleaseDate) addEvent('info.search', self.search, priority = 1) addEvent('movie.search', self.search, priority = 1) addEvent('movie.suggest', self.getSuggestions) addEvent('movie.is_movie', self.isMovie) addEvent('release.validate', self.validate) addEvent('cp.api_call', self.call) addEvent('cp.source_url', self.getSourceUrl) addEvent('cp.messages', self.getMessages) def call(self, url, **kwargs): return self.getJsonData(url, headers = self.getRequestHeaders(), **kwargs) def getMessages(self, last_check = 0): data = self.getJsonData(self.urls['messages'] % tryUrlencode({ 'last_check': last_check, }), headers = self.getRequestHeaders(), cache_timeout = 10) return data def getSourceUrl(self, repo = None, repo_name = None, branch = None): return self.getJsonData(self.urls['updater'] % tryUrlencode({ 'repo': repo, 'name': repo_name, 'branch': branch, }), headers = self.getRequestHeaders()) def search(self, q, limit = 5): return self.getJsonData(self.urls['search'] % tryUrlencode(q) + ('?limit=%s' % limit), headers = 
self.getRequestHeaders()) def validate(self, name = None): if not name: return name_enc = base64.b64encode(ss(name)) return self.getJsonData(self.urls['validate'] % name_enc, headers = self.getRequestHeaders()) def isMovie(self, identifier = None, adding = False, **kwargs): if not identifier: return url = self.urls['is_movie'] % identifier url += '' if adding else '?ignore=1' data = self.getJsonData(url, headers = self.getRequestHeaders()) if data: return data.get('is_movie', True) return True def getInfo(self, identifier = None, adding = False, **kwargs): if not identifier: return url = self.urls['info'] % identifier url += '' if adding else '?ignore=1' result = self.getJsonData(url, headers = self.getRequestHeaders()) if result: return dict((k, v) for k, v in result.items() if v) return {} def getReleaseDate(self, identifier = None): if identifier is None: return {} dates = self.getJsonData(self.urls['eta'] % identifier, headers = self.getRequestHeaders()) log.debug('Found ETA for %s: %s', (identifier, dates)) return dates def getSuggestions(self, movies = None, ignore = None): if not ignore: ignore = [] if not movies: movies = [] suggestions = self.getJsonData(self.urls['suggest'], data = { 'movies': ','.join(movies), 'ignore': ','.join(ignore), }, headers = self.getRequestHeaders()) log.info('Found suggestions for %s movies, %s ignored', (len(movies), len(ignore))) return suggestions def getRequestHeaders(self): return { 'X-CP-Version': fireEvent('app.version', single = True), 'X-CP-API': self.api_version, 'X-CP-Time': time.time(), 'X-CP-Identifier': '+%s' % Env.setting('api_key', 'core')[:10], # Use first 10 as identifier, so we don't need to use IP address in api stats }
4,377
Python
.py
92
39.021739
154
0.625059
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,840
themoviedb.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/info/themoviedb.py
import random import traceback import itertools from base64 import b64decode as bd from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.helpers.encoding import toUnicode, ss, tryUrlencode from couchpotato.core.helpers.variable import tryInt, splitString from couchpotato.core.logger import CPLog from couchpotato.core.media.movie.providers.base import MovieProvider from couchpotato.environment import Env log = CPLog(__name__) autoload = 'TheMovieDb' class TheMovieDb(MovieProvider): http_time_between_calls = .35 configuration = { 'images': { 'secure_base_url': 'https://image.tmdb.org/t/p/', }, } ak = ['ZTIyNGZlNGYzZmVjNWY3YjU1NzA2NDFmN2NkM2RmM2E=', 'ZjZiZDY4N2ZmYTYzY2QyODJiNmZmMmM2ODc3ZjI2Njk='] languages = [ 'en' ] default_language = 'en' def __init__(self): addEvent('info.search', self.search, priority = 3) addEvent('movie.search', self.search, priority = 3) addEvent('movie.info', self.getInfo, priority = 3) addEvent('movie.info_by_tmdb', self.getInfo) addEvent('app.load', self.config) def config(self): # Reset invalid key if self.conf('api_key') == '9b939aee0aaafc12a65bf448e4af9543': self.conf('api_key', '') languages = self.getLanguages() # languages should never be empty, the first language is the default language used for all the description details self.default_language = languages[0] # en is always downloaded and it is the fallback if 'en' in languages: languages.remove('en') # default language has a special management if self.default_language in languages: languages.remove(self.default_language) self.languages = languages configuration = self.request('configuration') if configuration: self.configuration = configuration def search(self, q, limit = 3): """ Find movie by name """ if self.isDisabled(): return False log.debug('Searching for movie: %s', q) raw = None try: name_year = fireEvent('scanner.name_year', q, single = True) raw = self.request('search/movie', { 'query': name_year.get('name', q), 'year': name_year.get('year'), 
'search_type': 'ngram' if limit > 1 else 'phrase' }, return_key = 'results') except: log.error('Failed searching TMDB for "%s": %s', (q, traceback.format_exc())) results = [] if raw: try: nr = 0 for movie in raw: parsed_movie = self.parseMovie(movie, extended = False) if parsed_movie: results.append(parsed_movie) nr += 1 if nr == limit: break log.info('Found: %s', [result['titles'][0] + ' (' + str(result.get('year', 0)) + ')' for result in results]) return results except SyntaxError as e: log.error('Failed to parse XML response: %s', e) return False return results def getInfo(self, identifier = None, extended = True, **kwargs): if not identifier: return {} result = self.parseMovie({ 'id': identifier }, extended = extended) return result or {} def parseMovie(self, movie, extended = True): # Do request, append other items movie = self.request('movie/%s' % movie.get('id'), { 'append_to_response': 'alternative_titles' + (',images,casts' if extended else ''), 'language': 'en' }) if not movie: return movie_default = movie if self.default_language == 'en' else self.request('movie/%s' % movie.get('id'), { 'append_to_response': 'alternative_titles' + (',images,casts' if extended else ''), 'language': self.default_language }) movie_default = movie_default or movie movie_others = [ self.request('movie/%s' % movie.get('id'), { 'append_to_response': 'alternative_titles' + (',images,casts' if extended else ''), 'language': language }) for language in self.languages] if self.languages else [] # Images poster = self.getImage(movie, type = 'poster', size = 'w154') poster_original = self.getImage(movie, type = 'poster', size = 'original') backdrop_original = self.getImage(movie, type = 'backdrop', size = 'original') extra_thumbs = self.getMultImages(movie, type = 'backdrops', size = 'original') if extended else [] images = { 'poster': [poster] if poster else [], #'backdrop': [backdrop] if backdrop else [], 'poster_original': [poster_original] if poster_original else [], 
'backdrop_original': [backdrop_original] if backdrop_original else [], 'actors': {}, 'extra_thumbs': extra_thumbs } # Genres try: genres = [genre.get('name') for genre in movie.get('genres', [])] except: genres = [] # 1900 is the same as None year = str(movie.get('release_date') or '')[:4] if not movie.get('release_date') or year == '1900' or year.lower() == 'none': year = None # Gather actors data actors = {} if extended: # Full data cast = movie.get('casts', {}).get('cast', []) for cast_item in cast: try: actors[toUnicode(cast_item.get('name'))] = toUnicode(cast_item.get('character')) images['actors'][toUnicode(cast_item.get('name'))] = self.getImage(cast_item, type = 'profile', size = 'original') except: log.debug('Error getting cast info for %s: %s', (cast_item, traceback.format_exc())) movie_data = { 'type': 'movie', 'via_tmdb': True, 'tmdb_id': movie.get('id'), 'titles': [toUnicode(movie_default.get('title') or movie.get('title'))], 'original_title': movie.get('original_title'), 'images': images, 'imdb': movie.get('imdb_id'), 'runtime': movie.get('runtime'), 'released': str(movie.get('release_date')), 'year': tryInt(year, None), 'plot': movie_default.get('overview') or movie.get('overview'), 'genres': genres, 'collection': getattr(movie.get('belongs_to_collection'), 'name', None), 'actor_roles': actors } movie_data = dict((k, v) for k, v in movie_data.items() if v) # Add alternative names movies = [ movie ] + movie_others if movie == movie_default else [ movie, movie_default ] + movie_others movie_titles = [ self.getTitles(movie) for movie in movies ] all_titles = sorted(list(itertools.chain.from_iterable(movie_titles))) alternate_titles = movie_data['titles'] for title in all_titles: if title and title not in alternate_titles and title.lower() != 'none' and title is not None: alternate_titles.append(title) movie_data['titles'] = alternate_titles return movie_data def getImage(self, movie, type = 'poster', size = 'poster'): image_url = '' try: path = 
movie.get('%s_path' % type) if path: image_url = '%s%s%s' % (self.configuration['images']['secure_base_url'], size, path) except: log.debug('Failed getting %s.%s for "%s"', (type, size, ss(str(movie)))) return image_url def getMultImages(self, movie, type = 'backdrops', size = 'original'): image_urls = [] try: for image in movie.get('images', {}).get(type, [])[1:5]: image_urls.append(self.getImage(image, 'file', size)) except: log.debug('Failed getting %s.%s for "%s"', (type, size, ss(str(movie)))) return image_urls def request(self, call = '', params = {}, return_key = None): params = dict((k, v) for k, v in params.items() if v) params = tryUrlencode(params) try: url = 'https://api.themoviedb.org/3/%s?api_key=%s%s' % (call, self.getApiKey(), '&%s' % params if params else '') data = self.getJsonData(url, show_error = False) except: log.debug('Movie not found: %s, %s', (call, params)) data = None if data and return_key and return_key in data: data = data.get(return_key) return data def isDisabled(self): if self.getApiKey() == '': log.error('No API key provided.') return True return False def getApiKey(self): key = self.conf('api_key') return bd(random.choice(self.ak)) if key == '' else key def getLanguages(self): languages = splitString(Env.setting('languages', section = 'core')) if len(languages): return languages return [ 'en' ] def getTitles(self, movie): # add the title to the list title = toUnicode(movie.get('title')) titles = [title] if title else [] # add the original_title to the list alternate_title = toUnicode(movie.get('original_title')) if alternate_title and alternate_title not in titles: titles.append(alternate_title) # Add alternative titles alternate_titles = movie.get('alternative_titles', {}).get('titles', []) for alt in alternate_titles: alt_name = toUnicode(alt.get('title')) if alt_name and alt_name not in titles and alt_name.lower() != 'none' and alt_name is not None: titles.append(alt_name) return titles; config = [{ 'name': 'themoviedb', 
'groups': [ { 'tab': 'providers', 'name': 'tmdb', 'label': 'TheMovieDB', 'hidden': True, 'description': 'Used for all calls to TheMovieDB.', 'options': [ { 'name': 'api_key', 'default': '', 'label': 'Api Key', }, ], }, ], }]
10,510
Python
.py
237
33.548523
134
0.56546
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,841
_modifier.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/info/_modifier.py
import copy import traceback from CodernityDB.database import RecordNotFound from couchpotato import get_db from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.helpers.variable import mergeDicts, randomString from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin log = CPLog(__name__) autoload = 'MovieResultModifier' class MovieResultModifier(Plugin): default_info = { 'tmdb_id': 0, 'titles': [], 'original_title': '', 'year': 0, 'images': { 'poster': [], 'backdrop': [], 'poster_original': [], 'backdrop_original': [], 'actors': {}, 'landscape': [], 'logo': [], 'clear_art': [], 'disc_art': [], 'banner': [], 'extra_thumbs': [], 'extra_fanart': [] }, 'runtime': 0, 'plot': '', 'tagline': '', 'imdb': '', 'genres': [], 'mpaa': None, 'actors': [], 'actor_roles': {} } def __init__(self): addEvent('result.modify.info.search', self.returnByType) addEvent('result.modify.movie.search', self.combineOnIMDB) addEvent('result.modify.movie.info', self.checkLibrary) def returnByType(self, results): new_results = {} for r in results: type_name = r.get('type', 'movie') + 's' if type_name not in new_results: new_results[type_name] = [] new_results[type_name].append(r) # Combine movies, needs a cleaner way.. 
if 'movies' in new_results: new_results['movies'] = self.combineOnIMDB(new_results['movies']) return new_results def combineOnIMDB(self, results): temp = {} order = [] # Combine on imdb id for item in results: random_string = randomString() imdb = item.get('imdb', random_string) imdb = imdb if imdb else random_string if not temp.get(imdb): temp[imdb] = self.getLibraryTags(imdb) order.append(imdb) # Merge dicts temp[imdb] = mergeDicts(temp[imdb], item) # Make it a list again temp_list = [temp[x] for x in order] return temp_list def getLibraryTags(self, imdb): temp = { 'in_wanted': False, 'in_library': False, } # Add release info from current library db = get_db() try: media = None try: media = db.get('media', 'imdb-%s' % imdb, with_doc = True)['doc'] except RecordNotFound: pass if media: if media.get('status') == 'active': temp['in_wanted'] = media try: temp['in_wanted']['profile'] = db.get('id', media['profile_id']) except: temp['in_wanted']['profile'] = {'label': ''} for release in fireEvent('release.for_media', media['_id'], single = True): if release.get('status') == 'done': if not temp['in_library']: temp['in_library'] = media temp['in_library']['releases'] = [] temp['in_library']['releases'].append(release) except: log.error('Tried getting more info on searched movies: %s', traceback.format_exc()) return temp def checkLibrary(self, result): result = mergeDicts(copy.deepcopy(self.default_info), copy.deepcopy(result)) if result and result.get('imdb'): return mergeDicts(result, self.getLibraryTags(result['imdb'])) return result
3,850
Python
.py
102
26.656863
95
0.530552
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,842
fanarttv.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/info/fanarttv.py
import traceback

from couchpotato import tryInt
from couchpotato.core.event import addEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.base import MovieProvider
from requests import HTTPError

log = CPLog(__name__)

autoload = 'FanartTV'


class FanartTV(MovieProvider):
    """Fetches extra artwork (logos, disc art, banners, fanart) from fanart.tv."""

    urls = {
        'api': 'http://webservice.fanart.tv/v3/movies/%s?api_key=b28b14e9be662e027cfbc7c3dd600405'
    }

    MAX_EXTRAFANART = 20
    http_time_between_calls = 0

    def __init__(self):
        addEvent('movie.info', self.getArt, priority = 1)

    def getArt(self, identifier = None, extended = True, **kwargs):
        """Return {'images': {...}} for an identifier; {} when disabled or failing."""
        if not identifier or not extended:
            return {}

        images = {}

        try:
            url = self.urls['api'] % identifier
            fanart_data = self.getJsonData(url, show_error = False)

            if fanart_data:
                log.debug('Found images for %s', fanart_data.get('name'))
                images = self._parseMovie(fanart_data)
        except HTTPError as e:
            # Missing artwork is common; only log at debug level
            log.debug('Failed getting extra art for %s: %s', (identifier, e))
        except:
            log.error('Failed getting extra art for %s: %s', (identifier, traceback.format_exc()))
            return {}

        return {
            'images': images
        }

    def _parseMovie(self, movie):
        """Map a raw fanart.tv payload onto CP's image categories."""
        art = {
            'landscape': self._getMultImages(movie.get('moviethumb', []), 1),
            'logo': [],
            'disc_art': self._getMultImages(self._trimDiscs(movie.get('moviedisc', [])), 1),
            'clear_art': self._getMultImages(movie.get('hdmovieart', []), 1),
            'banner': self._getMultImages(movie.get('moviebanner', []), 1),
            'extra_fanart': [],
        }

        # Prefer the HD variants, fall back to SD when nothing was found
        if not art['clear_art']:
            art['clear_art'] = self._getMultImages(movie.get('movieart', []), 1)

        art['logo'] = self._getMultImages(movie.get('hdmovielogo', []), 1)
        if not art['logo']:
            art['logo'] = self._getMultImages(movie.get('movielogo', []), 1)

        # First background becomes the backdrop, the rest extra fanart
        fanarts = self._getMultImages(movie.get('moviebackground', []), self.MAX_EXTRAFANART + 1)
        if fanarts:
            art['backdrop_original'] = [fanarts[0]]
            art['extra_fanart'] = fanarts[1:]

        return art

    def _trimDiscs(self, disc_images):
        """Keep only bluray disc images; return the input unchanged when none match."""
        bluray_only = [disc for disc in disc_images if disc.get('disc_type') == 'bluray']
        return bluray_only if bluray_only else disc_images

    def _getImage(self, images):
        """Return the url of the most-liked image, or None for an empty list."""
        best_url = None
        best_likes = -1

        for image in images:
            likes = tryInt(image.get('likes'))
            if likes > best_likes:
                best_likes = likes
                best_url = image.get('url') or image.get('href')

        return best_url

    def _getMultImages(self, images, n):
        """Pick the n most-liked English images (all of them when n < 0)."""
        urls = []
        candidates = [image for image in images if image.get('lang') == 'en']
        initial_count = len(candidates)

        # Repeatedly extract the highest-liked candidate until n are taken
        while candidates and (n < 0 or initial_count - len(candidates) < n):
            best = None
            best_likes = -1
            for image in candidates:
                likes = tryInt(image.get('likes'))
                if likes > best_likes:
                    best_likes = likes
                    best = image

            url = best.get('url') or best.get('href')
            if url:
                urls.append(url)
            candidates.remove(best)

        return urls

    def isDisabled(self):
        if self.conf('api_key') == '':
            log.error('No API key provided.')
            return True
        return False
4,060
Python
.py
102
29.323529
98
0.55615
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,843
omdbapi.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/info/omdbapi.py
import json
import re
import traceback

from couchpotato import Env
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import tryInt, tryFloat, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.base import MovieProvider

log = CPLog(__name__)

autoload = 'OMDBAPI'


class OMDBAPI(MovieProvider):
    """Movie info/search provider backed by the OMDB API (omdbapi.com)."""

    urls = {
        'search': 'https://www.omdbapi.com/?apikey=%s&type=movie&%s',
        'info': 'https://www.omdbapi.com/?apikey=%s&type=movie&i=%s',
    }

    http_time_between_calls = 0

    def __init__(self):
        addEvent('info.search', self.search)
        addEvent('movie.search', self.search)
        addEvent('movie.info', self.getInfo)

    def search(self, q, limit = 12):
        """Search OMDB by name/year parsed from the query string.

        Returns a list holding at most one parsed movie dict.
        """
        if self.isDisabled():
            return []

        name_year = fireEvent('scanner.name_year', q, single = True)

        if not name_year or (name_year and not name_year.get('name')):
            name_year = {
                'name': q
            }

        cache_key = 'omdbapi.cache.%s' % q
        url = self.urls['search'] % (self.getApiKey(), tryUrlencode({'t': name_year.get('name'), 'y': name_year.get('year', '')}))
        cached = self.getCache(cache_key, url, timeout = 3, headers = {'User-Agent': Env.getIdentifier()})

        if cached:
            result = self.parseMovie(cached)
            if result.get('titles') and len(result.get('titles')) > 0:
                log.info('Found: %s', result['titles'][0] + ' (' + str(result.get('year')) + ')')
                return [result]

        return []

    def getInfo(self, identifier = None, **kwargs):
        """Fetch full movie info for an IMDB identifier; {} when unavailable."""
        if self.isDisabled() or not identifier:
            return {}

        cache_key = 'omdbapi.cache.%s' % identifier
        url = self.urls['info'] % (self.getApiKey(), identifier)
        cached = self.getCache(cache_key, url, timeout = 3, headers = {'User-Agent': Env.getIdentifier()})

        if cached:
            result = self.parseMovie(cached)
            if result.get('titles') and len(result.get('titles')) > 0:
                log.info('Found: %s', result['titles'][0] + ' (' + str(result['year']) + ')')
                return result

        return {}

    def parseMovie(self, movie):
        """Normalize a raw OMDB response (json string or dict) into CP's movie dict.

        Returns {} for error responses, non-movie types or unparsable input.
        """
        movie_data = {}
        try:

            try:
                if isinstance(movie, (str, unicode)):
                    movie = json.loads(movie)
            except ValueError:
                log.info('No proper json to decode')
                return movie_data

            if movie.get('Response') == 'Parse Error' or movie.get('Response') == 'False':
                return movie_data

            # Use a default: a missing 'Type' previously raised AttributeError,
            # which was swallowed by the outer except and logged as an error.
            if movie.get('Type', '').lower() != 'movie':
                return movie_data

            # Drop non-string values and OMDB's 'N/A' placeholders before mapping
            tmp_movie = movie.copy()
            for key in tmp_movie:
                tmp_movie_elem = tmp_movie.get(key)
                if not isinstance(tmp_movie_elem, (str, unicode)) or tmp_movie_elem.lower() == 'n/a':
                    del movie[key]

            year = tryInt(movie.get('Year', ''))

            movie_data = {
                'type': 'movie',
                'via_imdb': True,
                'titles': [movie.get('Title')] if movie.get('Title') else [],
                'original_title': movie.get('Title'),
                'images': {
                    'poster': [movie.get('Poster', '')] if movie.get('Poster') and len(movie.get('Poster', '')) > 4 else [],
                },
                'rating': {
                    'imdb': (tryFloat(movie.get('imdbRating', 0)), tryInt(movie.get('imdbVotes', '').replace(',', ''))),
                    #'rotten': (tryFloat(movie.get('tomatoRating', 0)), tryInt(movie.get('tomatoReviews', '').replace(',', ''))),
                },
                'imdb': str(movie.get('imdbID', '')),
                'mpaa': str(movie.get('Rated', '')),
                'runtime': self.runtimeToMinutes(movie.get('Runtime', '')),
                'released': movie.get('Released'),
                'year': year if isinstance(year, int) else None,
                'plot': movie.get('Plot'),
                'genres': splitString(movie.get('Genre', '')),
                'directors': splitString(movie.get('Director', '')),
                'writers': splitString(movie.get('Writer', '')),
                'actors': splitString(movie.get('Actors', '')),
            }
            movie_data = dict((k, v) for k, v in movie_data.items() if v)
        except:
            log.error('Failed parsing IMDB API json: %s', traceback.format_exc())

        return movie_data

    def isDisabled(self):
        if self.getApiKey() == '':
            log.error('No API key provided.')
            return True
        return False

    def getApiKey(self):
        apikey = self.conf('api_key')
        return apikey

    def runtimeToMinutes(self, runtime_str):
        """Convert an OMDB runtime string such as '2 h 15 min' to total minutes."""
        runtime = 0

        # Raw string so '\d' is a regex escape, not a (deprecated) string escape
        regex = r'(\d*.?\d+).(h|hr|hrs|mins|min)+'
        matches = re.findall(regex, runtime_str)
        for match in matches:
            nr, size = match
            # '==' instead of 'is': identity comparison of strings only worked
            # by CPython interning accident and raises SyntaxWarning on 3.8+.
            runtime += tryInt(nr) * (60 if str(size)[0] == 'h' else 1)

        return runtime


config = [{
    'name': 'omdbapi',
    'groups': [
        {
            'tab': 'providers',
            'name': 'tmdb',
            'label': 'OMDB API',
            'hidden': True,
            'description': 'Used for all calls to TheMovieDB.',
            'options': [
                {
                    'name': 'api_key',
                    'default': 'bbc0e412',  # Don't be a dick and use this somewhere else
                    'label': 'Api Key',
                },
            ],
        },
    ],
}]
5,737
Python
.py
132
31.825758
130
0.524511
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,844
flickchart.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/userscript/flickchart.py
import traceback

from couchpotato.core.event import fireEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.userscript.base import UserscriptBase

log = CPLog(__name__)

autoload = 'Flickchart'


class Flickchart(UserscriptBase):
    """Userscript provider resolving flickchart.com movie pages."""

    version = 2

    includes = ['http://www.flickchart.com/movie/*']

    def getMovie(self, url):
        """Scrape the page <title>, strip the site suffix and search by name/year."""
        try:
            data = self.getUrl(url)
        except:
            return

        try:
            title_start = data.find('<title>')
            title_end = data.find('</title>', title_start)
            # Title looks like '<movie> - Flickchart'; keep the movie part
            page_title = data[title_start + len('<title>'):title_end].strip().split('- Flick')

            year_name = fireEvent('scanner.name_year', page_title[0], single = True)
            return self.search(year_name.get('name'), year_name.get('year'))
        except:
            log.error('Failed parsing page for title and year: %s', traceback.format_exc())
916
Python
.py
22
33.636364
91
0.636364
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,845
imdb.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/userscript/imdb.py
from couchpotato.core.helpers.variable import getImdb
from couchpotato.core.media._base.providers.userscript.base import UserscriptBase

autoload = 'IMDB'


class IMDB(UserscriptBase):
    """Userscript provider for imdb.com title pages."""

    includes = ['*://*.imdb.com/title/tt*', '*://imdb.com/title/tt*']

    def getMovie(self, url):
        # The IMDB id is embedded in the URL itself, no page fetch needed
        imdb_id = getImdb(url)
        return self.getInfo(imdb_id)
328
Python
.py
7
42.857143
81
0.740506
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,846
filmweb.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/userscript/filmweb.py
from bs4 import BeautifulSoup

from couchpotato import fireEvent
from couchpotato.core.media._base.providers.userscript.base import UserscriptBase

autoload = 'Filmweb'


class Filmweb(UserscriptBase):
    """Userscript provider for filmweb.pl movie pages."""

    version = 3

    includes = ['http://www.filmweb.pl/film/*']

    def getMovie(self, url):
        """Fetch the page, read its title meta tag and search by name/year."""
        # Cookie skips the welcome interstitial so the real page is served
        cookie = {'Cookie': 'welcomeScreen=welcome_screen'}

        try:
            data = self.urlopen(url, headers = cookie)
        except:
            return

        # Explicit parser: bs4 auto-detection is environment-dependent and warns
        # on newer versions; matches the parser used by the other providers.
        html = BeautifulSoup(data, 'html.parser')
        # Strip the trailing ' - Filmweb' (9 chars) from the title
        name = html.find('meta', {'name': 'title'})['content'][:-9].strip()

        name_year = fireEvent('scanner.name_year', name, single = True)
        name = name_year.get('name')
        year = name_year.get('year')

        return self.search(name, year)
761
Python
.py
19
32.789474
81
0.647059
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,847
reddit.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/userscript/reddit.py
from couchpotato import fireEvent
from couchpotato.core.helpers.variable import splitString
from couchpotato.core.media._base.providers.userscript.base import UserscriptBase

autoload = 'Reddit'


class Reddit(UserscriptBase):
    """Userscript provider for /r/Ijustwatched reddit threads."""

    includes = ['*://www.reddit.com/r/Ijustwatched/comments/*']

    def getMovie(self, url):
        # Thread slugs look like '.../ijw_<movie_name>/...': extract the name
        slug = splitString(splitString(url, '/ijw_')[-1], '/')[0]
        if slug.startswith('ijw_'):
            slug = slug[4:]

        year_name = fireEvent('scanner.name_year', slug, single = True)
        return self.search(year_name.get('name'), year_name.get('year'))
600
Python
.py
12
44
81
0.696552
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,848
filmcentrum.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/userscript/filmcentrum.py
from couchpotato.core.media._base.providers.userscript.base import UserscriptBase

autoload = 'FilmCentrum'


class FilmCentrum(UserscriptBase):
    """Userscript provider for filmcentrum.nl; the base class behavior suffices."""

    includes = ['*://filmcentrum.nl/films/*']
192
Python
.py
4
45
81
0.793478
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,849
allocine.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/userscript/allocine.py
import traceback

from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.userscript.base import UserscriptBase

log = CPLog(__name__)

autoload = 'AlloCine'


class AlloCine(UserscriptBase):
    """Userscript provider for allocine.fr movie pages."""

    includes = ['http://www.allocine.fr/film/*']

    def getMovie(self, url):
        """Parse the page <title> into '<name> - ... <year>' and search on it."""
        # Only the movie detail pages carry the info we need
        if not 'fichefilm_gen_cfilm' in url:
            return 'Url isn\'t from a movie'

        try:
            data = self.getUrl(url)
        except:
            return

        try:
            start = data.find('<title>')
            end = data.find('</title>', start)
            parts = data[start + len('<title>'):end].strip().split('-')

            # Name before the dash; the year is the last 4 chars after it
            name = parts[0].strip()
            year = parts[1].strip()[-4:]
            return self.search(name, year)
        except:
            log.error('Failed parsing page for title and year: %s', traceback.format_exc())
898
Python
.py
23
30.173913
91
0.597448
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,850
moviesio.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/userscript/moviesio.py
from couchpotato.core.media._base.providers.userscript.base import UserscriptBase

autoload = 'MoviesIO'


class MoviesIO(UserscriptBase):
    """Userscript provider for movies.io; the base class behavior suffices."""

    includes = ['*://movies.io/m/*']
177
Python
.py
4
41.25
81
0.775148
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,851
trakt.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/userscript/trakt.py
from couchpotato.core.media._base.providers.userscript.base import UserscriptBase

autoload = 'Trakt'


class Trakt(UserscriptBase):
    """Userscript provider for trakt.tv movie pages (detail sub-pages excluded)."""

    version = 2

    includes = ['*://trakt.tv/movies/*', '*://*.trakt.tv/movies/*']
    excludes = ['*://trakt.tv/movies/*/*', '*://*.trakt.tv/movies/*/*']
291
Python
.py
6
44.666667
81
0.657143
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,852
filmstarts.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/userscript/filmstarts.py
from bs4 import BeautifulSoup
from couchpotato.core.media._base.providers.userscript.base import UserscriptBase
import re

autoload = 'Filmstarts'


class Filmstarts(UserscriptBase):
    """Userscript provider for filmstarts.de review pages."""

    includes = ['*://www.filmstarts.de/kritiken/*']

    def getMovie(self, url):
        """Pull the original title (or og:title) and production year, then search."""
        try:
            data = self.getUrl(url)
        except:
            return

        # Explicit parser: bs4 auto-detection is environment-dependent and warns
        html = BeautifulSoup(data, 'html.parser')
        table = html.find("section", attrs={"class": "section ovw ovw-synopsis", "id": "synopsis-details"})

        if table.find(text=re.compile('Originaltitel')):  # some trailing whitespaces on some pages
            # Get original film title from the table specified above
            # (fixed accidental 'name = name = ...' double assignment)
            name = table.find("span", text=re.compile("Originaltitel")).findNext('h2').text
        else:
            # If none is available get the title from the meta data
            name = html.find("meta", {"property": "og:title"})['content']

        # Year of production is not available in the meta data, so get it from the table
        year = table.find("span", text=re.compile("Produktionsjahr")).findNext('span').text

        return self.search(name, year)
1,035
Python
.py
22
43.409091
101
0.73716
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,853
letterboxd.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/userscript/letterboxd.py
from couchpotato.core.media._base.providers.userscript.base import UserscriptBase

autoload = 'Letterboxd'


class Letterboxd(UserscriptBase):
    """Userscript provider for letterboxd.com; the base class behavior suffices."""

    includes = ['*://letterboxd.com/film/*']
189
Python
.py
4
44.25
81
0.790055
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,854
tmdb.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/userscript/tmdb.py
import re

from couchpotato.core.event import fireEvent
from couchpotato.core.media._base.providers.userscript.base import UserscriptBase

autoload = 'TMDB'


class TMDB(UserscriptBase):
    """Userscript provider for themoviedb.org movie pages."""

    version = 2

    includes = ['*://www.themoviedb.org/movie/*']

    def getMovie(self, url):
        """Extract the numeric TMDB id from the URL and resolve it via IMDB."""
        # Raw string for the pattern; guard against URLs without a numeric id,
        # which previously raised AttributeError on match.group().
        match = re.search(r'(?P<id>\d+)', url)
        if not match:
            return

        movie = fireEvent('movie.info_by_tmdb', identifier = match.group('id'), extended = False, merge = True)

        # .get avoids a KeyError when the lookup returned no imdb id
        if movie and movie.get('imdb'):
            return self.getInfo(movie['imdb'])
519
Python
.py
12
37.416667
111
0.68008
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,855
youteather.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/userscript/youteather.py
import re

from couchpotato.core.media._base.providers.userscript.base import UserscriptBase

autoload = 'YouTheater'


class YouTheater(UserscriptBase):
    """Userscript provider for youtheater.com and its sratim.co.il mirrors."""

    # Raw string form of the same pattern bytes as before
    id_re = re.compile(r"view\.php\?id=(\d+)")
    includes = ['http://www.youtheater.com/view.php?id=*', 'http://youtheater.com/view.php?id=*',
                'http://www.sratim.co.il/view.php?id=*', 'http://sratim.co.il/view.php?id=*']

    def getMovie(self, url):
        # Normalize every mirror URL onto the canonical youtheater.com host
        view_id = self.id_re.findall(url)[0]
        canonical = 'http://www.youtheater.com/view.php?id=%s' % view_id
        return super(YouTheater, self).getMovie(canonical)
578
Python
.py
11
46.272727
97
0.661319
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,856
appletrailers.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/userscript/appletrailers.py
import re
import traceback

from couchpotato import tryInt, CPLog
from couchpotato.core.media._base.providers.userscript.base import UserscriptBase

log = CPLog(__name__)

autoload = 'AppleTrailers'


class AppleTrailers(UserscriptBase):
    """Userscript provider for trailers.apple.com pages."""

    includes = ['http://trailers.apple.com/trailers/*']

    def getMovie(self, url):
        """Read the film id from the page, then query Apple's JSON feed."""
        try:
            data = self.getUrl(url)
        except:
            return

        try:
            # The page embeds its film id in an inline script
            film_id = re.search("FilmId.*=.*\'(?P<id>.*)\';", data).group('id')
            feed = self.getJsonData('https://trailers.apple.com/trailers/feeds/data/%s.json' % film_id)

            name = feed['page']['movie_title']
            year = tryInt(feed['page']['release_date'][0:4])

            return self.search(name, year)
        except:
            log.error('Failed getting apple trailer info: %s', traceback.format_exc())

        return None
899
Python
.py
23
30.608696
98
0.609954
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,857
moviemeter.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/userscript/moviemeter.py
from couchpotato.core.helpers.variable import getImdb
from couchpotato.core.media._base.providers.userscript.base import UserscriptBase

autoload = 'MovieMeter'


class MovieMeter(UserscriptBase):
    """Userscript provider for moviemeter.nl film pages."""

    includes = ['*://*.moviemeter.nl/film/*', '*://moviemeter.nl/film/*']
    version = 3

    def getMovie(self, url):
        # 'cok=1' accepts the cookie wall, otherwise the page content is hidden
        cookie = {'Cookie': 'cok=1'}

        try:
            data = self.urlopen(url, headers = cookie)
        except:
            return

        # The page body contains an IMDB id; resolve the movie through it
        return self.getInfo(getImdb(data))
505
Python
.py
13
31.923077
81
0.664596
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,858
rottentomatoes.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/userscript/rottentomatoes.py
import re
import traceback

from couchpotato import fireEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.providers.userscript.base import UserscriptBase

log = CPLog(__name__)

autoload = 'RottenTomatoes'


class RottenTomatoes(UserscriptBase):
    """Userscript provider for rottentomatoes.com movie pages."""

    includes = ['*://www.rottentomatoes.com/m/*']
    excludes = ['*://www.rottentomatoes.com/m/*/*/']

    version = 4

    def getMovie(self, url):
        """Parse the page <title> into name/year and search on both."""
        try:
            data = self.getUrl(url)
        except:
            return

        try:
            # Title looks like '<name> - Rotten Tomatoes'; keep the name part
            raw_title = re.findall("<title>(.*)</title>", data)[0]
            cleaned = raw_title.split(' - Rotten')[0].replace('&nbsp;', ' ').decode('unicode_escape')

            name_year = fireEvent('scanner.name_year', cleaned, single = True)
            name = name_year.get('name')
            year = name_year.get('year')

            if name and year:
                return self.search(name, year)
        except:
            log.error('Failed parsing page for title and year: %s', traceback.format_exc())
1,035
Python
.py
26
31.653846
98
0.617085
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,859
criticker.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/userscript/criticker.py
from couchpotato.core.media._base.providers.userscript.base import UserscriptBase

autoload = 'Criticker'


class Criticker(UserscriptBase):
    """Userscript provider for criticker.com; the base class behavior suffices."""

    includes = ['http://www.criticker.com/film/*']
193
Python
.py
4
45.25
81
0.794595
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,860
mediabrowser.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/metadata/mediabrowser.py
import os

from couchpotato.core.media.movie.providers.metadata.base import MovieMetaData

autoload = 'MediaBrowser'


class MediaBrowser(MovieMetaData):
    """Writes MediaBrowser-style artwork names (folder.jpg / backdrop.jpg)."""

    def getThumbnailName(self, name, root, i):
        # MediaBrowser expects the poster as folder.jpg inside the movie folder
        return os.path.join(root, 'folder.jpg')

    def getFanartName(self, name, root, i):
        return os.path.join(root, 'backdrop.jpg')


config = [{
    'name': 'mediabrowser',
    'groups': [
        {
            'tab': 'renamer',
            'subtab': 'metadata',
            'name': 'mediabrowser_metadata',
            'label': 'MediaBrowser',
            'description': 'Generate folder.jpg and backdrop.jpg',
            'options': [
                {
                    'name': 'meta_enabled',
                    'default': False,
                    'type': 'enabler',
                },
            ],
        },
    ],
}]
846
Python
.py
27
21.555556
78
0.528395
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,861
wmc.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/metadata/wmc.py
import os

from couchpotato.core.media.movie.providers.metadata.base import MovieMetaData

autoload = 'WindowsMediaCenter'


class WindowsMediaCenter(MovieMetaData):
    """Writes the folder.jpg poster used by Windows Explorer / Media Center."""

    def getThumbnailName(self, name, root, i):
        return os.path.join(root, 'folder.jpg')


config = [{
    'name': 'windowsmediacenter',
    'groups': [
        {
            'tab': 'renamer',
            'subtab': 'metadata',
            'name': 'windowsmediacenter_metadata',
            'label': 'Windows Explorer / Media Center',
            'description': 'Generate folder.jpg',
            'options': [
                {
                    'name': 'meta_enabled',
                    'default': False,
                    'type': 'enabler',
                },
            ],
        },
    ],
}]
777
Python
.py
25
21.12
78
0.526882
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,862
wdtv.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/metadata/wdtv.py
from xml.etree.ElementTree import Element, SubElement, tostring
import os
import re
import traceback
import xml.dom.minidom

from couchpotato.core.media.movie.providers.metadata.base import MovieMetaData
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import getTitle
from couchpotato.core.logger import CPLog

autoload = 'WdtvLive'

log = CPLog(__name__)


class WdtvLive(MovieMetaData):
    """Generates WDTV Live metadata: a '<name>.xml' NFO and a '<name>.jpg' thumbnail."""

    def getThumbnailName(self, name, root, i):
        return self.createMetaName('%s.jpg', name, root)

    def createMetaName(self, basename, name, root):
        """Build the full path of a metadata file placed next to the movie."""
        return os.path.join(root, basename.replace('%s', name))

    def getNfoName(self, name, root, i):
        return self.createMetaName('%s.xml', name, root)

    def getNfo(self, movie_info=None, data=None, i=0):
        """Render the WDTV 'details' XML for a renamed movie.

        movie_info: provider info dict (titles, images, rating, ...)
        data: renamer group data (identifier, files, meta_data, ...)
        Returns the pretty-printed XML document as UTF-8 bytes.
        """
        if not data: data = {}
        if not movie_info: movie_info = {}

        nfoxml = Element('details')

        # Title
        try:
            el = SubElement(nfoxml, 'title')
            el.text = toUnicode(getTitle(data))
        except:
            pass

        # IMDB id
        try:
            el = SubElement(nfoxml, 'id')
            el.text = toUnicode(data['identifier'])
        except:
            pass

        # Runtime
        try:
            runtime = SubElement(nfoxml, 'runtime')
            runtime.text = '%s min' % movie_info.get('runtime')
        except:
            pass

        # Other values; 'tag:key' entries map an info key onto a different XML tag
        types = ['year', 'mpaa', 'originaltitle:original_title', 'outline', 'plot', 'tagline', 'premiered:released']
        for type in types:

            if ':' in type:
                name, type = type.split(':')
            else:
                name = type

            try:
                if movie_info.get(type):
                    el = SubElement(nfoxml, name)
                    el.text = toUnicode(movie_info.get(type, ''))
            except:
                pass

        # Rating: first source that provides one wins
        for rating_type in ['imdb', 'rotten', 'tmdb']:
            try:
                r, v = movie_info['rating'][rating_type]
                rating = SubElement(nfoxml, 'rating')
                rating.text = str(r)
                votes = SubElement(nfoxml, 'votes')
                votes.text = str(v)
                break
            except:
                log.debug('Failed adding rating info from %s: %s', (rating_type, traceback.format_exc()))

        # Genre
        for genre in movie_info.get('genres', []):
            genres = SubElement(nfoxml, 'genre')
            genres.text = toUnicode(genre)

        # Actors, with optional role name and thumbnail
        for actor_name in movie_info.get('actor_roles', {}):
            role_name = movie_info['actor_roles'][actor_name]

            actor = SubElement(nfoxml, 'actor')
            name = SubElement(actor, 'name')
            name.text = toUnicode(actor_name)
            if role_name:
                role = SubElement(actor, 'role')
                role.text = toUnicode(role_name)
            if movie_info['images']['actors'].get(actor_name):
                thumb = SubElement(actor, 'thumb')
                thumb.text = toUnicode(movie_info['images']['actors'].get(actor_name))

        # Directors
        for director_name in movie_info.get('directors', []):
            director = SubElement(nfoxml, 'director')
            director.text = toUnicode(director_name)

        # Writers
        for writer in movie_info.get('writers', []):
            writers = SubElement(nfoxml, 'credits')
            writers.text = toUnicode(writer)

        # Sets or collections
        collection_name = movie_info.get('collection')
        if collection_name:
            collection = SubElement(nfoxml, 'set')
            collection.text = toUnicode(collection_name)
            sorttitle = SubElement(nfoxml, 'sorttitle')
            sorttitle.text = '%s %s' % (toUnicode(collection_name), movie_info.get('year'))

        # Images
        for image_url in movie_info['images']['poster_original']:
            image = SubElement(nfoxml, 'thumb')
            image.text = toUnicode(image_url)

        image_types = [
            ('fanart', 'backdrop_original'),
            ('banner', 'banner'),
            ('discart', 'disc_art'),
            ('logo', 'logo'),
            ('clearart', 'clear_art'),
            ('landscape', 'landscape'),
            ('extrathumb', 'extra_thumbs'),
            ('extrafanart', 'extra_fanart'),
        ]

        for image_type in image_types:
            sub, type = image_type

            sub_element = SubElement(nfoxml, sub)
            for image_url in movie_info['images'][type]:
                image = SubElement(sub_element, 'thumb')
                image.text = toUnicode(image_url)

        # Add trailer if found
        trailer_found = False
        if data.get('renamed_files'):
            for filename in data.get('renamed_files'):
                if 'trailer' in filename:
                    trailer = SubElement(nfoxml, 'trailer')
                    trailer.text = toUnicode(filename)
                    trailer_found = True

        if not trailer_found and data['files'].get('trailer'):
            trailer = SubElement(nfoxml, 'trailer')
            trailer.text = toUnicode(data['files']['trailer'][0])

        # Add file metadata
        fileinfo = SubElement(nfoxml, 'fileinfo')
        streamdetails = SubElement(fileinfo, 'streamdetails')

        # Video data
        if data['meta_data'].get('video'):
            video = SubElement(streamdetails, 'video')
            codec = SubElement(video, 'codec')
            codec.text = toUnicode(data['meta_data']['video'])
            aspect = SubElement(video, 'aspect')
            aspect.text = str(data['meta_data']['aspect'])
            width = SubElement(video, 'width')
            width.text = str(data['meta_data']['resolution_width'])
            height = SubElement(video, 'height')
            height.text = str(data['meta_data']['resolution_height'])

        # Audio data
        if data['meta_data'].get('audio'):
            audio = SubElement(streamdetails, 'audio')
            codec = SubElement(audio, 'codec')
            codec.text = toUnicode(data['meta_data'].get('audio'))
            channels = SubElement(audio, 'channels')
            channels.text = toUnicode(data['meta_data'].get('audio_channels'))

        # Clean up the xml and return it. Raw strings: '\s' and '\g' were
        # previously invalid string escapes that only worked because Python
        # kept them verbatim (deprecated, warns on modern versions).
        nfoxml = xml.dom.minidom.parseString(tostring(nfoxml))
        xml_string = nfoxml.toprettyxml(indent = '  ')
        text_re = re.compile(r'>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL)
        xml_string = text_re.sub(r'>\g<1></', xml_string)

        return xml_string.encode('utf-8')


config = [{
    'name': 'wdtvlive',
    'groups': [
        {
            'tab': 'renamer',
            'subtab': 'metadata',
            'name': 'wdtvlive_metadata',
            'label': 'WDTV Live',
            'description': 'Metadata for WDTV',
            'options': [
                {
                    'name': 'meta_enabled',
                    'default': False,
                    'type': 'enabler',
                },
                {
                    'name': 'meta_nfo',
                    'label': 'NFO',
                    'default': True,
                    'type': 'bool',
                    'description': 'Generate metadata xml',
                },
                {
                    'name': 'meta_thumbnail',
                    'label': 'Thumbnail',
                    'default': True,
                    'type': 'bool',
                    'description': 'Generate thumbnail jpg',
                }
            ],
        },
    ],
}]
7,626
Python
.py
186
28.908602
116
0.531398
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,863
ps3.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/metadata/ps3.py
import os

from couchpotato.core.media.movie.providers.metadata.base import MovieMetaData

autoload = 'SonyPS3'


class SonyPS3(MovieMetaData):
    """Writes the cover.jpg poster expected by the Sony PS3."""

    def getThumbnailName(self, name, root, i):
        return os.path.join(root, 'cover.jpg')


config = [{
    'name': 'sonyps3',
    'groups': [
        {
            'tab': 'renamer',
            'subtab': 'metadata',
            'name': 'sonyps3_metadata',
            'label': 'Sony PS3',
            'description': 'Generate cover.jpg',
            'options': [
                {
                    'name': 'meta_enabled',
                    'default': False,
                    'type': 'enabler',
                },
            ],
        },
    ],
}]
708
Python
.py
25
18.36
78
0.484444
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,864
base.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/metadata/base.py
import os import shutil import traceback from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.helpers.encoding import sp, toUnicode from couchpotato.core.helpers.variable import getIdentifier, underscoreToCamel from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.metadata.base import MetaDataBase from couchpotato.environment import Env log = CPLog(__name__) class MovieMetaData(MetaDataBase): enabled_option = 'meta_enabled' def __init__(self): addEvent('renamer.after', self.create) def create(self, message = None, group = None): if self.isDisabled(): return if not group: group = {} log.info('Creating %s metadata.', self.getName()) # Update library to get latest info try: group['media'] = fireEvent('movie.update', group['media'].get('_id'), identifier = getIdentifier(group['media']), extended = True, single = True) except: log.error('Failed to update movie, before creating metadata: %s', traceback.format_exc()) root_name = toUnicode(self.getRootName(group)) meta_name = toUnicode(os.path.basename(root_name)) root = toUnicode(os.path.dirname(root_name)) movie_info = group['media'].get('info') for file_type in ['nfo']: try: self._createType(meta_name, root, movie_info, group, file_type, 0) except: log.error('Unable to create %s file: %s', ('nfo', traceback.format_exc())) for file_type in ['thumbnail', 'fanart', 'banner', 'disc_art', 'logo', 'clear_art', 'landscape', 'extra_thumbs', 'extra_fanart']: try: if file_type == 'thumbnail': num_images = len(movie_info['images']['poster_original']) elif file_type == 'fanart': num_images = len(movie_info['images']['backdrop_original']) else: num_images = len(movie_info['images'][file_type]) for i in range(num_images): self._createType(meta_name, root, movie_info, group, file_type, i) except: log.error('Unable to create %s file: %s', (file_type, traceback.format_exc())) def _createType(self, meta_name, root, movie_info, group, file_type, i): # Get file path camelcase_method = 
underscoreToCamel(file_type.capitalize()) name = getattr(self, 'get' + camelcase_method + 'Name')(meta_name, root, i) if name and (self.conf('meta_' + file_type) or self.conf('meta_' + file_type) is None): # Get file content content = getattr(self, 'get' + camelcase_method)(movie_info = movie_info, data = group, i = i) if content: log.debug('Creating %s file: %s', (file_type, name)) if os.path.isfile(content): content = sp(content) name = sp(name) if not os.path.exists(os.path.dirname(name)): os.makedirs(os.path.dirname(name)) shutil.copy2(content, name) shutil.copyfile(content, name) # Try and copy stats seperately try: shutil.copystat(content, name) except: pass else: self.createFile(name, content) group['renamed_files'].append(name) try: os.chmod(sp(name), Env.getPermission('file')) except: log.debug('Failed setting permissions for %s: %s', (name, traceback.format_exc())) def getRootName(self, data = None): if not data: data = {} return os.path.join(data['destination_dir'], data['filename']) def getFanartName(self, name, root, i): return def getThumbnailName(self, name, root, i): return def getBannerName(self, name, root, i): return def getClearArtName(self, name, root, i): return def getLogoName(self, name, root, i): return def getDiscArtName(self, name, root, i): return def getLandscapeName(self, name, root, i): return def getExtraThumbsName(self, name, root, i): return def getExtraFanartName(self, name, root, i): return def getNfoName(self, name, root, i): return def getNfo(self, movie_info = None, data = None, i = 0): if not data: data = {} if not movie_info: movie_info = {} def getThumbnail(self, movie_info = None, data = None, wanted_file_type = 'poster_original', i = 0): if not data: data = {} if not movie_info: movie_info = {} # See if it is in current files files = data['media'].get('files') if files.get('image_' + wanted_file_type): if os.path.isfile(files['image_' + wanted_file_type][i]): return files['image_' + wanted_file_type][i] # Download using 
existing info try: images = movie_info['images'][wanted_file_type] file_path = fireEvent('file.download', url = images[i], single = True) return file_path except: pass def getFanart(self, movie_info = None, data = None, i = 0): if not data: data = {} if not movie_info: movie_info = {} return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'backdrop_original', i = i) def getBanner(self, movie_info = None, data = None, i = 0): if not data: data = {} if not movie_info: movie_info = {} return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'banner', i = i) def getClearArt(self, movie_info = None, data = None, i = 0): if not data: data = {} if not movie_info: movie_info = {} return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'clear_art', i = i) def getLogo(self, movie_info = None, data = None, i = 0): if not data: data = {} if not movie_info: movie_info = {} return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'logo', i = i) def getDiscArt(self, movie_info = None, data = None, i = 0): if not data: data = {} if not movie_info: movie_info = {} return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'disc_art', i = i) def getLandscape(self, movie_info = None, data = None, i = 0): if not data: data = {} if not movie_info: movie_info = {} return self.getThumbnail(movie_info = movie_info, data= data, wanted_file_type = 'landscape', i = i) def getExtraThumbs(self, movie_info = None, data = None, i = 0): if not data: data = {} if not movie_info: movie_info = {} return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'extra_thumbs', i = i) def getExtraFanart(self, movie_info = None, data = None, i = 0): if not data: data = {} if not movie_info: movie_info = {} return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'extra_fanart', i = i)
7,307
Python
.py
142
40.873239
157
0.591994
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,865
xbmc.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/metadata/xbmc.py
from xml.etree.ElementTree import Element, SubElement, tostring import os import re import traceback import xml.dom.minidom import time from couchpotato.core.media.movie.providers.metadata.base import MovieMetaData from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.helpers.variable import getTitle from couchpotato.core.logger import CPLog log = CPLog(__name__) autoload = 'XBMC' class XBMC(MovieMetaData): def getFanartName(self, name, root, i): return self.createMetaName(self.conf('meta_fanart_name'), name, root) def getThumbnailName(self, name, root, i): return self.createMetaName(self.conf('meta_thumbnail_name'), name, root) def getNfoName(self, name, root, i): return self.createMetaName(self.conf('meta_nfo_name'), name, root) def getBannerName(self, name, root, i): return self.createMetaName(self.conf('meta_banner_name'), name, root) def getClearArtName(self, name, root, i): return self.createMetaName(self.conf('meta_clear_art_name'), name, root) def getLogoName(self, name, root, i): return self.createMetaName(self.conf('meta_logo_name'), name, root) def getDiscArtName(self, name, root, i): return self.createMetaName(self.conf('meta_disc_art_name'), name, root) def getLandscapeName(self, name, root, i): return self.createMetaName(self.conf('meta_landscape_name'), name, root) def getExtraThumbsName(self, name, root, i): return self.createMetaNameMult(self.conf('meta_extra_thumbs_name'), name, root, i) def getExtraFanartName(self, name, root, i): return self.createMetaNameMult(self.conf('meta_extra_fanart_name'), name, root, i) def createMetaName(self, basename, name, root): return os.path.join(root, basename.replace('%s', name)) def createMetaNameMult(self, basename, name, root, i): return os.path.join(root, basename.replace('%s', name).replace('<i>', str(i + 1))) def getNfo(self, movie_info=None, data=None, i=0): if not data: data = {} if not movie_info: movie_info = {} # return imdb url only if self.conf('meta_url_only'): return 
'http://www.imdb.com/title/%s/' % toUnicode(data['identifier']) nfoxml = Element('movie') # Title try: el = SubElement(nfoxml, 'title') el.text = toUnicode(getTitle(data)) except: pass # IMDB id try: el = SubElement(nfoxml, 'id') el.text = toUnicode(data['identifier']) except: pass # Runtime try: runtime = SubElement(nfoxml, 'runtime') runtime.text = '%s min' % movie_info.get('runtime') except: pass # mpaa try: mpaa = SubElement(nfoxml, 'mpaa') mpaa.text = toUnicode('Rated %s' % movie_info.get('mpaa')) except: pass # Other values types = ['year', 'originaltitle:original_title', 'outline', 'plot', 'tagline'] for type in types: if ':' in type: name, type = type.split(':') else: name = type try: if movie_info.get(type): el = SubElement(nfoxml, name) el.text = toUnicode(movie_info.get(type, '')) except: pass # Release date try: if movie_info.get('released'): el = SubElement(nfoxml, 'premiered') el.text = time.strftime('%Y-%m-%d', time.strptime(movie_info.get('released'), '%d %b %Y')) except: log.debug('Failed to parse release date %s: %s', (movie_info.get('released'), traceback.format_exc())) # Rating for rating_type in ['imdb', 'rotten', 'tmdb']: try: r, v = movie_info['rating'][rating_type] rating = SubElement(nfoxml, 'rating') rating.text = str(r) votes = SubElement(nfoxml, 'votes') votes.text = str(v) break except: log.debug('Failed adding rating info from %s: %s', (rating_type, traceback.format_exc())) # Genre for genre in movie_info.get('genres', []): genres = SubElement(nfoxml, 'genre') genres.text = toUnicode(genre) # Actors for actor_name in movie_info.get('actor_roles', {}): role_name = movie_info['actor_roles'][actor_name] actor = SubElement(nfoxml, 'actor') name = SubElement(actor, 'name') name.text = toUnicode(actor_name) if role_name: role = SubElement(actor, 'role') role.text = toUnicode(role_name) if movie_info['images']['actors'].get(actor_name): thumb = SubElement(actor, 'thumb') thumb.text = toUnicode(movie_info['images']['actors'].get(actor_name)) # 
Directors for director_name in movie_info.get('directors', []): director = SubElement(nfoxml, 'director') director.text = toUnicode(director_name) # Writers for writer in movie_info.get('writers', []): writers = SubElement(nfoxml, 'credits') writers.text = toUnicode(writer) # Sets or collections collection_name = movie_info.get('collection') if collection_name: collection = SubElement(nfoxml, 'set') collection.text = toUnicode(collection_name) sorttitle = SubElement(nfoxml, 'sorttitle') sorttitle.text = '%s %s' % (toUnicode(collection_name), movie_info.get('year')) # Images for image_url in movie_info['images']['poster_original']: image = SubElement(nfoxml, 'thumb') image.text = toUnicode(image_url) image_types = [ ('fanart', 'backdrop_original'), ('banner', 'banner'), ('discart', 'disc_art'), ('logo', 'logo'), ('clearart', 'clear_art'), ('landscape', 'landscape'), ('extrathumb', 'extra_thumbs'), ('extrafanart', 'extra_fanart'), ] for image_type in image_types: sub, type = image_type sub_element = SubElement(nfoxml, sub) for image_url in movie_info['images'][type]: image = SubElement(sub_element, 'thumb') image.text = toUnicode(image_url) # Add trailer if found trailer_found = False if data.get('renamed_files'): for filename in data.get('renamed_files'): if 'trailer' in filename: trailer = SubElement(nfoxml, 'trailer') trailer.text = toUnicode(filename) trailer_found = True if not trailer_found and data['files'].get('trailer'): trailer = SubElement(nfoxml, 'trailer') trailer.text = toUnicode(data['files']['trailer'][0]) # Add file metadata fileinfo = SubElement(nfoxml, 'fileinfo') streamdetails = SubElement(fileinfo, 'streamdetails') # Video data if data['meta_data'].get('video'): video = SubElement(streamdetails, 'video') codec = SubElement(video, 'codec') codec.text = toUnicode(data['meta_data']['video']) aspect = SubElement(video, 'aspect') aspect.text = str(data['meta_data']['aspect']) width = SubElement(video, 'width') width.text = 
str(data['meta_data']['resolution_width']) height = SubElement(video, 'height') height.text = str(data['meta_data']['resolution_height']) # Audio data if data['meta_data'].get('audio'): audio = SubElement(streamdetails, 'audio') codec = SubElement(audio, 'codec') codec.text = toUnicode(data['meta_data'].get('audio')) channels = SubElement(audio, 'channels') channels.text = toUnicode(data['meta_data'].get('audio_channels')) # Clean up the xml and return it nfoxml = xml.dom.minidom.parseString(tostring(nfoxml)) xml_string = nfoxml.toprettyxml(indent = ' ') text_re = re.compile('>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL) xml_string = text_re.sub('>\g<1></', xml_string) return xml_string.encode('utf-8') config = [{ 'name': 'xbmc', 'groups': [ { 'tab': 'renamer', 'subtab': 'metadata', 'name': 'xbmc_metadata', 'label': 'Kodi', 'description': 'Enable metadata Kodi can understand', 'options': [ { 'name': 'meta_enabled', 'default': False, 'type': 'enabler', }, { 'name': 'meta_nfo', 'label': 'NFO', 'default': True, 'type': 'bool', }, { 'name': 'meta_nfo_name', 'label': 'NFO filename', 'default': '%s.nfo', 'advanced': True, 'description': '<strong>%s</strong> is the rootname of the movie. 
For example "/path/to/movie cd1.mkv" will be "/path/to/movie"' }, { 'name': 'meta_url_only', 'label': 'Only IMDB URL', 'default': False, 'advanced': True, 'description': 'Create a nfo with only the IMDB url inside', 'type': 'bool', }, { 'name': 'meta_fanart', 'label': 'Fanart', 'default': True, 'type': 'bool', }, { 'name': 'meta_fanart_name', 'label': 'Fanart filename', 'default': '%s-fanart.jpg', 'advanced': True, }, { 'name': 'meta_thumbnail', 'label': 'Thumbnail', 'default': True, 'type': 'bool', }, { 'name': 'meta_thumbnail_name', 'label': 'Thumbnail filename', 'default': '%s.tbn', 'advanced': True, }, { 'name': 'meta_banner', 'label': 'Banner', 'default': False, 'type': 'bool' }, { 'name': 'meta_banner_name', 'label': 'Banner filename', 'default': 'banner.jpg', 'advanced': True, }, { 'name': 'meta_clear_art', 'label': 'ClearArt', 'default': False, 'type': 'bool' }, { 'name': 'meta_clear_art_name', 'label': 'ClearArt filename', 'default': 'clearart.png', 'advanced': True, }, { 'name': 'meta_disc_art', 'label': 'DiscArt', 'default': False, 'type': 'bool' }, { 'name': 'meta_disc_art_name', 'label': 'DiscArt filename', 'default': 'disc.png', 'advanced': True, }, { 'name': 'meta_landscape', 'label': 'Landscape', 'default': False, 'type': 'bool' }, { 'name': 'meta_landscape_name', 'label': 'Landscape filename', 'default': 'landscape.jpg', 'advanced': True, }, { 'name': 'meta_logo', 'label': 'ClearLogo', 'default': False, 'type': 'bool' }, { 'name': 'meta_logo_name', 'label': 'ClearLogo filename', 'default': 'logo.png', 'advanced': True, }, { 'name': 'meta_extra_thumbs', 'label': 'Extrathumbs', 'default': False, 'type': 'bool' }, { 'name': 'meta_extra_thumbs_name', 'label': 'Extrathumbs filename', 'description': '&lt;i&gt; is the image number, and must be included to have multiple images', 'default': 'extrathumbs/thumb<i>.jpg', 'advanced': True }, { 'name': 'meta_extra_fanart', 'label': 'Extrafanart', 'default': False, 'type': 'bool' }, { 'name': 
'meta_extra_fanart_name', 'label': 'Extrafanart filename', 'default': 'extrafanart/extrafanart<i>.jpg', 'description': '&lt;i&gt; is the image number, and must be included to have multiple images', 'advanced': True } ], }, ], }]
13,907
Python
.py
338
26.710059
148
0.479698
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,866
bitsoup.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/torrent/bitsoup.py
from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.bitsoup import Base from couchpotato.core.media.movie.providers.base import MovieProvider log = CPLog(__name__) autoload = 'Bitsoup' class Bitsoup(MovieProvider, Base): cat_ids = [ ([17], ['3d']), ([80], ['720p', '1080p']), ([20], ['dvdr']), ([19], ['brrip', 'dvdrip']), ] cat_backup_id = 0 def buildUrl(self, title, media, quality): query = tryUrlencode({ 'search': '"%s" %s' % (title, media['info']['year']), 'cat': self.getCatId(quality)[0], }) return query
715
Python
.py
20
29.7
71
0.618841
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,867
yts.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/torrent/yts.py
from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.yts import Base from couchpotato.core.media.movie.providers.base import MovieProvider log = CPLog(__name__) autoload = 'Yts' class Yts(MovieProvider, Base): pass
263
Python
.py
7
35.571429
69
0.814229
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,868
torrentpotato.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/torrent/torrentpotato.py
from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.helpers.variable import getIdentifier from couchpotato.core.helpers.variable import getTitle from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.torrentpotato import Base from couchpotato.core.media.movie.providers.base import MovieProvider log = CPLog(__name__) autoload = 'TorrentPotato' class TorrentPotato(MovieProvider, Base): def buildUrl(self, media, host): arguments = tryUrlencode({ 'user': host['name'], 'passkey': host['pass_key'], 'imdbid': getIdentifier(media), 'search' : getTitle(media) + ' ' + str(media['info']['year']), }) return '%s?%s' % (host['host'], arguments)
788
Python
.py
17
40.588235
77
0.710183
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,869
torrentbytes.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/torrent/torrentbytes.py
from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.torrentbytes import Base from couchpotato.core.media.movie.providers.base import MovieProvider log = CPLog(__name__) autoload = 'TorrentBytes' class TorrentBytes(MovieProvider, Base): pass
291
Python
.py
7
39.428571
76
0.832143
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,870
thepiratebay.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/torrent/thepiratebay.py
from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.logger import CPLog from couchpotato.core.event import fireEvent from couchpotato.core.media._base.providers.torrent.thepiratebay import Base from couchpotato.core.media.movie.providers.base import MovieProvider log = CPLog(__name__) autoload = 'ThePirateBay' class ThePirateBay(MovieProvider, Base): cat_ids = [ ([209], ['3d']), ([207], ['720p', '1080p', 'bd50']), ([201], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr']), ([201, 207], ['brrip']), ([202], ['dvdr']) ] def buildUrl(self, media, page, cats): return ( tryUrlencode('"%s"' % fireEvent('library.query', media, single = True)), page, ','.join(str(x) for x in cats) )
815
Python
.py
21
32.571429
84
0.623096
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,871
ilovetorrents.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/torrent/ilovetorrents.py
from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.ilovetorrents import Base from couchpotato.core.media.movie.providers.base import MovieProvider log = CPLog(__name__) autoload = 'ILoveTorrents' class ILoveTorrents(MovieProvider, Base): pass
294
Python
.py
7
39.857143
77
0.833922
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,872
torrentshack.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/torrent/torrentshack.py
from couchpotato.core.event import fireEvent from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.torrentshack import Base from couchpotato.core.media.movie.providers.base import MovieProvider log = CPLog(__name__) autoload = 'TorrentShack' class TorrentShack(MovieProvider, Base): # TorrentShack movie search categories # Movies/x264 - 300 # Movies/DVD-R - 350 # Movies/XviD - 400 # Full Blu-ray - 970 # # REMUX - 320 (not included) # Movies-HD Pack - 982 (not included) # Movies-SD Pack - 983 (not included) cat_ids = [ ([970, 320], ['bd50']), ([300, 320], ['720p', '1080p']), ([350], ['dvdr']), ([400], ['brrip', 'dvdrip']), ] cat_backup_id = 400 def buildUrl(self, media, quality): query = (tryUrlencode(fireEvent('library.query', media, single = True)), self.getSceneOnly(), self.getCatId(quality)[0]) return query
1,075
Python
.py
29
31.206897
80
0.643888
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,873
morethantv.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/torrent/morethantv.py
from couchpotato.core.event import fireEvent from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.morethantv import Base from couchpotato.core.media.movie.providers.base import MovieProvider log = CPLog(__name__) autoload = 'MoreThanTV' class MoreThanTV(MovieProvider, Base): pass
389
Python
.py
9
41.333333
74
0.845745
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,874
sceneaccess.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/torrent/sceneaccess.py
from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.sceneaccess import Base from couchpotato.core.media.movie.providers.base import MovieProvider log = CPLog(__name__) autoload = 'SceneAccess' class SceneAccess(MovieProvider, Base): cat_ids = [ ([22], ['720p', '1080p']), ([7], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr', 'brrip']), ([8], ['dvdr']), ] def buildUrl(self, title, media, quality): cat_id = self.getCatId(quality)[0] url = self.urls['search'] % (cat_id, cat_id) arguments = tryUrlencode({ 'search': '%s %s' % (title, media['info']['year']), 'method': 2, }) query = "%s&%s" % (url, arguments) return query
834
Python
.py
21
33.190476
75
0.61118
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,875
rarbg.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/torrent/rarbg.py
from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.rarbg import Base from couchpotato.core.media.movie.providers.base import MovieProvider log = CPLog(__name__) autoload = 'Rarbg' class Rarbg(MovieProvider, Base): pass
270
Python
.py
7
36.428571
69
0.818533
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,876
hd4free.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/torrent/hd4free.py
from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.hd4free import Base from couchpotato.core.media.movie.providers.base import MovieProvider log = CPLog(__name__) autoload = 'HD4Free' class HD4Free(MovieProvider, Base): pass
276
Python
.py
7
37.285714
71
0.822642
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,877
hdbits.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/torrent/hdbits.py
from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.hdbits import Base from couchpotato.core.media.movie.providers.base import MovieProvider log = CPLog(__name__) autoload = 'HDBits' class HDBits(MovieProvider, Base): pass
273
Python
.py
7
36.857143
70
0.820611
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,878
passthepopcorn.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/torrent/passthepopcorn.py
from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.passthepopcorn import Base from couchpotato.core.media.movie.providers.base import MovieProvider log = CPLog(__name__) autoload = 'PassThePopcorn' class PassThePopcorn(MovieProvider, Base): quality_search_params = { '2160p': {'resolution': '2160p'}, 'bd50': {'media': 'Blu-ray', 'format': 'BD50'}, '1080p': {'resolution': '1080p'}, '720p': {'resolution': '720p'}, 'brrip': {'resolution': 'anyhd'}, 'dvdr': {'resolution': 'anysd'}, 'dvdrip': {'media': 'DVD'}, 'scr': {'media': 'DVD-Screener'}, 'r5': {'media': 'R5'}, 'tc': {'media': 'TC'}, 'ts': {'media': 'TS'}, 'cam': {'media': 'CAM'} } post_search_filters = { '2160p': {'Resolution': ['2160p']}, 'bd50': {'Codec': ['BD50']}, '1080p': {'Resolution': ['1080p']}, '720p': {'Resolution': ['720p']}, 'brrip': {'Quality': ['High Definition'], 'Container': ['!ISO']}, 'dvdr': {'Codec': ['DVD5', 'DVD9']}, 'dvdrip': {'Source': ['DVD'], 'Codec': ['!DVD5', '!DVD9']}, 'scr': {'Source': ['DVD-Screener']}, 'r5': {'Source': ['R5']}, 'tc': {'Source': ['TC']}, 'ts': {'Source': ['TS']}, 'cam': {'Source': ['CAM']} }
1,362
Python
.py
34
32.764706
78
0.514372
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,879
magnetdl.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/torrent/magnetdl.py
from couchpotato.core.event import fireEvent from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.magnetdl import Base from couchpotato.core.media.movie.providers.base import MovieProvider log = CPLog(__name__) autoload = 'MagnetDL' class MagnetDL(MovieProvider, Base): pass
383
Python
.py
9
40.666667
72
0.843243
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,880
kickasstorrents.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/torrent/kickasstorrents.py
from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.kickasstorrents import Base from couchpotato.core.media.movie.providers.base import MovieProvider log = CPLog(__name__) autoload = 'KickAssTorrents' class KickAssTorrents(MovieProvider, Base): pass
300
Python
.py
7
40.714286
79
0.83737
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,881
awesomehd.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/torrent/awesomehd.py
from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.awesomehd import Base from couchpotato.core.media.movie.providers.base import MovieProvider log = CPLog(__name__) autoload = 'AwesomeHD' class AwesomeHD(MovieProvider, Base): pass
282
Python
.py
7
38.142857
73
0.826568
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,882
torrentday.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/torrent/torrentday.py
from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.torrentday import Base from couchpotato.core.media.movie.providers.base import MovieProvider log = CPLog(__name__) autoload = 'TorrentDay' class TorrentDay(MovieProvider, Base): cat_ids = [ ([11], ['720p', '1080p']), ([1, 21, 25], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr', 'brrip']), ([3], ['dvdr']), ([5], ['bd50']), ]
460
Python
.py
12
33.583333
75
0.62754
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,883
bithdtv.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/torrent/bithdtv.py
from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.logger import CPLog from couchpotato.core.event import fireEvent from couchpotato.core.media._base.providers.torrent.bithdtv import Base from couchpotato.core.media.movie.providers.base import MovieProvider log = CPLog(__name__) autoload = 'BiTHDTV' class BiTHDTV(MovieProvider, Base): cat_ids = [ ([2], ['bd50']), ] cat_backup_id = 7 # Movies def buildUrl(self, media, quality): query = tryUrlencode({ 'search': fireEvent('library.query', media, single = True), 'cat': self.getCatId(quality)[0] }) return query
670
Python
.py
18
31.944444
71
0.7017
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,884
torrentleech.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/torrent/torrentleech.py
from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.torrentleech import Base from couchpotato.core.media.movie.providers.base import MovieProvider log = CPLog(__name__) autoload = 'TorrentLeech' class TorrentLeech(MovieProvider, Base): cat_ids = [ ([41, 47], ['2160p']), ([13, 14, 37, 43], ['720p', '1080p']), ([13], ['bd50']), ([8], ['cam']), ([9], ['ts', 'tc']), ([10, 11, 37], ['r5', 'scr']), ([11], ['dvdrip']), ([13, 14, 37, 43], ['brrip']), ([12], ['dvdr']), ] def buildUrl(self, title, media, quality): return ( tryUrlencode(title.replace(':', '')), ','.join([str(x) for x in self.getCatId(quality)]) )
841
Python
.py
23
29.913043
76
0.567734
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,885
torrentz.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/torrent/torrentz.py
from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.torrentz import Base from couchpotato.core.media.movie.providers.base import MovieProvider log = CPLog(__name__) autoload = 'Torrentz' class Torrentz(MovieProvider, Base): def buildUrl(self, title, media, quality): return tryUrlencode('%s %s' % (title, media['info']['year']))
447
Python
.py
9
46.777778
72
0.78291
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,886
scenetime.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/torrent/scenetime.py
from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.scenetime import Base from couchpotato.core.media.movie.providers.base import MovieProvider log = CPLog(__name__) autoload = 'SceneTime' class SceneTime(MovieProvider, Base): pass
282
Python
.py
7
38.142857
73
0.826568
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,887
alpharatio.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/torrent/alpharatio.py
from couchpotato.core.event import fireEvent from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.alpharatio import Base from couchpotato.core.media.movie.providers.base import MovieProvider log = CPLog(__name__) autoload = 'AlphaRatio' class AlphaRatio(MovieProvider, Base): # AlphaRatio movie search categories # 10: MovieUHD # 13: MoviePackUHD # 9: MovieHD # 12: MoviePackHD # 8: MovieSD # 11: MoviePackSD cat_ids = [ ([10, 13], ['2160p']), ([9, 12], ['bd50']), ([9, 12], ['720p', '1080p']), ([8, 11], ['dvdr']), ([8, 11], ['brrip', 'dvdrip']), ] cat_backup_id = 8 def buildUrl(self, media, quality): query = (tryUrlencode(fireEvent('library.query', media, single = True)), self.getSceneOnly(), self.getCatId(quality)[0]) return query
979
Python
.py
28
28.928571
80
0.64089
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,888
iptorrents.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/torrent/iptorrents.py
from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.torrent.iptorrents import Base from couchpotato.core.media.movie.providers.base import MovieProvider log = CPLog(__name__) autoload = 'IPTorrents' class IPTorrents(MovieProvider, Base): cat_ids = [ ([87], ['3d']), ([89, 90], ['bd50']), ([48, 20, 62], ['720p', '1080p']), ([100, 101], ['2160p']), ([48, 20], ['brrip']), ([7, 77], ['dvdrip']), ([6], ['dvdr']), ([96], ['cam', 'ts', 'tc', 'r5', 'scr']), ] def buildUrl(self, title, media, quality): query = '"%s" %s' % (title.replace(':', ''), media['info']['year']) return self._buildUrl(query, quality)
737
Python
.py
19
32.578947
75
0.559775
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,889
imdb.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/automation/imdb.py
import traceback import re from bs4 import BeautifulSoup from couchpotato import fireEvent from couchpotato.core.helpers.encoding import ss from couchpotato.core.helpers.rss import RSS from couchpotato.core.helpers.variable import getImdb, splitString, tryInt from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.base import MultiProvider from couchpotato.core.media.movie.providers.automation.base import Automation log = CPLog(__name__) autoload = 'IMDB' class IMDB(MultiProvider): def getTypes(self): return [IMDBWatchlist, IMDBAutomation, IMDBCharts] class IMDBBase(Automation, RSS): interval = 1800 charts = { 'theater': { 'order': 1, 'name': 'IMDB - Movies in Theaters', 'url': 'http://www.imdb.com/movies-in-theaters/', }, 'boxoffice': { 'order': 2, 'name': 'IMDB - Box Office', 'url': 'http://www.imdb.com/boxoffice/', }, 'top250': { 'order': 3, 'name': 'IMDB - Top 250 Movies', 'url': 'http://www.imdb.com/chart/top', }, } def getInfo(self, imdb_id): return fireEvent('movie.info', identifier = imdb_id, extended = False, adding = False, merge = True) def getFromURL(self, url): log.debug('Getting IMDBs from: %s', url) html = self.getHTMLData(url) try: split = splitString(html, split_on = "<div class=\"list compact\">")[1] html = splitString(split, split_on = "<div class=\"pages\">")[0] except: try: split = splitString(html, split_on = "<div id=\"main\">") if len(split) < 2: log.error('Failed parsing IMDB page "%s", unexpected html.', url) return [] html = BeautifulSoup(split[1]) for x in ['list compact', 'lister', 'list detail sub-list']: html2 = html.find('div', attrs = { 'class': x }) if html2: html = html2.contents html = ''.join([str(x) for x in html]) break except: log.error('Failed parsing IMDB page "%s": %s', (url, traceback.format_exc())) html = ss(html) imdbs = getImdb(html, multiple = True) if html else [] return imdbs class IMDBWatchlist(IMDBBase): enabled_option = 'automation_enabled' def getIMDBids(self): movies = [] 
watchlist_enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))] watchlist_urls = splitString(self.conf('automation_urls')) index = -1 for watchlist_url in watchlist_urls: try: # Get list ID ids = re.findall('(?:list/|list_id=)([a-zA-Z0-9\-_]{11})', watchlist_url) if len(ids) == 1: watchlist_url = 'http://www.imdb.com/list/%s/?view=compact&sort=created:asc' % ids[0] # Try find user id with watchlist else: userids = re.findall('(ur\d{7,9})', watchlist_url) if len(userids) == 1: watchlist_url = 'http://www.imdb.com/user/%s/watchlist?view=compact&sort=created:asc' % userids[0] except: log.error('Failed getting id from watchlist: %s', traceback.format_exc()) index += 1 if not watchlist_enablers[index]: continue start = 0 while True: try: w_url = '%s&start=%s' % (watchlist_url, start) imdbs = self.getFromURL(w_url) for imdb in imdbs: if imdb not in movies: movies.append(imdb) if self.shuttingDown(): break log.debug('Found %s movies on %s', (len(imdbs), w_url)) if len(imdbs) < 225: break start = len(movies) except: log.error('Failed loading IMDB watchlist: %s %s', (watchlist_url, traceback.format_exc())) break return movies class IMDBAutomation(IMDBBase): enabled_option = 'automation_providers_enabled' def getIMDBids(self): movies = [] for name in self.charts: chart = self.charts[name] url = chart.get('url') if self.conf('automation_charts_%s' % name): imdb_ids = self.getFromURL(url) try: for imdb_id in imdb_ids: info = self.getInfo(imdb_id) if info and self.isMinimalMovie(info): movies.append(imdb_id) if self.shuttingDown(): break except: log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc())) return movies class IMDBCharts(IMDBBase): def getChartList(self): # Nearly identical to 'getIMDBids', but we don't care about minimalMovie and return all movie data (not just id) movie_lists = [] max_items = 10 for name in self.charts: chart = self.charts[name].copy() cache_key = 'imdb.chart_display_%s' % name if 
self.conf('chart_display_%s' % name): cached = self.getCache(cache_key) if cached: chart['list'] = cached movie_lists.append(chart) continue url = chart.get('url') chart['list'] = [] imdb_ids = self.getFromURL(url) try: for imdb_id in imdb_ids[0:max_items]: is_movie = fireEvent('movie.is_movie', identifier = imdb_id, adding = False, single = True) if not is_movie: continue info = self.getInfo(imdb_id) chart['list'].append(info) if self.shuttingDown(): break except: log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc())) self.setCache(cache_key, chart['list'], timeout = 259200) if chart['list']: movie_lists.append(chart) return movie_lists config = [{ 'name': 'imdb', 'groups': [ { 'tab': 'automation', 'list': 'watchlist_providers', 'name': 'imdb_automation_watchlist', 'label': 'IMDB', 'description': 'From any <strong>public</strong> IMDB watchlists.', 'options': [ { 'name': 'automation_enabled', 'default': False, 'type': 'enabler', }, { 'name': 'automation_urls_use', 'label': 'Use', }, { 'name': 'automation_urls', 'label': 'url', 'type': 'combined', 'combine': ['automation_urls_use', 'automation_urls'], }, ], }, { 'tab': 'automation', 'list': 'automation_providers', 'name': 'imdb_automation_charts', 'label': 'IMDB', 'description': 'Import movies from IMDB Charts', 'options': [ { 'name': 'automation_providers_enabled', 'default': False, 'type': 'enabler', }, { 'name': 'automation_charts_theater', 'type': 'bool', 'label': 'In Theaters', 'description': 'New Movies <a href="http://www.imdb.com/movies-in-theaters/" target="_blank">In-Theaters</a> chart', 'default': True, }, { 'name': 'automation_charts_top250', 'type': 'bool', 'label': 'TOP 250', 'description': 'IMDB <a href="http://www.imdb.com/chart/top/" target="_blank">TOP 250</a> chart', 'default': False, }, { 'name': 'automation_charts_boxoffice', 'type': 'bool', 'label': 'Box office TOP 10', 'description': 'IMDB Box office <a href="http://www.imdb.com/chart/" target="_blank">TOP 
10</a> chart', 'default': True, }, ], }, { 'tab': 'display', 'list': 'charts_providers', 'name': 'imdb_charts_display', 'label': 'IMDB', 'description': 'Display movies from IMDB Charts', 'options': [ { 'name': 'chart_display_enabled', 'default': True, 'type': 'enabler', }, { 'name': 'chart_display_theater', 'type': 'bool', 'label': 'In Theaters', 'description': 'New Movies <a href="http://www.imdb.com/movies-in-theaters/" target="_blank">In-Theaters</a> chart', 'default': False, }, { 'name': 'chart_display_top250', 'type': 'bool', 'label': 'TOP 250', 'description': 'IMDB <a href="http://www.imdb.com/chart/top/" target="_blank">TOP 250</a> chart', 'default': False, }, { 'name': 'chart_display_boxoffice', 'type': 'bool', 'label': 'Box office TOP 10', 'description': 'IMDB Box office <a href="http://www.imdb.com/chart/" target="_blank">TOP 10</a> chart', 'default': True, }, ], }, ], }]
10,524
Python
.py
253
26.158103
136
0.464825
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,890
hummingbird.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/automation/hummingbird.py
from couchpotato.core.logger import CPLog from couchpotato.core.media.movie.providers.automation.base import Automation log = CPLog(__name__) autoload = 'Hummingbird' class Hummingbird(Automation): def getIMDBids(self): movies = [] for movie in self.getWatchlist(): imdb = self.search(movie[0], movie[1]) if imdb: movies.append(imdb['imdb']) return movies def getWatchlist(self): if not self.conf('automation_username'): log.error('You need to fill in a username') return [] url = "http://hummingbird.me/api/v1/users/%s/library" % self.conf('automation_username') data = self.getJsonData(url) chosen_filter = { 'automation_list_current': 'currently-watching', 'automation_list_plan': 'plan-to-watch', 'automation_list_completed': 'completed', 'automation_list_hold': 'on-hold', 'automation_list_dropped': 'dropped', } chosen_lists = [] for x in chosen_filter: if self.conf(x): chosen_lists.append(chosen_filter[x]) entries = [] for item in data: if item['anime']['show_type'] != 'Movie' or item['status'] not in chosen_lists: continue title = item['anime']['title'] year = item['anime']['started_airing'] if year: year = year[:4] entries.append([title, year]) return entries config = [{ 'name': 'hummingbird', 'groups': [ { 'tab': 'automation', 'list': 'watchlist_providers', 'name': 'hummingbird_automation', 'label': 'Hummingbird', 'description': 'Import movies from your Hummingbird.me lists', 'options': [ { 'name': 'automation_enabled', 'default': False, 'type': 'enabler', }, { 'name': 'automation_username', 'label': 'Username', }, { 'name': 'automation_list_current', 'type': 'bool', 'label': 'Currently Watching', 'default': False, }, { 'name': 'automation_list_plan', 'type': 'bool', 'label': 'Plan to Watch', 'default': True, }, { 'name': 'automation_list_completed', 'type': 'bool', 'label': 'Completed', 'default': False, }, { 'name': 'automation_list_hold', 'type': 'bool', 'label': 'On Hold', 'default': False, }, { 'name': 'automation_list_dropped', 'type': 'bool', 'label': 
'Dropped', 'default': False, }, ], }, ], }]
2,345
Python
.py
92
21.119565
90
0.619813
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,891
crowdai.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/automation/crowdai.py
import re from couchpotato.core.helpers.rss import RSS from couchpotato.core.helpers.variable import tryInt, splitString from couchpotato.core.logger import CPLog from couchpotato.core.media.movie.providers.automation.base import Automation log = CPLog(__name__) autoload = 'CrowdAI' class CrowdAI(Automation, RSS): interval = 1800 def getIMDBids(self): movies = [] urls = dict(zip(splitString(self.conf('automation_urls')), [tryInt(x) for x in splitString(self.conf('automation_urls_use'))])) for url in urls: if not urls[url]: continue rss_movies = self.getRSSData(url) for movie in rss_movies: description = self.getTextElement(movie, 'description') grabs = 0 for item in movie: if item.attrib.get('name') == 'grabs': grabs = item.attrib.get('value') break if int(grabs) > tryInt(self.conf('number_grabs')): title = re.match(r'.*Title: .a href.*/">(.*) \(\d{4}\).*', description).group(1) log.info2('%s grabs for movie: %s, enqueue...', (grabs, title)) year = re.match(r'.*Year: (\d{4}).*', description).group(1) imdb = self.search(title, year) if imdb and self.isMinimalMovie(imdb): movies.append(imdb['imdb']) return movies config = [{ 'name': 'crowdai', 'groups': [ { 'tab': 'automation', 'list': 'automation_providers', 'name': 'crowdai_automation', 'label': 'CrowdAI', 'description': ('Imports from any newznab powered NZB providers RSS feed depending on the number of grabs per movie.', 'Go to your newznab site and find the RSS section. 
Then copy the copy paste the link under "Movies > x264 feed" here.'), 'options': [ { 'name': 'automation_enabled', 'default': False, 'type': 'enabler', }, { 'name': 'automation_urls_use', 'label': 'Use', 'default': '1', }, { 'name': 'automation_urls', 'label': 'url', 'type': 'combined', 'combine': ['automation_urls_use', 'automation_urls'], 'default': 'http://YOUR_PROVIDER/rss?t=THE_MOVIE_CATEGORY&i=YOUR_USER_ID&r=YOUR_API_KEY&res=2&rls=2&num=100', }, { 'name': 'number_grabs', 'default': '500', 'label': 'Grab threshold', 'description': 'Number of grabs required', }, ], }, ], }]
2,969
Python
.py
69
28.449275
148
0.490101
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,892
goodfilms.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/automation/goodfilms.py
from bs4 import BeautifulSoup from couchpotato.core.logger import CPLog from couchpotato.core.media.movie.providers.automation.base import Automation log = CPLog(__name__) autoload = 'Goodfilms' class Goodfilms(Automation): url = 'https://goodfil.ms/%s/queue?page=%d&without_layout=1' interval = 1800 def getIMDBids(self): if not self.conf('automation_username'): log.error('Please fill in your username') return [] movies = [] for movie in self.getWatchlist(): imdb_id = self.search(movie.get('title'), movie.get('year'), imdb_only = True) movies.append(imdb_id) return movies def getWatchlist(self): movies = [] page = 1 while True: url = self.url % (self.conf('automation_username'), page) data = self.getHTMLData(url) soup = BeautifulSoup(data) this_watch_list = soup.find_all('div', attrs = { 'class': 'movie', 'data-film-title': True }) if not this_watch_list: # No Movies break for movie in this_watch_list: movies.append({ 'title': movie['data-film-title'], 'year': movie['data-film-year'] }) if not 'next page' in data.lower(): break page += 1 return movies config = [{ 'name': 'goodfilms', 'groups': [ { 'tab': 'automation', 'list': 'watchlist_providers', 'name': 'goodfilms_automation', 'label': 'Goodfilms', 'description': 'import movies from your <a href="http://goodfil.ms" target="_blank">Goodfilms</a> queue', 'options': [ { 'name': 'automation_enabled', 'default': False, 'type': 'enabler', }, { 'name': 'automation_username', 'label': 'Username', }, ], }, ], }]
2,151
Python
.py
62
22.887097
117
0.499758
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,893
__init__.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/automation/__init__.py
config = [{ 'name': 'automation_providers', 'groups': [ { 'label': 'Watchlists', 'description': 'Check watchlists for new movies', 'type': 'list', 'name': 'watchlist_providers', 'tab': 'automation', 'options': [], }, { 'label': 'Automated', 'description': 'Uses minimal requirements', 'type': 'list', 'name': 'automation_providers', 'tab': 'automation', 'options': [], }, ], }]
566
Python
.py
21
17
61
0.434862
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,894
letterboxd.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/automation/letterboxd.py
import re from bs4 import BeautifulSoup from couchpotato.core.helpers.variable import tryInt, splitString, removeEmpty from couchpotato.core.logger import CPLog from couchpotato.core.media.movie.providers.automation.base import Automation log = CPLog(__name__) autoload = 'Letterboxd' class Letterboxd(Automation): url = 'http://letterboxd.com/%s/watchlist/page/%d/' pattern = re.compile(r'(.*)\((\d*)\)') interval = 1800 def getIMDBids(self): urls = splitString(self.conf('automation_urls')) if len(urls) == 0: return [] movies = [] for movie in self.getWatchlist(): imdb_id = self.search(movie.get('title'), movie.get('year'), imdb_only = True) movies.append(imdb_id) return movies def getWatchlist(self): enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))] urls = splitString(self.conf('automation_urls')) index = -1 movies = [] for username in urls: index += 1 if not enablers[index]: continue soup = BeautifulSoup(self.getHTMLData(self.url % (username, 1))) pagination = soup.find_all('li', attrs={'class': 'paginate-page'}) number_of_pages = tryInt(pagination[-1].find('a').get_text()) if pagination else 1 pages = range(1, number_of_pages) for page in pages: soup = BeautifulSoup(self.getHTMLData(self.url % (username, page))) movies += self.getMoviesFromHTML(soup) return movies def getMoviesFromHTML(self, html): movies = [] for movie in html.find_all('li', attrs={'class': 'poster-container'}): img = movie.find('img') title = img.get('alt') movies.append({ 'title': title }) return movies config = [{ 'name': 'letterboxd', 'groups': [ { 'tab': 'automation', 'list': 'watchlist_providers', 'name': 'letterboxd_automation', 'label': 'Letterboxd', 'description': 'Import movies from any public <a href="http://letterboxd.com/" target="_blank">Letterboxd</a> watchlist', 'options': [ { 'name': 'automation_enabled', 'default': False, 'type': 'enabler', }, { 'name': 'automation_urls_use', 'label': 'Use', }, { 'name': 'automation_urls', 'label': 'Username', 'type': 
'combined', 'combine': ['automation_urls_use', 'automation_urls'], }, ], }, ], }]
2,864
Python
.py
75
26.746667
133
0.530051
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,895
base.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/automation/base.py
import time import unicodedata from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.automation.base import AutomationBase from couchpotato.environment import Env from couchpotato.core.helpers.variable import splitString log = CPLog(__name__) class Automation(AutomationBase): enabled_option = 'automation_enabled' chart_enabled_option = 'chart_display_enabled' http_time_between_calls = 2 interval = 1800 last_checked = 0 def __init__(self): addEvent('automation.get_movies', self._getMovies) addEvent('automation.get_chart_list', self._getChartList) def _getMovies(self): if self.isDisabled(): return if not self.canCheck(): log.debug('Just checked, skipping %s', self.getName()) return [] self.last_checked = time.time() return self.getIMDBids() def _getChartList(self): if not (self.conf(self.chart_enabled_option) or self.conf(self.chart_enabled_option) is None): return return self.getChartList() def search(self, name, year = None, imdb_only = False): try: cache_name = name.decode('utf-8').encode('ascii', 'ignore') except UnicodeEncodeError: cache_name = unicodedata.normalize('NFKD', name).encode('ascii','ignore') prop_name = 'automation.cached.%s.%s' % (cache_name, year) cached_imdb = Env.prop(prop_name, default = False) if cached_imdb and imdb_only: return cached_imdb result = fireEvent('movie.search', q = '%s %s' % (name, year if year else ''), limit = 1, merge = True) if len(result) > 0: if imdb_only and result[0].get('imdb'): Env.prop(prop_name, result[0].get('imdb')) return result[0].get('imdb') if imdb_only else result[0] else: return None def isMinimalMovie(self, movie): if not movie.get('rating'): log.info('ignoring %s as no rating is available for.', (movie['original_title'])) return False if movie['rating'] and type(movie['rating']) is not float and movie['rating'].get('imdb'): movie['votes'] = movie['rating']['imdb'][1] movie['rating'] = movie['rating']['imdb'][0] for 
minimal_type in ['year', 'rating', 'votes']: type_value = movie.get(minimal_type, 0) type_min = self.getMinimal(minimal_type) if type_value < type_min: log.info('%s too low for %s, need %s has %s', (minimal_type, movie['original_title'], type_min, type_value)) return False movie_genres = [genre.lower() for genre in movie['genres']] required_genres = splitString(self.getMinimal('required_genres').lower()) ignored_genres = splitString(self.getMinimal('ignored_genres').lower()) req_match = 0 for req_set in required_genres: req = splitString(req_set, '&') req_match += len(list(set(movie_genres) & set(req))) == len(req) if self.getMinimal('required_genres') and req_match == 0: log.info2('Required genre(s) missing for %s', movie['original_title']) return False for ign_set in ignored_genres: ign = splitString(ign_set, '&') if len(list(set(movie_genres) & set(ign))) == len(ign): log.info2('%s has blacklisted genre(s): %s', (movie['original_title'], ign)) return False return True def getMinimal(self, min_type): return Env.setting(min_type, 'automation') def getIMDBids(self): return [] def getChartList(self): # Example return: [ {'name': 'Display name of list', 'url': 'http://example.com/', 'order': 1, 'list': []} ] return def canCheck(self): return time.time() > self.last_checked + self.interval
4,003
Python
.py
83
38.771084
124
0.614771
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,896
bluray.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/automation/bluray.py
import traceback from bs4 import BeautifulSoup from couchpotato import fireEvent from couchpotato.core.helpers.rss import RSS from couchpotato.core.helpers.variable import tryInt from couchpotato.core.logger import CPLog from couchpotato.core.media.movie.providers.automation.base import Automation log = CPLog(__name__) autoload = 'Bluray' class Bluray(Automation, RSS): interval = 1800 rss_url = 'http://www.blu-ray.com/rss/newreleasesfeed.xml' backlog_url = 'http://www.blu-ray.com/movies/movies.php?show=newreleases&page=%s' display_url = 'http://www.blu-ray.com/movies/movies.php?show=newreleases' chart_order = 1 def getIMDBids(self): movies = [] if self.conf('backlog'): cookie = {'Cookie': 'listlayout_7=full'} page = 0 while True: page += 1 url = self.backlog_url % page data = self.getHTMLData(url, headers = cookie) soup = BeautifulSoup(data) try: # Stop if the release year is before the minimal year brk = False h3s = soup.body.find_all('h3') for h3 in h3s: if h3.parent.name != 'a': try: page_year = tryInt(h3.get_text()[-4:]) if page_year > 0 and page_year < self.getMinimal('year'): brk = True except: log.error('Failed determining page year: %s', traceback.format_exc()) brk = True break if brk: break for h3 in h3s: try: if h3.parent.name == 'a': name = h3.get_text().lower().split('blu-ray')[0].strip() if not name.find('/') == -1: # make sure it is not a double movie release continue if not h3.parent.parent.small: # ignore non-movie tables continue year = h3.parent.parent.small.get_text().split('|')[1].strip() if tryInt(year) < self.getMinimal('year'): continue imdb = self.search(name, year) if imdb: if self.isMinimalMovie(imdb): movies.append(imdb['imdb']) except: log.debug('Error parsing movie html: %s', traceback.format_exc()) break except: log.debug('Error loading page %s: %s', (page, traceback.format_exc())) break self.conf('backlog', value = False) rss_movies = self.getRSSData(self.rss_url) for movie in rss_movies: name = self.getTextElement(movie, 
'title').lower().split('blu-ray')[0].strip('(').rstrip() year = self.getTextElement(movie, 'description').split('|')[1].strip('(').strip() if not name.find('/') == -1: # make sure it is not a double movie release continue if tryInt(year) < self.getMinimal('year'): continue imdb = self.search(name, year) if imdb: if self.isMinimalMovie(imdb): movies.append(imdb['imdb']) return movies def getChartList(self): cache_key = 'bluray.charts' movie_list = { 'name': 'Blu-ray.com - New Releases', 'url': self.display_url, 'order': self.chart_order, 'list': self.getCache(cache_key) or [] } if not movie_list['list']: movie_ids = [] max_items = 10 rss_movies = self.getRSSData(self.rss_url) for movie in rss_movies: name = self.getTextElement(movie, 'title').lower().split('blu-ray')[0].strip('(').rstrip() year = self.getTextElement(movie, 'description').split('|')[1].strip('(').strip() if not name.find('/') == -1: # make sure it is not a double movie release continue movie = self.search(name, year) if movie: if movie.get('imdb') in movie_ids: continue is_movie = fireEvent('movie.is_movie', identifier = movie.get('imdb'), single = True) if not is_movie: continue movie_ids.append(movie.get('imdb')) movie_list['list'].append( movie ) if len(movie_list['list']) >= max_items: break if not movie_list['list']: return self.setCache(cache_key, movie_list['list'], timeout = 259200) return [movie_list] config = [{ 'name': 'bluray', 'groups': [ { 'tab': 'automation', 'list': 'automation_providers', 'name': 'bluray_automation', 'label': 'Blu-ray.com', 'description': 'Imports movies from blu-ray.com.', 'options': [ { 'name': 'automation_enabled', 'default': False, 'type': 'enabler', }, { 'name': 'backlog', 'advanced': True, 'description': ('Parses the history until the minimum movie year is reached. 
(Takes a while)', 'Will be disabled once it has completed'), 'default': False, 'type': 'bool', }, ], }, { 'tab': 'display', 'list': 'charts_providers', 'name': 'bluray_charts_display', 'label': 'Blu-ray.com', 'description': 'Display <a href="http://www.blu-ray.com/movies/movies.php?show=newreleases" target="_blank">new releases</a> from Blu-ray.com', 'options': [ { 'name': 'chart_display_enabled', 'default': True, 'type': 'enabler', }, ], }, ], }]
6,634
Python
.py
148
27.662162
157
0.460261
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,897
popularmovies.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/automation/popularmovies.py
from couchpotato import fireEvent from couchpotato.core.logger import CPLog from couchpotato.core.media.movie.providers.automation.base import Automation log = CPLog(__name__) autoload = 'PopularMovies' class PopularMovies(Automation): interval = 1800 url = 'https://s3.amazonaws.com/popular-movies/movies.json' def getIMDBids(self): movies = [] retrieved_movies = self.getJsonData(self.url) if retrieved_movies: for movie in retrieved_movies: imdb_id = movie.get('imdb_id') info = fireEvent('movie.info', identifier = imdb_id, extended = False, merge = True) if self.isMinimalMovie(info): movies.append(imdb_id) return movies config = [{ 'name': 'popularmovies', 'groups': [ { 'tab': 'automation', 'list': 'automation_providers', 'name': 'popularmovies_automation', 'label': 'Popular Movies', 'description': 'Imports the <a href="http://movies.stevenlu.com/" target="_blank">top titles of movies that have been in theaters</a>. Script provided by <a href="https://github.com/sjlu/popular-movies" target="_blank">Steven Lu</a>', 'options': [ { 'name': 'automation_enabled', 'default': False, 'type': 'enabler', }, ], }, ], }]
1,465
Python
.py
37
29.432432
246
0.577982
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,898
itunes.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/automation/itunes.py
from xml.etree.ElementTree import QName import datetime import traceback import xml.etree.ElementTree as XMLTree from couchpotato.core.helpers.rss import RSS from couchpotato.core.helpers.variable import md5, splitString, tryInt from couchpotato.core.logger import CPLog from couchpotato.core.media.movie.providers.automation.base import Automation log = CPLog(__name__) autoload = 'ITunes' class ITunes(Automation, RSS): interval = 1800 def getIMDBids(self): movies = [] enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))] urls = splitString(self.conf('automation_urls')) namespace = 'http://www.w3.org/2005/Atom' namespace_im = 'http://itunes.apple.com/rss' index = -1 for url in urls: index += 1 if len(enablers) == 0 or len(enablers) < index or not enablers[index]: continue try: cache_key = 'itunes.rss.%s' % md5(url) rss_data = self.getCache(cache_key, url) data = XMLTree.fromstring(rss_data) if data is not None: entry_tag = str(QName(namespace, 'entry')) rss_movies = self.getElements(data, entry_tag) for movie in rss_movies: name_tag = str(QName(namespace_im, 'name')) name = self.getTextElement(movie, name_tag) releaseDate_tag = str(QName(namespace_im, 'releaseDate')) releaseDateText = self.getTextElement(movie, releaseDate_tag) year = datetime.datetime.strptime(releaseDateText, '%Y-%m-%dT00:00:00-07:00').strftime("%Y") imdb = self.search(name, year) if imdb and self.isMinimalMovie(imdb): movies.append(imdb['imdb']) except: log.error('Failed loading iTunes rss feed: %s %s', (url, traceback.format_exc())) return movies config = [{ 'name': 'itunes', 'groups': [ { 'tab': 'automation', 'list': 'automation_providers', 'name': 'itunes_automation', 'label': 'iTunes', 'description': 'From any <a href="http://itunes.apple.com/rss" target="_blank">iTunes</a> Store feed. 
Url should be the RSS link.', 'options': [ { 'name': 'automation_enabled', 'default': False, 'type': 'enabler', }, { 'name': 'automation_urls_use', 'label': 'Use', 'default': ',', }, { 'name': 'automation_urls', 'label': 'url', 'type': 'combined', 'combine': ['automation_urls_use', 'automation_urls'], 'default': 'https://itunes.apple.com/rss/topmovies/limit=25/xml,', }, ], }, ], }]
3,121
Python
.py
73
29.09589
143
0.519841
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,899
flixster.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/automation/flixster.py
from couchpotato.core.helpers.variable import tryInt, splitString from couchpotato.core.logger import CPLog from couchpotato.core.media.movie.providers.automation.base import Automation log = CPLog(__name__) autoload = 'Flixster' class Flixster(Automation): url = 'http://www.flixster.com/api/users/%s/movies/ratings?scoreTypes=wts' interval = 60 def getIMDBids(self): ids = splitString(self.conf('automation_ids')) if len(ids) == 0: return [] movies = [] for movie in self.getWatchlist(): imdb_id = self.search(movie.get('title'), movie.get('year'), imdb_only = True) movies.append(imdb_id) return movies def getWatchlist(self): enablers = [tryInt(x) for x in splitString(self.conf('automation_ids_use'))] ids = splitString(self.conf('automation_ids')) index = -1 movies = [] for user_id in ids: index += 1 if not enablers[index]: continue data = self.getJsonData(self.url % user_id, decode_from = 'iso-8859-1') for movie in data: movies.append({ 'title': movie['movie']['title'], 'year': movie['movie']['year'] }) return movies config = [{ 'name': 'flixster', 'groups': [ { 'tab': 'automation', 'list': 'watchlist_providers', 'name': 'flixster_automation', 'label': 'Flixster', 'description': 'Import movies from any public <a href="http://www.flixster.com/" target="_blank">Flixster</a> watchlist', 'options': [ { 'name': 'automation_enabled', 'default': False, 'type': 'enabler', }, { 'name': 'automation_ids_use', 'label': 'Use', }, { 'name': 'automation_ids', 'label': 'User ID', 'type': 'combined', 'combine': ['automation_ids_use', 'automation_ids'], }, ], }, ], }]
2,259
Python
.py
62
24.387097
133
0.501379
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)