id
int64
0
458k
file_name
stringlengths
4
119
file_path
stringlengths
14
227
content
stringlengths
24
9.96M
size
int64
24
9.96M
language
stringclasses
1 value
extension
stringclasses
14 values
total_lines
int64
1
219k
avg_line_length
float64
2.52
4.63M
max_line_length
int64
5
9.91M
alphanum_fraction
float64
0
1
repo_name
stringlengths
7
101
repo_stars
int64
100
139k
repo_forks
int64
0
26.4k
repo_open_issues
int64
0
2.27k
repo_license
stringclasses
12 values
repo_extraction_date
stringclasses
433 values
7,900
yifypopular.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/automation/yifypopular.py
import HTMLParser from couchpotato import fireEvent from couchpotato.core.logger import CPLog from couchpotato.core.media.movie.providers.automation.base import Automation log = CPLog(__name__) autoload = 'YTSPopular' class YTSPopular(Automation): interval = 1800 url = 'https://yts.lt/' def getIMDBids(self): movies = [] source = self.getHTMLData(self.url) class MyHTMLParser(HTMLParser): doparse = False dotitle = False doyear = False currentmovie = {'title':"", 'year':""} movies = [] def handle_starttag(self, tag, attrs): for attr in attrs: self.doparse = (attr[0] == "id" and attr[1] == "popular-downloads") or self.doparse self.dotitle = (attr[0] == "class" and attr[1] == "browse-movie-title" and self.doparse) self.doyear = (attr[0] == "class" and attr[1] == "browse-movie-year" and self.doparse) if (attr[0] == "class" and attr[1] == "home-movies"): self.doparse = False def handle_endtag(self, tag): self.dotitle = False self.doyear = False def handle_data(self, data): if (self.doparse): if (self.dotitle): self.dotitle = False self.currentmovie['title'] = data if (self.doyear): self.doyear = False self.currentmovie['year'] = data self.movies.append(self.currentmovie) self.currentmovie = {'title':"", 'year':""} def getMovies(self): return self.movies parser = MyHTMLParser() parser.feed(source) for el in parser.getMovies(): imdb = self.search(el['title'], el['year']) if imdb and self.isMinimalMovie(imdb): movies.append(imdb['imdb']) return movies config = [{ 'name': 'ytspopular', 'groups': [ { 'tab': 'automation', 'list': 'automation_providers', 'name': 'ytspopular_automation', 'label': 'YTS Popular', 'description': 'Imports popular downloas as currently listed on YTS.', 'options': [ { 'name': 'automation_enabled', 'default': False, 'type': 'enabler', }, ], }, ], }]
2,615
Python
.py
66
26.151515
108
0.502784
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,901
__init__.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/automation/trakt/__init__.py
from .main import Trakt def autoload(): return Trakt() config = [{ 'name': 'trakt', 'groups': [ { 'tab': 'automation', 'list': 'watchlist_providers', 'name': 'trakt_automation', 'label': 'Trakt', 'description': 'Import movies from your own watchlist', 'options': [ { 'name': 'automation_enabled', 'default': False, 'type': 'enabler', }, { 'name': 'automation_oauth_token', 'label': 'Auth Token', 'advanced': 1 }, { 'name': 'automation_oauth_refresh', 'label': 'Refresh Token', 'description': ('Used to automatically refresh your oauth token every 3 months', 'To get a refresh token, reconnect with trakt'), 'advanced': 1 }, ], }, ], }]
1,088
Python
.py
34
17.705882
100
0.398095
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,902
main.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/automation/trakt/main.py
import json import traceback import time from couchpotato import Env, fireEvent from couchpotato.api import addApiView from couchpotato.core.event import addEvent from couchpotato.core.helpers.variable import cleanHost from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.base import Provider from couchpotato.core.media.movie.providers.automation.base import Automation log = CPLog(__name__) class TraktBase(Provider): client_id = '8a54ed7b5e1b56d874642770ad2e8b73e2d09d6e993c3a92b1e89690bb1c9014' api_url = 'https://api-v2launch.trakt.tv/' def call(self, method_url, post_data = None): headers = { 'Content-Type': 'application/json', 'Authorization': 'Bearer %s' % self.conf('automation_oauth_token'), 'trakt-api-version': 2, 'trakt-api-key': self.client_id, } if post_data: post_data = json.dumps(post_data) data = self.getJsonData(self.api_url + method_url, data = post_data or {}, headers = headers) return data if data else [] class Trakt(Automation, TraktBase): urls = { 'watchlist': 'sync/watchlist/movies?extended=full', 'oauth': 'https://api.couchpota.to/authorize/trakt/', 'refresh_token': 'https://api.couchpota.to/authorize/trakt_refresh/', } def __init__(self): super(Trakt, self).__init__() addApiView('automation.trakt.auth_url', self.getAuthorizationUrl) addApiView('automation.trakt.credentials', self.getCredentials) fireEvent('schedule.interval', 'updater.check', self.refreshToken, hours = 24) addEvent('app.load', self.refreshToken) def refreshToken(self): token = self.conf('automation_oauth_token') refresh_token = self.conf('automation_oauth_refresh') if token and refresh_token: prop_name = 'last_trakt_refresh' last_refresh = int(Env.prop(prop_name, default = 0)) if last_refresh < time.time()-4838400: # refresh every 8 weeks log.debug('Refreshing trakt token') url = self.urls['refresh_token'] + '?token=' + self.conf('automation_oauth_refresh') data = fireEvent('cp.api_call', url, cache_timeout = 0, single = True) if data and 'oauth' 
in data and 'refresh' in data: log.debug('Oauth refresh: %s', data) self.conf('automation_oauth_token', value = data.get('oauth')) self.conf('automation_oauth_refresh', value = data.get('refresh')) Env.prop(prop_name, value = int(time.time())) else: log.error('Failed refreshing Trakt token, please re-register in settings') elif token and not refresh_token: log.error('Refresh token is missing, please re-register Trakt for autorefresh of the token in the future') def getIMDBids(self): movies = [] for movie in self.getWatchlist(): m = movie.get('movie') m['original_title'] = m['title'] log.debug("Movie: %s", m) if self.isMinimalMovie(m): log.info("Trakt automation: %s satisfies requirements, added", m.get('title')) movies.append(m.get('ids').get('imdb')) continue return movies def getWatchlist(self): return self.call(self.urls['watchlist']) def getAuthorizationUrl(self, host = None, **kwargs): callback_url = cleanHost(host) + '%sautomation.trakt.credentials/' % (Env.get('api_base').lstrip('/')) log.debug('callback_url is %s', callback_url) target_url = self.urls['oauth'] + "?target=" + callback_url log.debug('target_url is %s', target_url) return { 'success': True, 'url': target_url, } def getCredentials(self, **kwargs): try: oauth_token = kwargs.get('oauth') refresh_token = kwargs.get('refresh') log.debug('oauth_token is: %s', oauth_token) self.conf('automation_oauth_token', value = oauth_token) self.conf('automation_oauth_refresh', value = refresh_token) Env.prop('last_trakt_refresh', value = int(time.time())) except: log.error('Failed setting trakt token: %s', traceback.format_exc()) return 'redirect', Env.get('web_base') + 'settings/automation/'
4,482
Python
.py
89
40.292135
118
0.624255
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,903
hdtrailers.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/trailer/hdtrailers.py
from string import digits, ascii_letters import re from bs4 import SoupStrainer, BeautifulSoup from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.helpers.variable import mergeDicts, getTitle, getIdentifier from couchpotato.core.logger import CPLog from couchpotato.core.media.movie.providers.trailer.base import TrailerProvider from requests import HTTPError log = CPLog(__name__) autoload = 'HDTrailers' class HDTrailers(TrailerProvider): urls = { 'api': 'http://www.hd-trailers.net/movie/%s/', 'backup': 'http://www.hd-trailers.net/blog/', } providers = ['apple.ico', 'yahoo.ico', 'moviefone.ico', 'myspace.ico', 'favicon.ico'] only_tables_tags = SoupStrainer('table') def search(self, group): movie_name = getTitle(group) url = self.urls['api'] % self.movieUrlName(movie_name) try: data = self.getCache('hdtrailers.%s' % getIdentifier(group), url, show_error = False) except HTTPError: log.debug('No page found for: %s', movie_name) data = None result_data = {'480p': [], '720p': [], '1080p': []} if not data: return result_data did_alternative = False for provider in self.providers: results = self.findByProvider(data, provider) # Find alternative if results.get('404') and not did_alternative: results = self.findViaAlternative(group) did_alternative = True result_data = mergeDicts(result_data, results) return result_data def findViaAlternative(self, group): results = {'480p': [], '720p': [], '1080p': []} movie_name = getTitle(group) url = "%s?%s" % (self.urls['backup'], tryUrlencode({'s':movie_name})) try: data = self.getCache('hdtrailers.alt.%s' % getIdentifier(group), url, show_error = False) except HTTPError: log.debug('No alternative page found for: %s', movie_name) data = None if not data: return results try: html = BeautifulSoup(data, parse_only = self.only_tables_tags) result_table = html.find_all('h2', text = re.compile(movie_name)) for h2 in result_table: if 'trailer' in h2.lower(): parent = h2.parent.parent.parent trailerLinks = 
parent.find_all('a', text = re.compile('480p|720p|1080p')) try: for trailer in trailerLinks: results[trailer].insert(0, trailer.parent['href']) except: pass except AttributeError: log.debug('No trailers found in via alternative.') return results def findByProvider(self, data, provider): results = {'480p':[], '720p':[], '1080p':[]} try: html = BeautifulSoup(data, parse_only = self.only_tables_tags) result_table = html.find('table', attrs = {'class':'bottomTable'}) for tr in result_table.find_all('tr'): trtext = str(tr).lower() if 'clips' in trtext: break if 'trailer' in trtext and not 'clip' in trtext and provider in trtext and not '3d' in trtext: if 'trailer' not in tr.find('span', 'standardTrailerName').text.lower(): continue resolutions = tr.find_all('td', attrs = {'class':'bottomTableResolution'}) for res in resolutions: if res.a and str(res.a.contents[0]) in results: results[str(res.a.contents[0])].insert(0, res.a['href']) except AttributeError: log.debug('No trailers found in provider %s.', provider) results['404'] = True return results def movieUrlName(self, string): safe_chars = ascii_letters + digits + ' ' r = ''.join([char if char in safe_chars else ' ' for char in string]) name = re.sub('\s+' , '-', r).lower() try: int(name) return '-' + name except: return name
4,275
Python
.py
92
34.684783
110
0.573838
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,904
base.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/trailer/base.py
from couchpotato.core.event import addEvent from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.base import Provider log = CPLog(__name__) class TrailerProvider(Provider): type = 'trailer' def __init__(self): addEvent('trailer.search', self.search) def search(self, *args, **kwargs): pass
357
Python
.py
10
31.3
64
0.730205
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,905
omgwtfnzbs.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/nzb/omgwtfnzbs.py
from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.nzb.omgwtfnzbs import Base from couchpotato.core.media.movie.providers.base import MovieProvider log = CPLog(__name__) autoload = 'OMGWTFNZBs' class OMGWTFNZBs(MovieProvider, Base): pass
281
Python
.py
7
38
70
0.825926
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,906
nzbclub.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/nzb/nzbclub.py
from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.logger import CPLog from couchpotato.core.event import fireEvent from couchpotato.core.media._base.providers.nzb.nzbclub import Base from couchpotato.core.media.movie.providers.base import MovieProvider log = CPLog(__name__) autoload = 'NZBClub' class NZBClub(MovieProvider, Base): def buildUrl(self, media): q = tryUrlencode({ 'q': '%s' % fireEvent('library.query', media, single = True), }) query = tryUrlencode({ 'ig': 1, 'rpp': 200, 'st': 5, 'sp': 1, 'ns': 1, }) return '%s&%s' % (q, query)
699
Python
.py
20
27.8
73
0.626488
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,907
newznab.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/nzb/newznab.py
from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.helpers.variable import getIdentifier from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.nzb.newznab import Base from couchpotato.core.media.movie.providers.base import MovieProvider log = CPLog(__name__) autoload = 'Newznab' class Newznab(MovieProvider, Base): def buildUrl(self, media, host): query = tryUrlencode({ 't': 'movie', 'imdbid': getIdentifier(media).replace('tt', ''), 'apikey': host['api_key'], 'extended': 1 }) if len(host.get('custom_tag', '')) > 0: query = '%s&%s' % (query, host.get('custom_tag')) if len(host['custom_category']) > 0: query = '%s&cat=%s' % (query, host['custom_category']) return query
862
Python
.py
20
35.85
69
0.645858
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,908
binsearch.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/providers/nzb/binsearch.py
from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.helpers.variable import getIdentifier from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.nzb.binsearch import Base from couchpotato.core.media.movie.providers.base import MovieProvider from couchpotato.environment import Env log = CPLog(__name__) autoload = 'BinSearch' class BinSearch(MovieProvider, Base): def buildUrl(self, media, quality): query = tryUrlencode({ 'q': getIdentifier(media), 'm': 'n', 'max': 400, 'adv_age': Env.setting('retention', 'nzb'), 'adv_sort': 'date', 'adv_col': 'on', 'adv_nfo': 'on', 'xminsize': quality.get('size_min'), 'xmaxsize': quality.get('size_max'), }) return query
861
Python
.py
22
31.727273
69
0.651079
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,909
main.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/_base/main.py
import traceback import time from CodernityDB.database import RecordNotFound from couchpotato import get_db from couchpotato.api import addApiView from couchpotato.core.event import fireEvent, fireEventAsync, addEvent from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.helpers.variable import splitString, getTitle, getImdb, getIdentifier from couchpotato.core.logger import CPLog from couchpotato.core.media.movie import MovieTypeBase import six log = CPLog(__name__) class MovieBase(MovieTypeBase): _type = 'movie' def __init__(self): # Initialize this type super(MovieBase, self).__init__() self.initType() addApiView('movie.add', self.addView, docs = { 'desc': 'Add new movie to the wanted list', 'return': {'type': 'object', 'example': """{ 'success': True, 'movie': object }"""}, 'params': { 'identifier': {'desc': 'IMDB id of the movie your want to add.'}, 'profile_id': {'desc': 'ID of quality profile you want the add the movie in. If empty will use the default profile.'}, 'force_readd': {'desc': 'Force re-add even if movie already in wanted or manage. Default: True'}, 'category_id': {'desc': 'ID of category you want the add the movie in. If empty will use no category.'}, 'title': {'desc': 'Movie title to use for searches. Has to be one of the titles returned by movie.search.'}, } }) addApiView('movie.edit', self.edit, docs = { 'desc': 'Add new movie to the wanted list', 'params': { 'id': {'desc': 'Movie ID(s) you want to edit.', 'type': 'int (comma separated)'}, 'profile_id': {'desc': 'ID of quality profile you want the edit the movie to.'}, 'category_id': {'desc': 'ID of category you want the add the movie in. If empty will use no category.'}, 'default_title': {'desc': 'Movie title to use for searches. 
Has to be one of the titles returned by movie.search.'}, } }) addEvent('movie.add', self.add) addEvent('movie.update', self.update) addEvent('movie.update_release_dates', self.updateReleaseDate) def add(self, params = None, force_readd = True, search_after = True, update_after = True, notify_after = True, status = None): if not params: params = {} # Make sure it's a correct zero filled imdb id params['identifier'] = getImdb(params.get('identifier', '')) if not params.get('identifier'): msg = 'Can\'t add movie without imdb identifier.' log.error(msg) fireEvent('notify.frontend', type = 'movie.is_tvshow', message = msg) return False elif not params.get('info'): try: is_movie = fireEvent('movie.is_movie', identifier = params.get('identifier'), adding = True, single = True) if not is_movie: msg = 'Can\'t add movie, seems to be a TV show.' log.error(msg) fireEvent('notify.frontend', type = 'movie.is_tvshow', message = msg) return False except: pass info = params.get('info') if not info or (info and len(info.get('titles', [])) == 0): info = fireEvent('movie.info', merge = True, extended = False, identifier = params.get('identifier')) # Allow force re-add overwrite from param if 'force_readd' in params: fra = params.get('force_readd') force_readd = fra.lower() not in ['0', '-1'] if not isinstance(fra, bool) else fra # Set default title def_title = self.getDefaultTitle(info) # Default profile and category default_profile = {} if (not params.get('profile_id') and status != 'done') or params.get('ignore_previous', False): default_profile = fireEvent('profile.default', single = True) cat_id = params.get('category_id') try: db = get_db() media = { '_t': 'media', 'type': 'movie', 'title': def_title, 'identifiers': { 'imdb': params.get('identifier') }, 'status': status if status else 'active', 'profile_id': params.get('profile_id') or default_profile.get('_id'), 'category_id': cat_id if cat_id is not None and len(cat_id) > 0 and cat_id != '-1' else None, } # Update movie info 
try: del info['in_wanted'] except: pass try: del info['in_library'] except: pass media['info'] = info new = False previous_profile = None try: m = db.get('media', 'imdb-%s' % params.get('identifier'), with_doc = True)['doc'] try: db.get('id', m.get('profile_id')) previous_profile = m.get('profile_id') except RecordNotFound: pass except: log.error('Failed getting previous profile: %s', traceback.format_exc()) except: new = True m = db.insert(media) # Update dict to be usable m.update(media) added = True do_search = False search_after = search_after and self.conf('search_on_add', section = 'moviesearcher') onComplete = None if new: if search_after: onComplete = self.createOnComplete(m['_id']) search_after = False elif force_readd: # Clean snatched history for release in fireEvent('release.for_media', m['_id'], single = True): if release.get('status') in ['downloaded', 'snatched', 'seeding', 'done']: if params.get('ignore_previous', False): fireEvent('release.update_status', release['_id'], status = 'ignored') else: fireEvent('release.delete', release['_id'], single = True) m['profile_id'] = (params.get('profile_id') or default_profile.get('_id')) if not previous_profile else previous_profile m['category_id'] = cat_id if cat_id is not None and len(cat_id) > 0 else (m.get('category_id') or None) m['last_edit'] = int(time.time()) m['tags'] = [] do_search = True db.update(m) else: try: del params['info'] except: pass log.debug('Movie already exists, not updating: %s', params) added = False # Trigger update info if added and update_after: # Do full update to get images etc fireEventAsync('movie.update', m['_id'], default_title = params.get('title'), on_complete = onComplete) # Remove releases for rel in fireEvent('release.for_media', m['_id'], single = True): if rel['status'] is 'available': db.delete(rel) movie_dict = fireEvent('media.get', m['_id'], single = True) if not movie_dict: log.debug('Failed adding media, can\'t find it anymore') return False if do_search and 
search_after: onComplete = self.createOnComplete(m['_id']) onComplete() if added and notify_after: if params.get('title'): message = 'Successfully added "%s" to your wanted list.' % params.get('title', '') else: title = getTitle(m) if title: message = 'Successfully added "%s" to your wanted list.' % title else: message = 'Successfully added to your wanted list.' fireEvent('notify.frontend', type = 'movie.added', data = movie_dict, message = message) return movie_dict except: log.error('Failed adding media: %s', traceback.format_exc()) def addView(self, **kwargs): add_dict = self.add(params = kwargs) return { 'success': True if add_dict else False, 'movie': add_dict, } def edit(self, id = '', **kwargs): try: db = get_db() ids = splitString(id) for media_id in ids: try: m = db.get('id', media_id) m['profile_id'] = kwargs.get('profile_id') or m['profile_id'] cat_id = kwargs.get('category_id') if cat_id is not None: m['category_id'] = cat_id if len(cat_id) > 0 else m['category_id'] # Remove releases for rel in fireEvent('release.for_media', m['_id'], single = True): if rel['status'] is 'available': db.delete(rel) # Default title if kwargs.get('default_title'): m['title'] = kwargs.get('default_title') db.update(m) fireEvent('media.restatus', m['_id'], single = True) m = db.get('id', media_id) movie_dict = fireEvent('media.get', m['_id'], single = True) fireEventAsync('movie.searcher.single', movie_dict, on_complete = self.createNotifyFront(media_id)) except: print traceback.format_exc() log.error('Can\'t edit non-existing media') return { 'success': True, } except: log.error('Failed editing media: %s', traceback.format_exc()) return { 'success': False, } def update(self, media_id = None, identifier = None, default_title = None, extended = False): """ Update movie information inside media['doc']['info'] @param media_id: document id @param default_title: default title, if empty, use first one or existing one @param extended: update with extended info (parses more info, 
actors, images from some info providers) @return: dict, with media """ if self.shuttingDown(): return lock_key = 'media.get.%s' % media_id if media_id else identifier self.acquireLock(lock_key) media = {} try: db = get_db() if media_id: media = db.get('id', media_id) else: media = db.get('media', 'imdb-%s' % identifier, with_doc = True)['doc'] info = fireEvent('movie.info', merge = True, extended = extended, identifier = getIdentifier(media)) # Don't need those here try: del info['in_wanted'] except: pass try: del info['in_library'] except: pass if not info or len(info) == 0: log.error('Could not update, no movie info to work with: %s', identifier) return False # Update basic info media['info'] = info titles = info.get('titles', []) log.debug('Adding titles: %s', titles) # Define default title if default_title or media.get('title') == 'UNKNOWN' or len(media.get('title', '')) == 0: media['title'] = self.getDefaultTitle(info, default_title) # Files image_urls = info.get('images', []) self.getPoster(media, image_urls) db.update(media) except: log.error('Failed update media: %s', traceback.format_exc()) self.releaseLock(lock_key) return media def updateReleaseDate(self, media_id): """ Update release_date (eta) info only @param media_id: document id @return: dict, with dates dvd, theater, bluray, expires """ try: db = get_db() media = db.get('id', media_id) if not media.get('info'): media = self.update(media_id) dates = media.get('info', {}).get('release_date') else: dates = media.get('info').get('release_date') if dates and (dates.get('expires', 0) < time.time() or dates.get('expires', 0) > time.time() + (604800 * 4)) or not dates: dates = fireEvent('movie.info.release_date', identifier = getIdentifier(media), merge = True) media['info'].update({'release_date': dates}) db.update(media) return dates except: log.error('Failed updating release dates: %s', traceback.format_exc()) return {}
13,170
Python
.py
272
34.525735
136
0.530063
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,910
__init__.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/charts/__init__.py
from .main import Charts def autoload(): return Charts() config = [{ 'name': 'charts', 'groups': [ { 'label': 'Charts', 'description': 'Displays selected charts on the home page', 'type': 'list', 'name': 'charts_providers', 'tab': 'display', 'options': [], }, ], }]
374
Python
.py
16
15.625
71
0.468927
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,911
main.py
CouchPotato_CouchPotatoServer/couchpotato/core/media/movie/charts/main.py
from CodernityDB.database import RecordNotFound from couchpotato import Env, get_db from couchpotato.core.helpers.variable import getTitle, splitString from couchpotato.core.logger import CPLog from couchpotato.api import addApiView from couchpotato.core.event import fireEvent from couchpotato.core.plugins.base import Plugin log = CPLog(__name__) class Charts(Plugin): def __init__(self): addApiView('charts.view', self.automationView) addApiView('charts.ignore', self.ignoreView) def automationView(self, force_update = False, **kwargs): db = get_db() charts = fireEvent('automation.get_chart_list', merge = True) ignored = splitString(Env.prop('charts_ignore', default = '')) # Create a list the movie/list.js can use for chart in charts: medias = [] for media in chart.get('list', []): identifier = media.get('imdb') if identifier in ignored: continue try: try: in_library = db.get('media', 'imdb-%s' % identifier) if in_library: continue except RecordNotFound: pass except: pass # Cache poster posters = media.get('images', {}).get('poster', []) poster = [x for x in posters if 'tmdb' in x] posters = poster if len(poster) > 0 else posters cached_poster = fireEvent('file.download', url = posters[0], single = True) if len(posters) > 0 else False files = {'image_poster': [cached_poster] } if cached_poster else {} medias.append({ 'status': 'chart', 'title': getTitle(media), 'type': 'movie', 'info': media, 'files': files, 'identifiers': { 'imdb': identifier } }) chart['list'] = medias return { 'success': True, 'count': len(charts), 'charts': charts, 'ignored': ignored, } def ignoreView(self, imdb = None, **kwargs): ignored = splitString(Env.prop('charts_ignore', default = '')) if imdb: ignored.append(imdb) Env.prop('charts_ignore', ','.join(set(ignored))) return { 'result': True }
2,589
Python
.py
63
27.507937
122
0.517365
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,912
qbittorrent_.py
CouchPotato_CouchPotatoServer/couchpotato/core/downloaders/qbittorrent_.py
from base64 import b16encode, b32decode from hashlib import sha1 from datetime import timedelta import os import re from bencode import bencode, bdecode from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList from couchpotato.core.helpers.encoding import sp from couchpotato.core.helpers.variable import cleanHost from couchpotato.core.logger import CPLog from qbittorrent.client import QBittorrentClient log = CPLog(__name__) autoload = 'qBittorrent' class qBittorrent(DownloaderBase): protocol = ['torrent', 'torrent_magnet'] qb = None def __init__(self): super(qBittorrent, self).__init__() def connect(self): if self.qb is not None: self.qb.logout() url = cleanHost(self.conf('host'), protocol = True, ssl = False) if self.conf('username') and self.conf('password'): self.qb = QBittorrentClient(url) self.qb.login(username=self.conf('username'), password=self.conf('password')) else: self.qb = QBittorrentClient(url) return self.qb._is_authenticated def test(self): """ Check if connection works :return: bool """ return self.connect() def download(self, data = None, media = None, filedata = None): """ Send a torrent/nzb file to the downloader :param data: dict returned from provider Contains the release information :param media: media dict with information Used for creating the filename when possible :param filedata: downloaded torrent/nzb filedata The file gets downloaded in the searcher and send to this function This is done to have failed checking before using the downloader, so the downloader doesn't need to worry about that :return: boolean One faile returns false, but the downloaded should log his own errors """ if not media: media = {} if not data: data = {} log.debug('Sending "%s" to qBittorrent.', (data.get('name'))) if not self.connect(): return False if not filedata and data.get('protocol') == 'torrent': log.error('Failed sending torrent, no data') return False if data.get('protocol') == 'torrent_magnet': # Send request to qBittorrent 
directly as a magnet try: self.qb.download_from_link(data.get('url'), label=self.conf('label')) torrent_hash = re.findall('urn:btih:([\w]{32,40})', data.get('url'))[0].upper() log.info('Torrent [magnet] sent to QBittorrent successfully.') return self.downloadReturnId(torrent_hash) except Exception as e: log.error('Failed to send torrent to qBittorrent: %s', e) return False if data.get('protocol') == 'torrent': info = bdecode(filedata)["info"] torrent_hash = sha1(bencode(info)).hexdigest() # Convert base 32 to hex if len(torrent_hash) == 32: torrent_hash = b16encode(b32decode(torrent_hash)) # Send request to qBittorrent try: self.qb.download_from_file(filedata, label=self.conf('label')) log.info('Torrent [file] sent to QBittorrent successfully.') return self.downloadReturnId(torrent_hash) except Exception as e: log.error('Failed to send torrent to qBittorrent: %s', e) return False def getTorrentStatus(self, torrent): if torrent['state'] in ('uploading', 'queuedUP', 'stalledUP'): return 'seeding' if torrent['progress'] == 1: return 'completed' return 'busy' def getAllDownloadStatus(self, ids): """ Get status of all active downloads :param ids: list of (mixed) downloader ids Used to match the releases for this downloader as there could be other downloaders active that it should ignore :return: list of releases """ log.debug('Checking qBittorrent download status.') if not self.connect(): return [] try: torrents = self.qb.torrents(status='all', label=self.conf('label')) release_downloads = ReleaseDownloadList(self) for torrent in torrents: if torrent['hash'] in ids: torrent_filelist = self.qb.get_torrent_files(torrent['hash']) torrent_files = [] torrent_dir = os.path.join(torrent['save_path'], torrent['name']) if os.path.isdir(torrent_dir): torrent['save_path'] = torrent_dir if len(torrent_filelist) > 1 and os.path.isdir(torrent_dir): # multi file torrent, path.isdir check makes sure we're not in the root download folder for root, _, files in 
os.walk(torrent['save_path']): for f in files: torrent_files.append(sp(os.path.join(root, f))) else: # multi or single file placed directly in torrent.save_path for f in torrent_filelist: file_path = os.path.join(torrent['save_path'], f['name']) if os.path.isfile(file_path): torrent_files.append(sp(file_path)) release_downloads.append({ 'id': torrent['hash'], 'name': torrent['name'], 'status': self.getTorrentStatus(torrent), 'seed_ratio': torrent['ratio'], 'original_status': torrent['state'], 'timeleft': str(timedelta(seconds = torrent['eta'])), 'folder': sp(torrent['save_path']), 'files': torrent_files }) return release_downloads except Exception as e: log.error('Failed to get status from qBittorrent: %s', e) return [] def pause(self, release_download, pause = True): if not self.connect(): return False torrent = self.qb.get_torrent(release_download['id']) if torrent is None: return False if pause: return self.qb.pause(release_download['id']) return self.qb.resume(release_download['id']) def removeFailed(self, release_download): log.info('%s failed downloading, deleting...', release_download['name']) return self.processComplete(release_download, delete_files = True) def processComplete(self, release_download, delete_files): log.debug('Requesting qBittorrent to remove the torrent %s%s.', (release_download['name'], ' and cleanup the downloaded files' if delete_files else '')) if not self.connect(): return False torrent = self.qb.get_torrent(release_download['id']) if torrent is None: return False if delete_files: self.qb.delete_permanently(release_download['id']) # deletes torrent with data else: self.qb.delete(release_download['id']) # just removes the torrent, doesn't delete data return True config = [{ 'name': 'qbittorrent', 'groups': [ { 'tab': 'downloaders', 'list': 'download_providers', 'name': 'qbittorrent', 'label': 'qBittorrent', 'description': 'Use <a href="http://www.qbittorrent.org/" target="_blank">qBittorrent</a> to download torrents.', 'wizard': True, 
'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', 'radio_group': 'torrent', }, { 'name': 'host', 'default': 'http://localhost:8080/', 'description': 'RPC Communication URI. Usually <strong>http://localhost:8080/</strong>' }, { 'name': 'username', }, { 'name': 'password', 'type': 'password', }, { 'name': 'label', 'label': 'Torrent Label', 'default': 'couchpotato', }, { 'name': 'remove_complete', 'label': 'Remove torrent', 'default': False, 'advanced': True, 'type': 'bool', 'description': 'Remove the torrent after it finishes seeding.', }, { 'name': 'delete_files', 'label': 'Remove files', 'default': True, 'type': 'bool', 'advanced': True, 'description': 'Also remove the leftover files.', }, { 'name': 'paused', 'type': 'bool', 'advanced': True, 'default': False, 'description': 'Add the torrent paused.', }, { 'name': 'manual', 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', }, ], } ], }]
9,887
Python
.py
220
30.8
168
0.531572
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,913
transmission.py
CouchPotato_CouchPotatoServer/couchpotato/core/downloaders/transmission.py
from base64 import b64encode from datetime import timedelta import httplib import json import os.path import re import urllib2 from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList from couchpotato.core.helpers.encoding import isInt, sp from couchpotato.core.helpers.variable import tryInt, tryFloat, cleanHost from couchpotato.core.logger import CPLog log = CPLog(__name__) autoload = 'Transmission' class Transmission(DownloaderBase): protocol = ['torrent', 'torrent_magnet'] log = CPLog(__name__) trpc = None def connect(self): # Load host from config and split out port. host = cleanHost(self.conf('host')).rstrip('/').rsplit(':', 1) if not isInt(host[1]): log.error('Config properties are not filled in correctly, port is missing.') return False self.trpc = TransmissionRPC(host[0], port = host[1], rpc_url = self.conf('rpc_url').strip('/ '), username = self.conf('username'), password = self.conf('password')) return self.trpc def download(self, data = None, media = None, filedata = None): """ Send a torrent/nzb file to the downloader :param data: dict returned from provider Contains the release information :param media: media dict with information Used for creating the filename when possible :param filedata: downloaded torrent/nzb filedata The file gets downloaded in the searcher and send to this function This is done to have failed checking before using the downloader, so the downloader doesn't need to worry about that :return: boolean One faile returns false, but the downloaded should log his own errors """ if not media: media = {} if not data: data = {} log.info('Sending "%s" (%s) to Transmission.', (data.get('name'), data.get('protocol'))) if not self.connect(): return False if not filedata and data.get('protocol') == 'torrent': log.error('Failed sending torrent, no data') return False # Set parameters for adding torrent params = { 'paused': self.conf('paused', default = False) } if self.conf('directory'): host = 
cleanHost(self.conf('host')).rstrip('/').rsplit(':', 1) if os.path.isdir(self.conf('directory')) or not (host[0] == '127.0.0.1' or host[0] == 'localhost'): params['download-dir'] = self.conf('directory').rstrip(os.path.sep) else: log.error('Download directory from Transmission settings: %s doesn\'t exist', self.conf('directory')) # Change parameters of torrent torrent_params = {} if data.get('seed_ratio'): torrent_params['seedRatioLimit'] = tryFloat(data.get('seed_ratio')) torrent_params['seedRatioMode'] = 1 if data.get('seed_time'): torrent_params['seedIdleLimit'] = tryInt(data.get('seed_time')) * 60 torrent_params['seedIdleMode'] = 1 # Send request to Transmission if data.get('protocol') == 'torrent_magnet': remote_torrent = self.trpc.add_torrent_uri(data.get('url'), arguments = params) torrent_params['trackerAdd'] = self.torrent_trackers else: remote_torrent = self.trpc.add_torrent_file(b64encode(filedata), arguments = params) if not remote_torrent: log.error('Failed sending torrent to Transmission') return False data = remote_torrent.get('torrent-added') or remote_torrent.get('torrent-duplicate') # Change settings of added torrents if torrent_params: self.trpc.set_torrent(data['hashString'], torrent_params) log.info('Torrent sent to Transmission successfully.') return self.downloadReturnId(data['hashString']) def test(self): """ Check if connection works :return: bool """ if self.connect() and self.trpc.get_session(): return True return False def getAllDownloadStatus(self, ids): """ Get status of all active downloads :param ids: list of (mixed) downloader ids Used to match the releases for this downloader as there could be other downloaders active that it should ignore :return: list of releases """ log.debug('Checking Transmission download status.') if not self.connect(): return [] release_downloads = ReleaseDownloadList(self) return_params = { 'fields': ['id', 'name', 'hashString', 'percentDone', 'status', 'eta', 'isStalled', 'isFinished', 'downloadDir', 
'uploadRatio', 'secondsSeeding', 'seedIdleLimit', 'files'] } session = self.trpc.get_session() queue = self.trpc.get_alltorrents(return_params) if not (queue and queue.get('torrents')): log.debug('Nothing in queue or error') return [] for torrent in queue['torrents']: if torrent['hashString'] in ids: log.debug('name=%s / id=%s / downloadDir=%s / hashString=%s / percentDone=%s / status=%s / isStalled=%s / eta=%s / uploadRatio=%s / isFinished=%s / incomplete-dir-enabled=%s / incomplete-dir=%s', (torrent['name'], torrent['id'], torrent['downloadDir'], torrent['hashString'], torrent['percentDone'], torrent['status'], torrent.get('isStalled', 'N/A'), torrent['eta'], torrent['uploadRatio'], torrent['isFinished'], session['incomplete-dir-enabled'], session['incomplete-dir'])) """ https://trac.transmissionbt.com/browser/branches/2.8x/libtransmission/transmission.h#L1853 0 = Torrent is stopped 1 = Queued to check files 2 = Checking files 3 = Queued to download 4 = Downloading 5 = Queued to seed 6 = Seeding """ status = 'busy' if torrent.get('isStalled') and not torrent['percentDone'] == 1 and self.conf('stalled_as_failed'): status = 'failed' elif torrent['status'] == 0 and torrent['percentDone'] == 1 and torrent['isFinished']: status = 'completed' elif torrent['status'] in [5, 6]: status = 'seeding' if session['incomplete-dir-enabled'] and status == 'busy': torrent_folder = session['incomplete-dir'] else: torrent_folder = torrent['downloadDir'] torrent_files = [] for file_item in torrent['files']: torrent_files.append(sp(os.path.join(torrent_folder, file_item['name']))) release_downloads.append({ 'id': torrent['hashString'], 'name': torrent['name'], 'status': status, 'original_status': torrent['status'], 'seed_ratio': torrent['uploadRatio'], 'timeleft': str(timedelta(seconds = torrent['eta'])), 'folder': sp(torrent_folder if len(torrent_files) == 1 else os.path.join(torrent_folder, torrent['name'])), 'files': torrent_files }) return release_downloads def pause(self, 
release_download, pause = True): if pause: return self.trpc.stop_torrent(release_download['id']) else: return self.trpc.start_torrent(release_download['id']) def removeFailed(self, release_download): log.info('%s failed downloading, deleting...', release_download['name']) return self.trpc.remove_torrent(release_download['id'], True) def processComplete(self, release_download, delete_files = False): log.debug('Requesting Transmission to remove the torrent %s%s.', (release_download['name'], ' and cleanup the downloaded files' if delete_files else '')) return self.trpc.remove_torrent(release_download['id'], delete_files) class TransmissionRPC(object): """TransmissionRPC lite library""" def __init__(self, host = 'http://localhost', port = 9091, rpc_url = 'transmission', username = None, password = None): super(TransmissionRPC, self).__init__() self.url = host + ':' + str(port) + '/' + rpc_url + '/rpc' self.tag = 0 self.session_id = 0 self.session = {} if username and password: password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm() password_manager.add_password(realm = None, uri = self.url, user = username, passwd = password) opener = urllib2.build_opener(urllib2.HTTPBasicAuthHandler(password_manager)) opener.addheaders = [('User-agent', 'couchpotato-transmission-client/1.0')] urllib2.install_opener(opener) elif username or password: log.debug('User or password missing, not using authentication.') self.session = self.get_session() def _request(self, ojson): self.tag += 1 headers = {'x-transmission-session-id': str(self.session_id)} request = urllib2.Request(self.url, json.dumps(ojson).encode('utf-8'), headers) try: open_request = urllib2.urlopen(request) response = json.loads(open_request.read()) log.debug('request: %s', json.dumps(ojson)) log.debug('response: %s', json.dumps(response)) if response['result'] == 'success': log.debug('Transmission action successful') return response['arguments'] else: log.debug('Unknown failure sending command to Transmission. 
Return text is: %s', response['result']) return False except httplib.InvalidURL as err: log.error('Invalid Transmission host, check your config %s', err) return False except urllib2.HTTPError as err: if err.code == 401: log.error('Invalid Transmission Username or Password, check your config') return False elif err.code == 409: msg = str(err.read()) try: self.session_id = \ re.search('X-Transmission-Session-Id:\s*(\w+)', msg).group(1) log.debug('X-Transmission-Session-Id: %s', self.session_id) # #resend request with the updated header return self._request(ojson) except: log.error('Unable to get Transmission Session-Id %s', err) else: log.error('TransmissionRPC HTTPError: %s', err) except urllib2.URLError as err: log.error('Unable to connect to Transmission %s', err) def get_session(self): post_data = {'method': 'session-get', 'tag': self.tag} return self._request(post_data) def add_torrent_uri(self, torrent, arguments): arguments['filename'] = torrent post_data = {'arguments': arguments, 'method': 'torrent-add', 'tag': self.tag} return self._request(post_data) def add_torrent_file(self, torrent, arguments): arguments['metainfo'] = torrent post_data = {'arguments': arguments, 'method': 'torrent-add', 'tag': self.tag} return self._request(post_data) def set_torrent(self, torrent_id, arguments): arguments['ids'] = torrent_id post_data = {'arguments': arguments, 'method': 'torrent-set', 'tag': self.tag} return self._request(post_data) def get_alltorrents(self, arguments): post_data = {'arguments': arguments, 'method': 'torrent-get', 'tag': self.tag} return self._request(post_data) def stop_torrent(self, torrent_id): post_data = {'arguments': {'ids': torrent_id}, 'method': 'torrent-stop', 'tag': self.tag} return self._request(post_data) def start_torrent(self, torrent_id): post_data = {'arguments': {'ids': torrent_id}, 'method': 'torrent-start', 'tag': self.tag} return self._request(post_data) def remove_torrent(self, torrent_id, delete_local_data): post_data = 
{'arguments': {'ids': torrent_id, 'delete-local-data': delete_local_data}, 'method': 'torrent-remove', 'tag': self.tag} return self._request(post_data) config = [{ 'name': 'transmission', 'groups': [ { 'tab': 'downloaders', 'list': 'download_providers', 'name': 'transmission', 'label': 'Transmission', 'description': 'Use <a href="http://www.transmissionbt.com/" target="_blank">Transmission</a> to download torrents.', 'wizard': True, 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', 'radio_group': 'torrent', }, { 'name': 'host', 'default': 'http://localhost:9091', 'description': 'Hostname with port. Usually <strong>http://localhost:9091</strong>', }, { 'name': 'rpc_url', 'type': 'string', 'default': 'transmission', 'advanced': True, 'description': 'Change if you don\'t run Transmission RPC at the default url.', }, { 'name': 'username', }, { 'name': 'password', 'type': 'password', }, { 'name': 'directory', 'type': 'directory', 'description': 'Download to this directory. Keep empty for default Transmission download directory.', }, { 'name': 'remove_complete', 'label': 'Remove torrent', 'default': True, 'advanced': True, 'type': 'bool', 'description': 'Remove the torrent from Transmission after it finished seeding.', }, { 'name': 'delete_files', 'label': 'Remove files', 'default': True, 'type': 'bool', 'advanced': True, 'description': 'Also remove the leftover files.', }, { 'name': 'paused', 'type': 'bool', 'advanced': True, 'default': False, 'description': 'Add the torrent paused.', }, { 'name': 'manual', 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', }, { 'name': 'stalled_as_failed', 'default': True, 'advanced': True, 'type': 'bool', 'description': 'Consider a stalled torrent as failed', }, { 'name': 'delete_failed', 'default': True, 'advanced': True, 'type': 'bool', 'description': 'Delete a release after the download has failed.', }, ], } ], 
}]
15,941
Python
.py
325
35.769231
307
0.554484
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,914
sabnzbd.py
CouchPotato_CouchPotatoServer/couchpotato/core/downloaders/sabnzbd.py
from datetime import timedelta from urllib2 import URLError import json import os import traceback from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList from couchpotato.core.helpers.encoding import tryUrlencode, ss, sp from couchpotato.core.helpers.variable import cleanHost, mergeDicts from couchpotato.core.logger import CPLog from couchpotato.environment import Env log = CPLog(__name__) autoload = 'Sabnzbd' class Sabnzbd(DownloaderBase): protocol = ['nzb'] def download(self, data = None, media = None, filedata = None): """ Send a torrent/nzb file to the downloader :param data: dict returned from provider Contains the release information :param media: media dict with information Used for creating the filename when possible :param filedata: downloaded torrent/nzb filedata The file gets downloaded in the searcher and send to this function This is done to have failed checking before using the downloader, so the downloader doesn't need to worry about that :return: boolean One faile returns false, but the downloaded should log his own errors """ if not media: media = {} if not data: data = {} log.info('Sending "%s" to SABnzbd.', data.get('name')) req_params = { 'cat': self.conf('category'), 'mode': 'addurl', 'nzbname': self.createNzbName(data, media), 'priority': self.conf('priority'), } nzb_filename = None if filedata: if len(filedata) < 50: log.error('No proper nzb available: %s', filedata) return False # If it's a .rar, it adds the .rar extension, otherwise it stays .nzb nzb_filename = self.createFileName(data, filedata, media) req_params['mode'] = 'addfile' else: req_params['name'] = data.get('url') try: if nzb_filename and req_params.get('mode') is 'addfile': sab_data = self.call(req_params, files = {'nzbfile': (ss(nzb_filename), filedata)}) else: sab_data = self.call(req_params) except URLError: log.error('Failed sending release, probably wrong HOST: %s', traceback.format_exc(0)) return False except: log.error('Failed sending 
release, use API key, NOT the NZB key: %s', traceback.format_exc(0)) return False log.debug('Result from SAB: %s', sab_data) nzo_ids = sab_data.get('nzo_ids', []) if sab_data.get('status') and not sab_data.get('error') and isinstance(nzo_ids, list) and len(nzo_ids) > 0: log.info('NZB sent to SAB successfully.') if filedata: return self.downloadReturnId(nzo_ids[0]) else: return True else: log.error('Error getting data from SABNZBd: %s', sab_data) return False def test(self): """ Check if connection works Return message if an old version of SAB is used :return: bool """ try: sab_data = self.call({ 'mode': 'version', }) v = sab_data.split('.') if sab_data != 'develop' and int(v[0]) == 0 and int(v[1]) < 7: return False, 'Your Sabnzbd client is too old, please update to newest version.' # the version check will work even with wrong api key, so we need the next check as well sab_data = self.call({ 'mode': 'queue', }) if not sab_data: return False except: return False return True def getAllDownloadStatus(self, ids): """ Get status of all active downloads :param ids: list of (mixed) downloader ids Used to match the releases for this downloader as there could be other downloaders active that it should ignore :return: list of releases """ log.debug('Checking SABnzbd download status.') # Go through Queue try: queue = self.call({ 'mode': 'queue', }) except: log.error('Failed getting queue: %s', traceback.format_exc(1)) return [] # Go through history items try: history = self.call({ 'mode': 'history', 'limit': 15, }) except: log.error('Failed getting history json: %s', traceback.format_exc(1)) return [] release_downloads = ReleaseDownloadList(self) # Get busy releases for nzb in queue.get('slots', []): if nzb['nzo_id'] in ids: status = 'busy' if 'ENCRYPTED / ' in nzb['filename']: status = 'failed' release_downloads.append({ 'id': nzb['nzo_id'], 'name': nzb['filename'], 'status': status, 'original_status': nzb['status'], 'timeleft': nzb['timeleft'] if not queue['paused'] else 
-1, }) # Get old releases for nzb in history.get('slots', []): if nzb['nzo_id'] in ids: status = 'busy' if nzb['status'] == 'Failed' or (nzb['status'] == 'Completed' and nzb['fail_message'].strip()): status = 'failed' elif nzb['status'] == 'Completed': status = 'completed' release_downloads.append({ 'id': nzb['nzo_id'], 'name': nzb['name'], 'status': status, 'original_status': nzb['status'], 'timeleft': str(timedelta(seconds = 0)), 'folder': sp(os.path.dirname(nzb['storage']) if os.path.isfile(nzb['storage']) else nzb['storage']), }) return release_downloads def removeFailed(self, release_download): log.info('%s failed downloading, deleting...', release_download['name']) try: self.call({ 'mode': 'queue', 'name': 'delete', 'del_files': '1', 'value': release_download['id'] }, use_json = False) self.call({ 'mode': 'history', 'name': 'delete', 'del_files': '1', 'value': release_download['id'] }, use_json = False) except: log.error('Failed deleting: %s', traceback.format_exc(0)) return False return True def processComplete(self, release_download, delete_files = False): log.debug('Requesting SabNZBd to remove the NZB %s.', release_download['name']) try: self.call({ 'mode': 'history', 'name': 'delete', 'del_files': '0', 'value': release_download['id'] }, use_json = False) except: log.error('Failed removing: %s', traceback.format_exc(0)) return False return True def call(self, request_params, use_json = True, **kwargs): url = cleanHost(self.conf('host'), ssl = self.conf('ssl')) + 'api?' 
+ tryUrlencode(mergeDicts(request_params, { 'apikey': self.conf('api_key'), 'output': 'json' })) data = self.urlopen(url, timeout = 60, show_error = False, headers = {'User-Agent': Env.getIdentifier()}, **kwargs) if use_json: d = json.loads(data) if d.get('error'): log.error('Error getting data from SABNZBd: %s', d.get('error')) return {} return d.get(request_params['mode']) or d else: return data config = [{ 'name': 'sabnzbd', 'groups': [ { 'tab': 'downloaders', 'list': 'download_providers', 'name': 'sabnzbd', 'label': 'Sabnzbd', 'description': 'Use <a href="http://sabnzbd.org/" target="_blank">SABnzbd</a> (0.7+) to download NZBs.', 'wizard': True, 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', 'radio_group': 'nzb', }, { 'name': 'host', 'default': 'localhost:8080', }, { 'name': 'ssl', 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'Use HyperText Transfer Protocol Secure, or <strong>https</strong>', }, { 'name': 'api_key', 'label': 'Api Key', 'description': 'Used for all calls to Sabnzbd.', }, { 'name': 'category', 'label': 'Category', 'description': 'The category CP places the nzb in. Like <strong>movies</strong> or <strong>couchpotato</strong>', }, { 'name': 'priority', 'label': 'Priority', 'type': 'dropdown', 'default': '0', 'advanced': True, 'values': [('Paused', -2), ('Low', -1), ('Normal', 0), ('High', 1), ('Forced', 2)], 'description': 'Add to the queue with this priority.', }, { 'name': 'manual', 'default': False, 'type': 'bool', 'advanced': True, 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', }, { 'name': 'remove_complete', 'advanced': True, 'label': 'Remove NZB', 'default': False, 'type': 'bool', 'description': 'Remove the NZB from history after it completed.', }, { 'name': 'delete_failed', 'default': True, 'advanced': True, 'type': 'bool', 'description': 'Delete a release after the download has failed.', }, ], } ], }]
10,823
Python
.py
265
27.132075
133
0.492581
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,915
nzbvortex.py
CouchPotato_CouchPotatoServer/couchpotato/core/downloaders/nzbvortex.py
from base64 import b64encode import os from uuid import uuid4 import hashlib import traceback from requests import HTTPError from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList from couchpotato.core.helpers.encoding import tryUrlencode, sp from couchpotato.core.helpers.variable import cleanHost from couchpotato.core.logger import CPLog log = CPLog(__name__) autoload = 'NZBVortex' class NZBVortex(DownloaderBase): protocol = ['nzb'] api_level = None session_id = None def download(self, data = None, media = None, filedata = None): """ Send a torrent/nzb file to the downloader :param data: dict returned from provider Contains the release information :param media: media dict with information Used for creating the filename when possible :param filedata: downloaded torrent/nzb filedata The file gets downloaded in the searcher and send to this function This is done to have failed checking before using the downloader, so the downloader doesn't need to worry about that :return: boolean One faile returns false, but the downloaded should log his own errors """ if not media: media = {} if not data: data = {} # Send the nzb try: nzb_filename = self.createFileName(data, filedata, media, unique_tag = True) response = self.call('nzb/add', files = {'file': (nzb_filename, filedata, 'application/octet-stream')}, parameters = { 'name': nzb_filename, 'groupname': self.conf('group') }) if response and response.get('result', '').lower() == 'ok': return self.downloadReturnId(nzb_filename) log.error('Something went wrong sending the NZB file. 
Response: %s', response) return False except: log.error('Something went wrong sending the NZB file: %s', traceback.format_exc()) return False def test(self): """ Check if connection works :return: bool """ try: login_result = self.login() except: return False return login_result def getAllDownloadStatus(self, ids): """ Get status of all active downloads :param ids: list of (mixed) downloader ids Used to match the releases for this downloader as there could be other downloaders active that it should ignore :return: list of releases """ raw_statuses = self.call('nzb') release_downloads = ReleaseDownloadList(self) for nzb in raw_statuses.get('nzbs', []): nzb_id = os.path.basename(nzb['nzbFileName']) if nzb_id in ids: # Check status status = 'busy' if nzb['state'] == 20: status = 'completed' elif nzb['state'] in [21, 22, 24]: status = 'failed' release_downloads.append({ 'temp_id': nzb['id'], 'id': nzb_id, 'name': nzb['uiTitle'], 'status': status, 'original_status': nzb['state'], 'timeleft': -1, 'folder': sp(nzb['destinationPath']), }) return release_downloads def removeFailed(self, release_download): log.info('%s failed downloading, deleting...', release_download['name']) try: self.call('nzb/%s/cancel' % release_download['temp_id']) except: log.error('Failed deleting: %s', traceback.format_exc(0)) return False return True def login(self): nonce = self.call('auth/nonce', auth = False).get('authNonce') cnonce = uuid4().hex hashed = b64encode(hashlib.sha256('%s:%s:%s' % (nonce, cnonce, self.conf('api_key'))).digest()) params = { 'nonce': nonce, 'cnonce': cnonce, 'hash': hashed } login_data = self.call('auth/login', parameters = params, auth = False) # Save for later if login_data.get('loginResult') == 'successful': self.session_id = login_data.get('sessionID') return True log.error('Login failed, please check you api-key') return False def call(self, call, parameters = None, is_repeat = False, auth = True, *args, **kwargs): # Login first if not parameters: parameters = {} 
if not self.session_id and auth: self.login() # Always add session id to request if self.session_id: parameters['sessionid'] = self.session_id params = tryUrlencode(parameters) url = cleanHost(self.conf('host')) + 'api/' + call try: data = self.getJsonData('%s%s' % (url, '?' + params if params else ''), *args, cache_timeout = 0, show_error = False, **kwargs) if data: return data except HTTPError as e: sc = e.response.status_code if sc == 403: # Try login and do again if not is_repeat: self.login() return self.call(call, parameters = parameters, is_repeat = True, **kwargs) log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) except: log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) return {} def getApiLevel(self): if not self.api_level: try: data = self.call('app/apilevel', auth = False) self.api_level = float(data.get('apilevel')) except HTTPError as e: sc = e.response.status_code if sc == 403: log.error('This version of NZBVortex isn\'t supported. Please update to 2.8.6 or higher') else: log.error('NZBVortex doesn\'t seem to be running or maybe the remote option isn\'t enabled yet: %s', traceback.format_exc(1)) return self.api_level def isEnabled(self, manual = False, data = None): if not data: data = {} return super(NZBVortex, self).isEnabled(manual, data) and self.getApiLevel() config = [{ 'name': 'nzbvortex', 'groups': [ { 'tab': 'downloaders', 'list': 'download_providers', 'name': 'nzbvortex', 'label': 'NZBVortex', 'description': 'Use <a href="https://www.nzbvortex.com/" target="_blank">NZBVortex</a> to download NZBs.', 'wizard': True, 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', 'radio_group': 'nzb', }, { 'name': 'host', 'default': 'https://localhost:4321', 'description': 'Hostname with port. Usually <strong>https://localhost:4321</strong>', }, { 'name': 'api_key', 'label': 'Api Key', }, { 'name': 'group', 'label': 'Group', 'description': 'The group CP places the nzb in. 
Make sure to create it in NZBVortex.', }, { 'name': 'manual', 'default': False, 'type': 'bool', 'advanced': True, 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', }, { 'name': 'delete_failed', 'default': True, 'advanced': True, 'type': 'bool', 'description': 'Delete a release after the download has failed.', }, ], } ], }]
8,221
Python
.py
195
29.517949
145
0.533977
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,916
nzbget.py
CouchPotato_CouchPotatoServer/couchpotato/core/downloaders/nzbget.py
from base64 import standard_b64encode from datetime import timedelta import re import shutil import socket import traceback import xmlrpclib from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList from couchpotato.core.helpers.encoding import ss, sp from couchpotato.core.helpers.variable import tryInt, md5, cleanHost from couchpotato.core.logger import CPLog log = CPLog(__name__) autoload = 'NZBGet' class NZBGet(DownloaderBase): protocol = ['nzb'] rpc = 'xmlrpc' def download(self, data = None, media = None, filedata = None): """ Send a torrent/nzb file to the downloader :param data: dict returned from provider Contains the release information :param media: media dict with information Used for creating the filename when possible :param filedata: downloaded torrent/nzb filedata The file gets downloaded in the searcher and send to this function This is done to have failed checking before using the downloader, so the downloader doesn't need to worry about that :return: boolean One faile returns false, but the downloaded should log his own errors """ if not media: media = {} if not data: data = {} if not filedata: log.error('Unable to get NZB file: %s', traceback.format_exc()) return False log.info('Sending "%s" to NZBGet.', data.get('name')) nzb_name = ss('%s.nzb' % self.createNzbName(data, media)) rpc = self.getRPC() try: if rpc.writelog('INFO', 'CouchPotato connected to drop off %s.' % nzb_name): log.debug('Successfully connected to NZBGet') else: log.info('Successfully connected to NZBGet, but unable to send a message') except socket.error: log.error('NZBGet is not responding. 
Please ensure that NZBGet is running and host setting is correct.') return False except xmlrpclib.ProtocolError as e: if e.errcode == 401: log.error('Password is incorrect.') else: log.error('Protocol Error: %s', e) return False if re.search(r"^0", rpc.version()): xml_response = rpc.append(nzb_name, self.conf('category'), False, standard_b64encode(filedata.strip())) else: xml_response = rpc.append(nzb_name, self.conf('category'), tryInt(self.conf('priority')), False, standard_b64encode(filedata.strip())) if xml_response: log.info('NZB sent successfully to NZBGet') nzb_id = md5(data['url']) # about as unique as they come ;) couchpotato_id = "couchpotato=" + nzb_id groups = rpc.listgroups() file_id = [item['LastID'] for item in groups if item['NZBFilename'] == nzb_name] confirmed = rpc.editqueue("GroupSetParameter", 0, couchpotato_id, file_id) if confirmed: log.debug('couchpotato parameter set in nzbget download') return self.downloadReturnId(nzb_id) else: log.error('NZBGet could not add %s to the queue.', nzb_name) return False def test(self): """ Check if connection works :return: bool """ rpc = self.getRPC() try: if rpc.writelog('INFO', 'CouchPotato connected to test connection'): log.debug('Successfully connected to NZBGet') else: log.info('Successfully connected to NZBGet, but unable to send a message') except socket.error: log.error('NZBGet is not responding. 
Please ensure that NZBGet is running and host setting is correct.') return False except xmlrpclib.ProtocolError as e: if e.errcode == 401: log.error('Password is incorrect.') else: log.error('Protocol Error: %s', e) return False return True def getAllDownloadStatus(self, ids): """ Get status of all active downloads :param ids: list of (mixed) downloader ids Used to match the releases for this downloader as there could be other downloaders active that it should ignore :return: list of releases """ log.debug('Checking NZBGet download status.') rpc = self.getRPC() try: if rpc.writelog('DETAIL', 'CouchPotato connected to check status'): log.debug('Successfully connected to NZBGet') else: log.info('Successfully connected to NZBGet, but unable to send a message') except socket.error: log.error('NZBGet is not responding. Please ensure that NZBGet is running and host setting is correct.') return [] except xmlrpclib.ProtocolError as e: if e.errcode == 401: log.error('Password is incorrect.') else: log.error('Protocol Error: %s', e) return [] # Get NZBGet data try: status = rpc.status() groups = rpc.listgroups() queue = rpc.postqueue(0) history = rpc.history() except: log.error('Failed getting data: %s', traceback.format_exc(1)) return [] release_downloads = ReleaseDownloadList(self) for nzb in groups: try: nzb_id = [param['Value'] for param in nzb['Parameters'] if param['Name'] == 'couchpotato'][0] except: nzb_id = nzb['NZBID'] if nzb_id in ids: log.debug('Found %s in NZBGet download queue', nzb['NZBFilename']) timeleft = -1 try: if nzb['ActiveDownloads'] > 0 and nzb['DownloadRate'] > 0 and not (status['DownloadPaused'] or status['Download2Paused']): timeleft = str(timedelta(seconds = nzb['RemainingSizeMB'] / status['DownloadRate'] * 2 ^ 20)) except: pass release_downloads.append({ 'id': nzb_id, 'name': nzb['NZBFilename'], 'original_status': 'DOWNLOADING' if nzb['ActiveDownloads'] > 0 else 'QUEUED', # Seems to have no native API function for time left. 
This will return the time left after NZBGet started downloading this item 'timeleft': timeleft, }) for nzb in queue: # 'Parameters' is not passed in rpc.postqueue if nzb['NZBID'] in ids: log.debug('Found %s in NZBGet postprocessing queue', nzb['NZBFilename']) release_downloads.append({ 'id': nzb['NZBID'], 'name': nzb['NZBFilename'], 'original_status': nzb['Stage'], 'timeleft': str(timedelta(seconds = 0)) if not status['PostPaused'] else -1, }) for nzb in history: try: nzb_id = [param['Value'] for param in nzb['Parameters'] if param['Name'] == 'couchpotato'][0] except: nzb_id = nzb['NZBID'] if nzb_id in ids: log.debug('Found %s in NZBGet history. TotalStatus: %s, ParStatus: %s, ScriptStatus: %s, Log: %s', (nzb['NZBFilename'] , nzb['Status'], nzb['ParStatus'], nzb['ScriptStatus'] , nzb['Log'])) release_downloads.append({ 'id': nzb_id, 'name': nzb['NZBFilename'], 'status': 'completed' if 'SUCCESS' in nzb['Status'] else 'failed', 'original_status': nzb['Status'], 'timeleft': str(timedelta(seconds = 0)), 'folder': sp(nzb['DestDir']) }) return release_downloads def removeFailed(self, release_download): log.info('%s failed downloading, deleting...', release_download['name']) rpc = self.getRPC() try: if rpc.writelog('INFO', 'CouchPotato connected to delete some history'): log.debug('Successfully connected to NZBGet') else: log.info('Successfully connected to NZBGet, but unable to send a message') except socket.error: log.error('NZBGet is not responding. 
Please ensure that NZBGet is running and host setting is correct.') return False except xmlrpclib.ProtocolError as e: if e.errcode == 401: log.error('Password is incorrect.') else: log.error('Protocol Error: %s', e) return False try: history = rpc.history() nzb_id = None path = None for hist in history: for param in hist['Parameters']: if param['Name'] == 'couchpotato' and param['Value'] == release_download['id']: nzb_id = hist['ID'] path = hist['DestDir'] if nzb_id and path and rpc.editqueue('HistoryDelete', 0, "", [tryInt(nzb_id)]): shutil.rmtree(path, True) except: log.error('Failed deleting: %s', traceback.format_exc(0)) return False return True def getRPC(self): url = cleanHost(host = self.conf('host'), ssl = self.conf('ssl'), username = self.conf('username'), password = self.conf('password')) + self.rpc return xmlrpclib.ServerProxy(url) config = [{ 'name': 'nzbget', 'groups': [ { 'tab': 'downloaders', 'list': 'download_providers', 'name': 'nzbget', 'label': 'NZBGet', 'description': 'Use <a href="http://nzbget.net" target="_blank">NZBGet</a> to download NZBs.', 'wizard': True, 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', 'radio_group': 'nzb', }, { 'name': 'host', 'default': 'localhost:6789', 'description': 'Hostname with port. Usually <strong>localhost:6789</strong>', }, { 'name': 'ssl', 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'Use HyperText Transfer Protocol Secure, or <strong>https</strong>', }, { 'name': 'username', 'default': 'nzbget', 'advanced': True, 'description': 'Set a different username to connect. Default: nzbget', }, { 'name': 'password', 'type': 'password', 'description': 'Default NZBGet password is <i>tegbzn6789</i>', }, { 'name': 'category', 'default': 'Movies', 'description': 'The category CP places the nzb in. 
Like <strong>movies</strong> or <strong>couchpotato</strong>', }, { 'name': 'priority', 'advanced': True, 'default': '0', 'type': 'dropdown', 'values': [('Very Low', -100), ('Low', -50), ('Normal', 0), ('High', 50), ('Very High', 100), ('Forced', 900)], 'description': 'Only change this if you are using NZBget 13.0 or higher', }, { 'name': 'manual', 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', }, { 'name': 'delete_failed', 'default': True, 'advanced': True, 'type': 'bool', 'description': 'Delete a release after the download has failed.', }, ], } ], }]
12,457
Python
.py
272
31.893382
204
0.530769
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,917
utorrent.py
CouchPotato_CouchPotatoServer/couchpotato/core/downloaders/utorrent.py
from base64 import b16encode, b32decode from datetime import timedelta from hashlib import sha1 import cookielib import httplib import json import os import re import stat import time import urllib import urllib2 from bencode import bencode as benc, bdecode from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList from couchpotato.core.helpers.encoding import isInt, ss, sp from couchpotato.core.helpers.variable import tryInt, tryFloat, cleanHost from couchpotato.core.logger import CPLog from multipartpost import MultipartPostHandler log = CPLog(__name__) autoload = 'uTorrent' class uTorrent(DownloaderBase): protocol = ['torrent', 'torrent_magnet'] utorrent_api = None status_flags = { 'STARTED': 1, 'CHECKING': 2, 'CHECK-START': 4, 'CHECKED': 8, 'ERROR': 16, 'PAUSED': 32, 'QUEUED': 64, 'LOADED': 128 } def connect(self): # Load host from config and split out port. host = cleanHost(self.conf('host'), protocol = False).split(':') if not isInt(host[1]): log.error('Config properties are not filled in correctly, port is missing.') return False self.utorrent_api = uTorrentAPI(host[0], port = host[1], username = self.conf('username'), password = self.conf('password')) return self.utorrent_api def download(self, data = None, media = None, filedata = None): """ Send a torrent/nzb file to the downloader :param data: dict returned from provider Contains the release information :param media: media dict with information Used for creating the filename when possible :param filedata: downloaded torrent/nzb filedata The file gets downloaded in the searcher and send to this function This is done to have failed checking before using the downloader, so the downloader doesn't need to worry about that :return: boolean One faile returns false, but the downloaded should log his own errors """ if not media: media = {} if not data: data = {} log.debug("Sending '%s' (%s) to uTorrent.", (data.get('name'), data.get('protocol'))) if not self.connect(): return False 
torrent_params = {} if self.conf('label'): torrent_params['label'] = self.conf('label') if not filedata and data.get('protocol') == 'torrent': log.error('Failed sending torrent, no data') return False if data.get('protocol') == 'torrent_magnet': torrent_hash = re.findall('urn:btih:([\w]{32,40})', data.get('url'))[0].upper() torrent_params['trackers'] = '%0D%0A%0D%0A'.join(self.torrent_trackers) else: info = bdecode(filedata)['info'] torrent_hash = sha1(benc(info)).hexdigest().upper() torrent_filename = self.createFileName(data, filedata, media) if data.get('seed_ratio'): torrent_params['seed_override'] = 1 torrent_params['seed_ratio'] = tryInt(tryFloat(data['seed_ratio']) * 1000) if data.get('seed_time'): torrent_params['seed_override'] = 1 torrent_params['seed_time'] = tryInt(data['seed_time']) * 3600 # Convert base 32 to hex if len(torrent_hash) == 32: torrent_hash = b16encode(b32decode(torrent_hash)) # Send request to uTorrent if data.get('protocol') == 'torrent_magnet': self.utorrent_api.add_torrent_uri(torrent_filename, data.get('url')) else: self.utorrent_api.add_torrent_file(torrent_filename, filedata) # Change settings of added torrent self.utorrent_api.set_torrent(torrent_hash, torrent_params) if self.conf('paused', default = 0): self.utorrent_api.pause_torrent(torrent_hash) return self.downloadReturnId(torrent_hash) def test(self): """ Check if connection works :return: bool """ if self.connect(): build_version = self.utorrent_api.get_build() if not build_version: return False if build_version < 25406: # This build corresponds to version 3.0.0 stable return False, 'Your uTorrent client is too old, please update to newest version.' 
return True return False def getAllDownloadStatus(self, ids): """ Get status of all active downloads :param ids: list of (mixed) downloader ids Used to match the releases for this downloader as there could be other downloaders active that it should ignore :return: list of releases """ log.debug('Checking uTorrent download status.') if not self.connect(): return [] release_downloads = ReleaseDownloadList(self) data = self.utorrent_api.get_status() if not data: log.error('Error getting data from uTorrent') return [] queue = json.loads(data) if queue.get('error'): log.error('Error getting data from uTorrent: %s', queue.get('error')) return [] if not queue.get('torrents'): log.debug('Nothing in queue') return [] # Get torrents for torrent in queue['torrents']: if torrent[0] in ids: #Get files of the torrent torrent_files = [] try: torrent_files = json.loads(self.utorrent_api.get_files(torrent[0])) torrent_files = [sp(os.path.join(torrent[26], torrent_file[0])) for torrent_file in torrent_files['files'][1]] except: log.debug('Failed getting files from torrent: %s', torrent[2]) status = 'busy' if (torrent[1] & self.status_flags['STARTED'] or torrent[1] & self.status_flags['QUEUED']) and torrent[4] == 1000: status = 'seeding' elif torrent[1] & self.status_flags['ERROR'] and 'There is not enough space on the disk' not in torrent[21]: status = 'failed' elif torrent[4] == 1000: status = 'completed' if not status == 'busy': self.removeReadOnly(torrent_files) release_downloads.append({ 'id': torrent[0], 'name': torrent[2], 'status': status, 'seed_ratio': float(torrent[7]) / 1000, 'original_status': torrent[1], 'timeleft': str(timedelta(seconds = torrent[10])), 'folder': sp(torrent[26]), 'files': torrent_files }) return release_downloads def pause(self, release_download, pause = True): if not self.connect(): return False return self.utorrent_api.pause_torrent(release_download['id'], pause) def removeFailed(self, release_download): log.info('%s failed downloading, deleting...', 
release_download['name']) if not self.connect(): return False return self.utorrent_api.remove_torrent(release_download['id'], remove_data = True) def processComplete(self, release_download, delete_files = False): log.debug('Requesting uTorrent to remove the torrent %s%s.', (release_download['name'], ' and cleanup the downloaded files' if delete_files else '')) if not self.connect(): return False return self.utorrent_api.remove_torrent(release_download['id'], remove_data = delete_files) def removeReadOnly(self, files): #Removes all read-on ly flags in a for all files for filepath in files: if os.path.isfile(filepath): #Windows only needs S_IWRITE, but we bitwise-or with current perms to preserve other permission bits on Linux os.chmod(filepath, stat.S_IWRITE | os.stat(filepath).st_mode) class uTorrentAPI(object): def __init__(self, host = 'localhost', port = 8000, username = None, password = None): super(uTorrentAPI, self).__init__() self.url = 'http://' + str(host) + ':' + str(port) + '/gui/' self.token = '' self.last_time = time.time() cookies = cookielib.CookieJar() self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies), MultipartPostHandler) self.opener.addheaders = [('User-agent', 'couchpotato-utorrent-client/1.0')] if username and password: password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm() password_manager.add_password(realm = None, uri = self.url, user = username, passwd = password) self.opener.add_handler(urllib2.HTTPBasicAuthHandler(password_manager)) elif username or password: log.debug('User or password missing, not using authentication.') self.token = self.get_token() def _request(self, action, data = None): if time.time() > self.last_time + 1800: self.last_time = time.time() self.token = self.get_token() request = urllib2.Request(self.url + '?token=' + self.token + '&' + action, data) try: open_request = self.opener.open(request) response = open_request.read() if response: return response else: log.debug('Unknown failure 
sending command to uTorrent. Return text is: %s', response) except httplib.InvalidURL as err: log.error('Invalid uTorrent host, check your config %s', err) except urllib2.HTTPError as err: if err.code == 401: log.error('Invalid uTorrent Username or Password, check your config') else: log.error('uTorrent HTTPError: %s', err) except urllib2.URLError as err: log.error('Unable to connect to uTorrent %s', err) return False def get_token(self): request = self.opener.open(self.url + 'token.html') token = re.findall('<div.*?>(.*?)</', request.read())[0] return token def add_torrent_uri(self, filename, torrent, add_folder = False): action = 'action=add-url&s=%s' % urllib.quote(torrent) if add_folder: action += '&path=%s' % urllib.quote(filename) return self._request(action) def add_torrent_file(self, filename, filedata, add_folder = False): action = 'action=add-file' if add_folder: action += '&path=%s' % urllib.quote(filename) return self._request(action, {'torrent_file': (ss(filename), filedata)}) def set_torrent(self, hash, params): action = 'action=setprops&hash=%s' % hash for k, v in params.items(): action += '&s=%s&v=%s' % (k, v) return self._request(action) def pause_torrent(self, hash, pause = True): if pause: action = 'action=pause&hash=%s' % hash else: action = 'action=unpause&hash=%s' % hash return self._request(action) def stop_torrent(self, hash): action = 'action=stop&hash=%s' % hash return self._request(action) def remove_torrent(self, hash, remove_data = False): if remove_data: action = 'action=removedata&hash=%s' % hash else: action = 'action=remove&hash=%s' % hash return self._request(action) def get_status(self): action = 'list=1' return self._request(action) def get_settings(self): action = 'action=getsettings' settings_dict = {} try: utorrent_settings = json.loads(self._request(action)) # Create settings dict for setting in utorrent_settings['settings']: if setting[1] == 0: # int settings_dict[setting[0]] = int(setting[2] if not setting[2].strip() == '' 
else '0') elif setting[1] == 1: # bool settings_dict[setting[0]] = True if setting[2] == 'true' else False elif setting[1] == 2: # string settings_dict[setting[0]] = setting[2] #log.debug('uTorrent settings: %s', settings_dict) except Exception as err: log.error('Failed to get settings from uTorrent: %s', err) return settings_dict def set_settings(self, settings_dict = None): if not settings_dict: settings_dict = {} for key in settings_dict: if isinstance(settings_dict[key], bool): settings_dict[key] = 1 if settings_dict[key] else 0 action = 'action=setsetting' + ''.join(['&s=%s&v=%s' % (key, value) for (key, value) in settings_dict.items()]) return self._request(action) def get_files(self, hash): action = 'action=getfiles&hash=%s' % hash return self._request(action) def get_build(self): data = self._request('') if not data: return False response = json.loads(data) return int(response.get('build')) config = [{ 'name': 'utorrent', 'groups': [ { 'tab': 'downloaders', 'list': 'download_providers', 'name': 'utorrent', 'label': 'uTorrent', 'description': 'Use <a href="http://www.utorrent.com/" target="_blank">uTorrent</a> (3.0+) to download torrents.', 'wizard': True, 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', 'radio_group': 'torrent', }, { 'name': 'host', 'default': 'localhost:8000', 'description': 'Port can be found in settings when enabling WebUI.', }, { 'name': 'username', }, { 'name': 'password', 'type': 'password', }, { 'name': 'label', 'description': 'Label to add torrent as.', }, { 'name': 'remove_complete', 'label': 'Remove torrent', 'default': True, 'advanced': True, 'type': 'bool', 'description': 'Remove the torrent from uTorrent after it finished seeding.', }, { 'name': 'delete_files', 'label': 'Remove files', 'default': True, 'type': 'bool', 'advanced': True, 'description': 'Also remove the leftover files.', }, { 'name': 'paused', 'type': 'bool', 'advanced': True, 'default': False, 'description': 'Add the torrent paused.', }, { 
'name': 'manual', 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', }, { 'name': 'delete_failed', 'default': True, 'advanced': True, 'type': 'bool', 'description': 'Delete a release after the download has failed.', }, ], } ], }]
15,959
Python
.py
358
32.488827
157
0.558125
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,918
synology.py
CouchPotato_CouchPotatoServer/couchpotato/core/downloaders/synology.py
import json import traceback from couchpotato.core._base.downloader.main import DownloaderBase from couchpotato.core.helpers.encoding import isInt from couchpotato.core.helpers.variable import cleanHost from couchpotato.core.logger import CPLog import requests log = CPLog(__name__) autoload = 'Synology' class Synology(DownloaderBase): protocol = ['nzb', 'torrent', 'torrent_magnet'] status_support = False def download(self, data = None, media = None, filedata = None): """ Send a torrent/nzb file to the downloader :param data: dict returned from provider Contains the release information :param media: media dict with information Used for creating the filename when possible :param filedata: downloaded torrent/nzb filedata The file gets downloaded in the searcher and send to this function This is done to have fail checking before using the downloader, so the downloader doesn't need to worry about that :return: boolean One fail returns false, but the downloader should log his own errors """ if not media: media = {} if not data: data = {} response = False log.info('Sending "%s" (%s) to Synology.', (data['name'], data['protocol'])) # Load host from config and split out port. host = cleanHost(self.conf('host'), protocol = False).split(':') if not isInt(host[1]): log.error('Config properties are not filled in correctly, port is missing.') return False try: # Send request to Synology srpc = SynologyRPC(host[0], host[1], self.conf('username'), self.conf('password'), self.conf('destination')) if data['protocol'] == 'torrent_magnet': log.info('Adding torrent URL %s', data['url']) response = srpc.create_task(url = data['url']) elif data['protocol'] in ['nzb', 'torrent']: log.info('Adding %s' % data['protocol']) if not filedata: log.error('No %s data found', data['protocol']) else: filename = data['name'] + '.' 
+ data['protocol'] response = srpc.create_task(filename = filename, filedata = filedata) except: log.error('Exception while adding torrent: %s', traceback.format_exc()) finally: return self.downloadReturnId('') if response else False def test(self): """ Check if connection works :return: bool """ host = cleanHost(self.conf('host'), protocol = False).split(':') try: srpc = SynologyRPC(host[0], host[1], self.conf('username'), self.conf('password')) test_result = srpc.test() except: return False return test_result def getEnabledProtocol(self): if self.conf('use_for') == 'both': return super(Synology, self).getEnabledProtocol() elif self.conf('use_for') == 'torrent': return ['torrent', 'torrent_magnet'] else: return ['nzb'] def isEnabled(self, manual = False, data = None): if not data: data = {} for_protocol = ['both'] if data and 'torrent' in data.get('protocol'): for_protocol.append('torrent') elif data: for_protocol.append(data.get('protocol')) return super(Synology, self).isEnabled(manual, data) and\ ((self.conf('use_for') in for_protocol)) class SynologyRPC(object): """SynologyRPC lite library""" def __init__(self, host = 'localhost', port = 5000, username = None, password = None, destination = None): super(SynologyRPC, self).__init__() self.download_url = 'http://%s:%s/webapi/DownloadStation/task.cgi' % (host, port) self.auth_url = 'http://%s:%s/webapi/auth.cgi' % (host, port) self.sid = None self.username = username self.password = password self.destination = destination self.session_name = 'DownloadStation' def _login(self): if self.username and self.password: args = {'api': 'SYNO.API.Auth', 'account': self.username, 'passwd': self.password, 'version': 2, 'method': 'login', 'session': self.session_name, 'format': 'sid'} response = self._req(self.auth_url, args) if response['success']: self.sid = response['data']['sid'] log.debug('sid=%s', self.sid) else: log.error('Couldn\'t log into Synology, %s', response) return response['success'] else: log.error('User or 
password missing, not using authentication.') return False def _logout(self): args = {'api':'SYNO.API.Auth', 'version':1, 'method':'logout', 'session':self.session_name, '_sid':self.sid} return self._req(self.auth_url, args) def _req(self, url, args, files = None): response = {'success': False} try: req = requests.post(url, data = args, files = files, verify = False) req.raise_for_status() response = json.loads(req.text) if response['success']: log.info('Synology action successfull') return response except requests.ConnectionError as err: log.error('Synology connection error, check your config %s', err) except requests.HTTPError as err: log.error('SynologyRPC HTTPError: %s', err) except Exception as err: log.error('Exception: %s', err) finally: return response def create_task(self, url = None, filename = None, filedata = None): """ Creates new download task in Synology DownloadStation. Either specify url or pair (filename, filedata). Returns True if task was created, False otherwise """ result = False # login if self._login(): args = {'api': 'SYNO.DownloadStation.Task', 'version': '1', 'method': 'create', '_sid': self.sid} if self.destination and len(self.destination) > 0: args['destination'] = self.destination if url: log.info('Login success, adding torrent URI') args['uri'] = url response = self._req(self.download_url, args = args) if response['success']: log.info('Response: %s', response) else: log.error('Response: %s', response) synoerrortype = { 400 : 'File upload failed', 401 : 'Max number of tasks reached', 402 : 'Destination denied', 403 : 'Destination does not exist', 404 : 'Invalid task id', 405 : 'Invalid task action', 406 : 'No default destination', 407 : 'Set destination failed', 408 : 'File does not exist' } log.error('DownloadStation returned the following error : %s', synoerrortype[response['error']['code']]) result = response['success'] elif filename and filedata: log.info('Login success, adding torrent') files = {'file': (filename, filedata)} 
response = self._req(self.download_url, args = args, files = files) log.info('Response: %s', response) result = response['success'] else: log.error('Invalid use of SynologyRPC.create_task: either url or filename+filedata must be specified') self._logout() return result def test(self): return bool(self._login()) config = [{ 'name': 'synology', 'groups': [ { 'tab': 'downloaders', 'list': 'download_providers', 'name': 'synology', 'label': 'Synology', 'description': 'Use <a href="https://www.synology.com/en-us/dsm/app_packages/DownloadStation" target="_blank">Synology Download Station</a> to download.', 'wizard': True, 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', 'radio_group': 'nzb,torrent', }, { 'name': 'host', 'default': 'localhost:5000', 'description': 'Hostname with port. Usually <strong>localhost:5000</strong>', }, { 'name': 'username', }, { 'name': 'password', 'type': 'password', }, { 'name': 'destination', 'description': 'Specify <strong>existing</strong> destination share to where your files will be downloaded, usually <strong>Downloads</strong>', 'advanced': True, }, { 'name': 'use_for', 'label': 'Use for', 'default': 'both', 'type': 'dropdown', 'values': [('usenet & torrents', 'both'), ('usenet', 'nzb'), ('torrent', 'torrent')], }, { 'name': 'manual', 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', }, ], } ], }]
10,004
Python
.py
223
31.699552
166
0.532328
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,919
pneumatic.py
CouchPotato_CouchPotatoServer/couchpotato/core/downloaders/pneumatic.py
from __future__ import with_statement import os import traceback from couchpotato.core._base.downloader.main import DownloaderBase from couchpotato.core.helpers.encoding import sp from couchpotato.core.logger import CPLog log = CPLog(__name__) autoload = 'Pneumatic' class Pneumatic(DownloaderBase): protocol = ['nzb'] strm_syntax = 'plugin://plugin.program.pneumatic/?mode=strm&type=add_file&nzb=%s&nzbname=%s' status_support = False def download(self, data = None, media = None, filedata = None): """ Send a torrent/nzb file to the downloader :param data: dict returned from provider Contains the release information :param media: media dict with information Used for creating the filename when possible :param filedata: downloaded torrent/nzb filedata The file gets downloaded in the searcher and send to this function This is done to have failed checking before using the downloader, so the downloader doesn't need to worry about that :return: boolean One faile returns false, but the downloaded should log his own errors """ if not media: media = {} if not data: data = {} directory = self.conf('directory') if not directory or not os.path.isdir(directory): log.error('No directory set for .strm downloads.') else: try: if not filedata or len(filedata) < 50: log.error('No nzb available!') return False full_path = os.path.join(directory, self.createFileName(data, filedata, media)) try: if not os.path.isfile(full_path): log.info('Downloading %s to %s.', (data.get('protocol'), full_path)) with open(full_path, 'wb') as f: f.write(filedata) nzb_name = self.createNzbName(data, media) strm_path = os.path.join(directory, nzb_name) strm_file = open(strm_path + '.strm', 'wb') strmContent = self.strm_syntax % (full_path, nzb_name) strm_file.write(strmContent) strm_file.close() return self.downloadReturnId('') else: log.info('File %s already exists.', full_path) return self.downloadReturnId('') except: log.error('Failed to download .strm: %s', traceback.format_exc()) pass except: 
log.info('Failed to download file %s: %s', (data.get('name'), traceback.format_exc())) return False return False def test(self): """ Check if connection works :return: bool """ directory = self.conf('directory') if directory and os.path.isdir(directory): test_file = sp(os.path.join(directory, 'couchpotato_test.txt')) # Check if folder is writable self.createFile(test_file, 'This is a test file') if os.path.isfile(test_file): os.remove(test_file) return True return False config = [{ 'name': 'pneumatic', 'order': 30, 'groups': [ { 'tab': 'downloaders', 'list': 'download_providers', 'name': 'pneumatic', 'label': 'Pneumatic', 'description': 'Use <a href="http://forum.kodi.tv/showthread.php?tid=97657" target="_blank">Pneumatic</a> to download .strm files.', 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', }, { 'name': 'directory', 'type': 'directory', 'description': 'Directory where the .strm file is saved to.', }, { 'name': 'manual', 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', }, ], } ], }]
4,439
Python
.py
103
29.300971
144
0.527842
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,920
deluge.py
CouchPotato_CouchPotatoServer/couchpotato/core/downloaders/deluge.py
from base64 import b64encode, b16encode, b32decode from datetime import timedelta from hashlib import sha1 import os.path import re import traceback from bencode import bencode as benc, bdecode from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList from couchpotato.core.helpers.encoding import isInt, sp from couchpotato.core.helpers.variable import tryFloat, cleanHost from couchpotato.core.logger import CPLog from deluge_client.client import DelugeRPCClient log = CPLog(__name__) autoload = 'Deluge' class Deluge(DownloaderBase): protocol = ['torrent', 'torrent_magnet'] log = CPLog(__name__) drpc = None def connect(self, reconnect = False): """ Connect to the delugeRPC, re-use connection when already available :param reconnect: force reconnect :return: DelugeRPC instance """ # Load host from config and split out port. host = cleanHost(self.conf('host'), protocol = False).split(':') # Force host assignment if len(host) == 1: host.append(80) if not isInt(host[1]): log.error('Config properties are not filled in correctly, port is missing.') return False if not self.drpc or reconnect: self.drpc = DelugeRPC(host[0], port = host[1], username = self.conf('username'), password = self.conf('password')) return self.drpc def download(self, data = None, media = None, filedata = None): """ Send a torrent/nzb file to the downloader :param data: dict returned from provider Contains the release information :param media: media dict with information Used for creating the filename when possible :param filedata: downloaded torrent/nzb filedata The file gets downloaded in the searcher and send to this function This is done to have failed checking before using the downloader, so the downloader doesn't need to worry about that :return: boolean One faile returns false, but the downloaded should log his own errors """ if not media: media = {} if not data: data = {} log.info('Sending "%s" (%s) to Deluge.', (data.get('name'), data.get('protocol'))) if not 
self.connect(): return False if not filedata and data.get('protocol') == 'torrent': log.error('Failed sending torrent, no data') return False # Set parameters for Deluge options = { 'add_paused': self.conf('paused', default = 0), 'label': self.conf('label') } if self.conf('directory'): #if os.path.isdir(self.conf('directory')): options['download_location'] = self.conf('directory') #else: # log.error('Download directory from Deluge settings: %s doesn\'t exist', self.conf('directory')) if self.conf('completed_directory'): #if os.path.isdir(self.conf('completed_directory')): options['move_completed'] = 1 options['move_completed_path'] = self.conf('completed_directory') #else: # log.error('Download directory from Deluge settings: %s doesn\'t exist', self.conf('directory')) if data.get('seed_ratio'): options['stop_at_ratio'] = 1 options['stop_ratio'] = tryFloat(data.get('seed_ratio')) # Deluge only has seed time as a global option. Might be added in # in a future API release. # if data.get('seed_time'): # Send request to Deluge if data.get('protocol') == 'torrent_magnet': remote_torrent = self.drpc.add_torrent_magnet(data.get('url'), options) else: filename = self.createFileName(data, filedata, media) remote_torrent = self.drpc.add_torrent_file(filename, filedata, options) if not remote_torrent: log.error('Failed sending torrent to Deluge') return False log.info('Torrent sent to Deluge successfully.') return self.downloadReturnId(remote_torrent) def test(self): """ Check if connection works :return: bool """ if self.connect(True) and self.drpc.test(): return True return False def getAllDownloadStatus(self, ids): """ Get status of all active downloads :param ids: list of (mixed) downloader ids Used to match the releases for this downloader as there could be other downloaders active that it should ignore :return: list of releases """ log.debug('Checking Deluge download status.') if not self.connect(): return [] release_downloads = ReleaseDownloadList(self) queue = 
self.drpc.get_alltorrents(ids) if not queue: log.debug('Nothing in queue or error') return [] for torrent_id in queue: torrent = queue[torrent_id] if not 'hash' in torrent: # When given a list of ids, deluge will return an empty item for a non-existant torrent. continue log.debug('name=%s / id=%s / save_path=%s / move_on_completed=%s / move_completed_path=%s / hash=%s / progress=%s / state=%s / eta=%s / ratio=%s / stop_ratio=%s / is_seed=%s / is_finished=%s / paused=%s', (torrent['name'], torrent['hash'], torrent['save_path'], torrent['move_on_completed'], torrent['move_completed_path'], torrent['hash'], torrent['progress'], torrent['state'], torrent['eta'], torrent['ratio'], torrent['stop_ratio'], torrent['is_seed'], torrent['is_finished'], torrent['paused'])) # Deluge has no easy way to work out if a torrent is stalled or failing. #status = 'failed' status = 'busy' # If an user opts to seed a torrent forever (usually associated to private trackers usage), stop_ratio will be 0 or -1 (depending on Deluge version). # In this scenario the status of the torrent would never change from BUSY to SEEDING. # The last check takes care of this case. if torrent['is_seed'] and ((tryFloat(torrent['ratio']) < tryFloat(torrent['stop_ratio'])) or (tryFloat(torrent['stop_ratio']) < 0)): # We have torrent['seeding_time'] to work out what the seeding time is, but we do not # have access to the downloader seed_time, as with deluge we have no way to pass it # when the torrent is added. So Deluge will only look at the ratio. # See above comment in download(). 
status = 'seeding' elif torrent['is_seed'] and torrent['is_finished'] and torrent['paused'] and torrent['state'] == 'Paused': status = 'completed' download_dir = sp(torrent['save_path']) if torrent['move_on_completed']: download_dir = torrent['move_completed_path'] torrent_files = [] for file_item in torrent['files']: torrent_files.append(sp(os.path.join(download_dir, file_item['path']))) release_downloads.append({ 'id': torrent['hash'], 'name': torrent['name'], 'status': status, 'original_status': torrent['state'], 'seed_ratio': torrent['ratio'], 'timeleft': str(timedelta(seconds = torrent['eta'])), 'folder': sp(download_dir if len(torrent_files) == 1 else os.path.join(download_dir, torrent['name'])), 'files': torrent_files, }) return release_downloads def pause(self, release_download, pause = True): if pause: return self.drpc.pause_torrent([release_download['id']]) else: return self.drpc.resume_torrent([release_download['id']]) def removeFailed(self, release_download): log.info('%s failed downloading, deleting...', release_download['name']) return self.drpc.remove_torrent(release_download['id'], True) def processComplete(self, release_download, delete_files = False): log.debug('Requesting Deluge to remove the torrent %s%s.', (release_download['name'], ' and cleanup the downloaded files' if delete_files else '')) return self.drpc.remove_torrent(release_download['id'], remove_local_data = delete_files) class DelugeRPC(object): host = 'localhost' port = 58846 username = None password = None client = None def __init__(self, host = 'localhost', port = 58846, username = None, password = None): super(DelugeRPC, self).__init__() self.host = host self.port = port self.username = username self.password = password def connect(self): #self.client = DelugeClient() #self.client.connect(self.host, int(self.port), self.username, self.password) self.client = DelugeRPCClient(self.host, int(self.port), self.username, self.password) self.client.connect() def test(self): try: 
self.connect() except: return False return True def add_torrent_magnet(self, torrent, options): torrent_id = False try: self.connect() torrent_id = self.client.core.add_torrent_magnet(torrent, options) if not torrent_id: torrent_id = self._check_torrent(True, torrent) if torrent_id and options['label']: self.client.label.set_torrent(torrent_id, options['label']) except Exception as err: log.error('Failed to add torrent magnet %s: %s %s', (torrent, err, traceback.format_exc())) finally: if self.client: self.disconnect() return torrent_id def add_torrent_file(self, filename, torrent, options): torrent_id = False try: self.connect() torrent_id = self.client.core.add_torrent_file(filename, b64encode(torrent), options) if not torrent_id: torrent_id = self._check_torrent(False, torrent) if torrent_id and options['label']: self.client.label.set_torrent(torrent_id, options['label']) except Exception as err: log.error('Failed to add torrent file %s: %s %s', (filename, err, traceback.format_exc())) finally: if self.client: self.disconnect() return torrent_id def get_alltorrents(self, ids): ret = False try: self.connect() ret = self.client.core.get_torrents_status({'id': ids}, ('name', 'hash', 'save_path', 'move_completed_path', 'progress', 'state', 'eta', 'ratio', 'stop_ratio', 'is_seed', 'is_finished', 'paused', 'move_on_completed', 'files')) except Exception as err: log.error('Failed to get all torrents: %s %s', (err, traceback.format_exc())) finally: if self.client: self.disconnect() return ret def pause_torrent(self, torrent_ids): try: self.connect() self.client.core.pause_torrent(torrent_ids) except Exception as err: log.error('Failed to pause torrent: %s %s', (err, traceback.format_exc())) finally: if self.client: self.disconnect() def resume_torrent(self, torrent_ids): try: self.connect() self.client.core.resume_torrent(torrent_ids) except Exception as err: log.error('Failed to resume torrent: %s %s', (err, traceback.format_exc())) finally: if self.client: 
self.disconnect() def remove_torrent(self, torrent_id, remove_local_data): ret = False try: self.connect() ret = self.client.core.remove_torrent(torrent_id, remove_local_data) except Exception as err: log.error('Failed to remove torrent: %s %s', (err, traceback.format_exc())) finally: if self.client: self.disconnect() return ret def disconnect(self): self.client.disconnect() def _check_torrent(self, magnet, torrent): # Torrent not added, check if it already existed. if magnet: torrent_hash = re.findall('urn:btih:([\w]{32,40})', torrent)[0] else: info = bdecode(torrent)["info"] torrent_hash = sha1(benc(info)).hexdigest() # Convert base 32 to hex if len(torrent_hash) == 32: torrent_hash = b16encode(b32decode(torrent_hash)) torrent_hash = torrent_hash.lower() torrent_check = self.client.core.get_torrent_status(torrent_hash, {}) if torrent_check['hash']: return torrent_hash return False config = [{ 'name': 'deluge', 'groups': [ { 'tab': 'downloaders', 'list': 'download_providers', 'name': 'deluge', 'label': 'Deluge', 'description': 'Use <a href="http://www.deluge-torrent.org/" target="_blank">Deluge</a> to download torrents.', 'wizard': True, 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', 'radio_group': 'torrent', }, { 'name': 'host', 'default': 'localhost:58846', 'description': 'Hostname with port. Usually <strong>localhost:58846</strong>', }, { 'name': 'username', }, { 'name': 'password', 'type': 'password', }, { 'name': 'directory', 'type': 'directory', 'description': 'Download to this directory. Keep empty for default Deluge download directory.', }, { 'name': 'completed_directory', 'type': 'directory', 'description': 'Move completed torrent to this directory. 
Keep empty for default Deluge options.', 'advanced': True, }, { 'name': 'label', 'description': 'Label to add to torrents in the Deluge UI.', }, { 'name': 'remove_complete', 'label': 'Remove torrent', 'type': 'bool', 'default': True, 'advanced': True, 'description': 'Remove the torrent from Deluge after it has finished seeding.', }, { 'name': 'delete_files', 'label': 'Remove files', 'default': True, 'type': 'bool', 'advanced': True, 'description': 'Also remove the leftover files.', }, { 'name': 'paused', 'type': 'bool', 'advanced': True, 'default': False, 'description': 'Add the torrent paused.', }, { 'name': 'manual', 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', }, { 'name': 'delete_failed', 'default': True, 'advanced': True, 'type': 'bool', 'description': 'Delete a release after the download has failed.', }, ], } ], }]
16,194
Python
.py
353
33.586402
512
0.56055
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,921
blackhole.py
CouchPotato_CouchPotatoServer/couchpotato/core/downloaders/blackhole.py
from __future__ import with_statement import os import traceback from couchpotato.core._base.downloader.main import DownloaderBase from couchpotato.core.helpers.encoding import sp from couchpotato.core.helpers.variable import getDownloadDir from couchpotato.core.logger import CPLog from couchpotato.environment import Env log = CPLog(__name__) autoload = 'Blackhole' class Blackhole(DownloaderBase): protocol = ['nzb', 'torrent', 'torrent_magnet'] status_support = False def download(self, data = None, media = None, filedata = None): """ Send a torrent/nzb file to the downloader :param data: dict returned from provider Contains the release information :param media: media dict with information Used for creating the filename when possible :param filedata: downloaded torrent/nzb filedata The file gets downloaded in the searcher and send to this function This is done to have failed checking before using the downloader, so the downloader doesn't need to worry about that :return: boolean One faile returns false, but the downloaded should log his own errors """ if not media: media = {} if not data: data = {} directory = self.conf('directory') # The folder needs to exist if not directory or not os.path.isdir(directory): log.error('No directory set for blackhole %s download.', data.get('protocol')) else: try: # Filedata can be empty, which probably means it a magnet link if not filedata or len(filedata) < 50: try: if data.get('protocol') == 'torrent_magnet': filedata = self.magnetToTorrent(data.get('url')) data['protocol'] = 'torrent' except: log.error('Failed download torrent via magnet url: %s', traceback.format_exc()) # If it's still empty, either write the magnet link to a .magnet file, or error out. 
if not filedata or len(filedata) < 50: if self.conf('magnet_file'): filedata = data.get('url') + '\n' data['protocol'] = 'magnet' else: log.error('No nzb/torrent available: %s', data.get('url')) return False # Create filename with imdb id and other nice stuff file_name = self.createFileName(data, filedata, media) full_path = os.path.join(directory, file_name) # People want thinks nice and tidy, create a subdir if self.conf('create_subdir'): try: new_path = os.path.splitext(full_path)[0] if not os.path.exists(new_path): os.makedirs(new_path) full_path = os.path.join(new_path, file_name) except: log.error('Couldnt create sub dir, reverting to old one: %s', full_path) try: # Make sure the file doesn't exist yet, no need in overwriting it if not os.path.isfile(full_path): log.info('Downloading %s to %s.', (data.get('protocol'), full_path)) with open(full_path, 'wb') as f: f.write(filedata) os.chmod(full_path, Env.getPermission('file')) return self.downloadReturnId('') else: log.info('File %s already exists.', full_path) return self.downloadReturnId('') except: log.error('Failed to download to blackhole %s', traceback.format_exc()) pass except: log.info('Failed to download file %s: %s', (data.get('name'), traceback.format_exc())) return False return False def test(self): """ Test and see if the directory is writable :return: boolean """ directory = self.conf('directory') if directory and os.path.isdir(directory): test_file = sp(os.path.join(directory, 'couchpotato_test.txt')) # Check if folder is writable self.createFile(test_file, 'This is a test file') if os.path.isfile(test_file): os.remove(test_file) return True return False def getEnabledProtocol(self): """ What protocols is this downloaded used for :return: list with protocols """ if self.conf('use_for') == 'both': return super(Blackhole, self).getEnabledProtocol() elif self.conf('use_for') == 'torrent': return ['torrent', 'torrent_magnet'] else: return ['nzb'] def isEnabled(self, manual = False, data = None): """ 
Check if protocol is used (and enabled) :param manual: The user has clicked to download a link through the webUI :param data: dict returned from provider Contains the release information :return: boolean """ if not data: data = {} for_protocol = ['both'] if data and 'torrent' in data.get('protocol'): for_protocol.append('torrent') elif data: for_protocol.append(data.get('protocol')) return super(Blackhole, self).isEnabled(manual, data) and \ ((self.conf('use_for') in for_protocol)) config = [{ 'name': 'blackhole', 'order': 30, 'groups': [ { 'tab': 'downloaders', 'list': 'download_providers', 'name': 'blackhole', 'label': 'Black hole', 'description': 'Download the NZB/Torrent to a specific folder. <em>Note: Seeding and copying/linking features do <strong>not</strong> work with Black hole</em>.', 'wizard': True, 'options': [ { 'name': 'enabled', 'default': True, 'type': 'enabler', 'radio_group': 'nzb,torrent', }, { 'name': 'directory', 'type': 'directory', 'description': 'Directory where the .nzb (or .torrent) file is saved to.', 'default': getDownloadDir() }, { 'name': 'use_for', 'label': 'Use for', 'default': 'both', 'type': 'dropdown', 'values': [('usenet & torrents', 'both'), ('usenet', 'nzb'), ('torrent', 'torrent')], }, { 'name': 'create_subdir', 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'Create a sub directory when saving the .nzb (or .torrent).', }, { 'name': 'manual', 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', }, { 'name': 'magnet_file', 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'If magnet file conversion fails, write down the magnet link in a .magnet file instead.', }, ], } ], }]
7,939
Python
.py
174
30.563218
174
0.512025
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,922
rtorrent_.py
CouchPotato_CouchPotatoServer/couchpotato/core/downloaders/rtorrent_.py
from base64 import b16encode, b32decode from datetime import timedelta from hashlib import sha1 from urlparse import urlparse import os import re from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList from couchpotato.core.event import addEvent from couchpotato.core.helpers.encoding import sp from couchpotato.core.helpers.variable import cleanHost, splitString from couchpotato.core.logger import CPLog from bencode import bencode, bdecode from rtorrent import RTorrent log = CPLog(__name__) autoload = 'rTorrent' class rTorrent(DownloaderBase): protocol = ['torrent', 'torrent_magnet'] rt = None error_msg = '' # Migration url to host options def __init__(self): super(rTorrent, self).__init__() addEvent('app.load', self.migrate) addEvent('setting.save.rtorrent.*.after', self.settingsChanged) def migrate(self): url = self.conf('url') if url: host_split = splitString(url.split('://')[-1], split_on = '/') self.conf('ssl', value = url.startswith('https')) self.conf('host', value = host_split[0].strip()) self.conf('rpc_url', value = '/'.join(host_split[1:])) self.deleteConf('url') def settingsChanged(self): # Reset active connection if settings have changed if self.rt: log.debug('Settings have changed, closing active connection') self.rt = None return True def getAuth(self): if not self.conf('username') or not self.conf('password'): # Missing username or password parameter return None # Build authentication tuple return ( self.conf('authentication'), self.conf('username'), self.conf('password') ) def getVerifySsl(self): # Ensure verification has been enabled if not self.conf('ssl_verify'): return False # Use ca bundle if defined ca_bundle = self.conf('ssl_ca_bundle') if ca_bundle and os.path.exists(ca_bundle): return ca_bundle # Use default ssl verification return True def connect(self, reconnect = False): # Already connected? 
if not reconnect and self.rt is not None: return self.rt url = cleanHost(self.conf('host'), protocol = True, ssl = self.conf('ssl')) # Automatically add '+https' to 'httprpc' protocol if SSL is enabled if self.conf('ssl') and url.startswith('httprpc://'): url = url.replace('httprpc://', 'httprpc+https://') parsed = urlparse(url) # rpc_url is only used on http/https scgi pass-through if parsed.scheme in ['http', 'https']: url += self.conf('rpc_url') # Construct client self.rt = RTorrent( url, self.getAuth(), verify_ssl=self.getVerifySsl() ) self.error_msg = '' try: self.rt.connection.verify() except AssertionError as e: self.error_msg = e.message self.rt = None return self.rt def test(self): """ Check if connection works :return: bool """ if self.connect(True): return True if self.error_msg: return False, 'Connection failed: ' + self.error_msg return False def download(self, data = None, media = None, filedata = None): """ Send a torrent/nzb file to the downloader :param data: dict returned from provider Contains the release information :param media: media dict with information Used for creating the filename when possible :param filedata: downloaded torrent/nzb filedata The file gets downloaded in the searcher and send to this function This is done to have failed checking before using the downloader, so the downloader doesn't need to worry about that :return: boolean One faile returns false, but the downloaded should log his own errors """ if not media: media = {} if not data: data = {} log.debug('Sending "%s" to rTorrent.', (data.get('name'))) if not self.connect(): return False torrent_hash = 0 torrent_params = {} if self.conf('label'): torrent_params['label'] = self.conf('label') if not filedata and data.get('protocol') == 'torrent': log.error('Failed sending torrent, no data') return False # Try download magnet torrents if data.get('protocol') == 'torrent_magnet': # Send magnet to rTorrent torrent_hash = re.findall('urn:btih:([\w]{32,40})', 
data.get('url'))[0].upper() # Send request to rTorrent try: torrent = self.rt.load_magnet(data.get('url'), torrent_hash) if not torrent: log.error('Unable to find the torrent, did it fail to load?') return False except Exception as err: log.error('Failed to send magnet to rTorrent: %s', err) return False if data.get('protocol') == 'torrent': info = bdecode(filedata)["info"] torrent_hash = sha1(bencode(info)).hexdigest().upper() # Convert base 32 to hex if len(torrent_hash) == 32: torrent_hash = b16encode(b32decode(torrent_hash)) # Send request to rTorrent try: # Send torrent to rTorrent torrent = self.rt.load_torrent(filedata, verify_retries=10) if not torrent: log.error('Unable to find the torrent, did it fail to load?') return False except Exception as err: log.error('Failed to send torrent to rTorrent: %s', err) return False try: # Set label if self.conf('label'): torrent.set_custom(1, self.conf('label')) if self.conf('directory'): torrent.set_directory(self.conf('directory')) # Start torrent if not self.conf('paused', default = 0): torrent.start() return self.downloadReturnId(torrent_hash) except Exception as err: log.error('Failed to send torrent to rTorrent: %s', err) return False def getTorrentStatus(self, torrent): if not torrent.complete: return 'busy' if torrent.open: return 'seeding' return 'completed' def getAllDownloadStatus(self, ids): """ Get status of all active downloads :param ids: list of (mixed) downloader ids Used to match the releases for this downloader as there could be other downloaders active that it should ignore :return: list of releases """ log.debug('Checking rTorrent download status.') if not self.connect(): return [] try: torrents = self.rt.get_torrents() release_downloads = ReleaseDownloadList(self) for torrent in torrents: if torrent.info_hash in ids: torrent_directory = os.path.normpath(torrent.directory) torrent_files = [] for file in torrent.get_files(): if not os.path.normpath(file.path).startswith(torrent_directory): file_path 
= os.path.join(torrent_directory, file.path.lstrip('/')) else: file_path = file.path torrent_files.append(sp(file_path)) release_downloads.append({ 'id': torrent.info_hash, 'name': torrent.name, 'status': self.getTorrentStatus(torrent), 'seed_ratio': torrent.ratio, 'original_status': torrent.state, 'timeleft': str(timedelta(seconds = float(torrent.left_bytes) / torrent.down_rate)) if torrent.down_rate > 0 else -1, 'folder': sp(torrent.directory), 'files': torrent_files }) return release_downloads except Exception as err: log.error('Failed to get status from rTorrent: %s', err) return [] def pause(self, release_download, pause = True): if not self.connect(): return False torrent = self.rt.find_torrent(release_download['id']) if torrent is None: return False if pause: return torrent.pause() return torrent.resume() def removeFailed(self, release_download): log.info('%s failed downloading, deleting...', release_download['name']) return self.processComplete(release_download, delete_files = True) def processComplete(self, release_download, delete_files): log.debug('Requesting rTorrent to remove the torrent %s%s.', (release_download['name'], ' and cleanup the downloaded files' if delete_files else '')) if not self.connect(): return False torrent = self.rt.find_torrent(release_download['id']) if torrent is None: return False if delete_files: for file_item in torrent.get_files(): # will only delete files, not dir/sub-dir os.unlink(os.path.join(torrent.directory, file_item.path)) if torrent.is_multi_file() and torrent.directory.endswith(torrent.name): # Remove empty directories bottom up try: for path, _, _ in os.walk(sp(torrent.directory), topdown = False): os.rmdir(path) except OSError: log.info('Directory "%s" contains extra files, unable to remove', torrent.directory) torrent.erase() # just removes the torrent, doesn't delete data return True config = [{ 'name': 'rtorrent', 'groups': [ { 'tab': 'downloaders', 'list': 'download_providers', 'name': 'rtorrent', 'label': 
'rTorrent', 'description': 'Use <a href="https://rakshasa.github.io/rtorrent/" target="_blank">rTorrent</a> to download torrents.', 'wizard': True, 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', 'radio_group': 'torrent', }, { 'name': 'ssl', 'label': 'SSL Enabled', 'order': 1, 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'Use HyperText Transfer Protocol Secure, or <strong>https</strong>', }, { 'name': 'ssl_verify', 'label': 'SSL Verify', 'order': 2, 'default': 1, 'type': 'bool', 'advanced': True, 'description': 'Verify SSL certificate on https connections', }, { 'name': 'ssl_ca_bundle', 'label': 'SSL CA Bundle', 'order': 3, 'type': 'string', 'advanced': True, 'description': 'Path to a directory (or file) containing trusted certificate authorities', }, { 'name': 'host', 'order': 4, 'default': 'localhost:80', 'description': 'RPC Communication URI. Usually <strong>scgi://localhost:5000</strong>, ' '<strong>httprpc://localhost/rutorrent</strong> or <strong>localhost:80</strong>', }, { 'name': 'rpc_url', 'order': 5, 'default': 'RPC2', 'type': 'string', 'advanced': True, 'description': 'Change if your RPC mount is at a different path.', }, { 'name': 'authentication', 'order': 6, 'default': 'basic', 'type': 'dropdown', 'advanced': True, 'values': [('Basic', 'basic'), ('Digest', 'digest')], 'description': 'Authentication method used for http(s) connections', }, { 'name': 'username', 'order': 7, }, { 'name': 'password', 'order': 8, 'type': 'password', }, { 'name': 'label', 'order': 9, 'description': 'Label to apply on added torrents.', }, { 'name': 'directory', 'order': 10, 'type': 'directory', 'description': 'Download to this directory. 
Keep empty for default rTorrent download directory.', }, { 'name': 'remove_complete', 'label': 'Remove torrent', 'order': 11, 'default': False, 'type': 'bool', 'advanced': True, 'description': 'Remove the torrent after it finishes seeding.', }, { 'name': 'delete_files', 'label': 'Remove files', 'order': 12, 'default': True, 'type': 'bool', 'advanced': True, 'description': 'Also remove the leftover files.', }, { 'name': 'paused', 'order': 13, 'type': 'bool', 'advanced': True, 'default': False, 'description': 'Add the torrent paused.', }, { 'name': 'manual', 'order': 14, 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', }, ], } ], }]
15,069
Python
.py
358
27.759777
141
0.508375
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,923
hadouken.py
CouchPotato_CouchPotatoServer/couchpotato/core/downloaders/hadouken.py
from base64 import b16encode, b32decode, b64encode from distutils.version import LooseVersion from hashlib import sha1 import httplib import json import os import re import urllib2 from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList from couchpotato.core.helpers.encoding import isInt, sp from couchpotato.core.helpers.variable import cleanHost from couchpotato.core.logger import CPLog from bencode import bencode as benc, bdecode log = CPLog(__name__) autoload = 'Hadouken' class Hadouken(DownloaderBase): protocol = ['torrent', 'torrent_magnet'] hadouken_api = None def connect(self): # Load host from config and split out port. host = cleanHost(self.conf('host'), protocol = False).split(':') if not isInt(host[1]): log.error('Config properties are not filled in correctly, port is missing.') return False # This is where v4 and v5 begin to differ if(self.conf('version') == 'v4'): if not self.conf('api_key'): log.error('Config properties are not filled in correctly, API key is missing.') return False url = 'http://' + str(host[0]) + ':' + str(host[1]) + '/jsonrpc' client = JsonRpcClient(url, 'Token ' + self.conf('api_key')) self.hadouken_api = HadoukenAPIv4(client) return True else: auth_type = self.conf('auth_type') header = None if auth_type == 'api_key': header = 'Token ' + self.conf('api_key') elif auth_type == 'user_pass': header = 'Basic ' + b64encode(self.conf('auth_user') + ':' + self.conf('auth_pass')) url = 'http://' + str(host[0]) + ':' + str(host[1]) + '/api' client = JsonRpcClient(url, header) self.hadouken_api = HadoukenAPIv5(client) return True return False def download(self, data = None, media = None, filedata = None): """ Send a torrent/nzb file to the downloader :param data: dict returned from provider Contains the release information :param media: media dict with information Used for creating the filename when possible :param filedata: downloaded torrent/nzb filedata The file gets downloaded in the searcher and send to 
this function This is done to have failed checking before using the downloader, so the downloader doesn't need to worry about that :return: boolean One faile returns false, but the downloaded should log his own errors """ if not media: media = {} if not data: data = {} log.debug("Sending '%s' (%s) to Hadouken.", (data.get('name'), data.get('protocol'))) if not self.connect(): return False torrent_params = {} if self.conf('label'): torrent_params['label'] = self.conf('label') # Set the tags array since that is what v5 expects. torrent_params['tags'] = [self.conf('label')] torrent_filename = self.createFileName(data, filedata, media) if data.get('protocol') == 'torrent_magnet': torrent_hash = re.findall('urn:btih:([\w]{32,40})', data.get('url'))[0].upper() torrent_params['trackers'] = self.torrent_trackers torrent_params['name'] = torrent_filename else: info = bdecode(filedata)['info'] torrent_hash = sha1(benc(info)).hexdigest().upper() # Convert base 32 to hex if len(torrent_hash) == 32: torrent_hash = b16encode(b32decode(torrent_hash)) # Send request to Hadouken if data.get('protocol') == 'torrent_magnet': self.hadouken_api.add_magnet_link(data.get('url'), torrent_params) else: self.hadouken_api.add_file(filedata, torrent_params) return self.downloadReturnId(torrent_hash) def test(self): """ Tests the given host:port and API key """ if not self.connect(): return False version = self.hadouken_api.get_version() if not version: log.error('Could not get Hadouken version.') return False # The minimum required version of Hadouken is 4.5.6. if LooseVersion(version) >= LooseVersion('4.5.6'): return True log.error('Hadouken v4.5.6 (or newer) required. 
Found v%s', version) return False def getAllDownloadStatus(self, ids): """ Get status of all active downloads :param ids: list of (mixed) downloader ids Used to match the releases for this downloader as there could be other downloaders active that it should ignore :return: list of releases """ log.debug('Checking Hadouken download status.') if not self.connect(): return [] release_downloads = ReleaseDownloadList(self) queue = self.hadouken_api.get_by_hash_list(ids) if not queue: return [] for torrent in queue: if torrent is None: continue torrent_filelist = self.hadouken_api.get_files_by_hash(torrent.info_hash) torrent_files = [] for file_item in torrent_filelist: torrent_files.append(sp(os.path.join(torrent.save_path, file_item))) release_downloads.append({ 'id': torrent.info_hash.upper(), 'name': torrent.name, 'status': torrent.get_status(), 'seed_ratio': torrent.get_seed_ratio(), 'original_status': torrent.state, 'timeleft': -1, 'folder': sp(torrent.save_path if len(torrent_files == 1) else os.path.join(torrent.save_path, torrent.name)), 'files': torrent_files }) return release_downloads def pause(self, release_download, pause = True): """ Pauses or resumes the torrent specified by the ID field in release_download. Keyword arguments: release_download -- The CouchPotato release_download to pause/resume. pause -- Boolean indicating whether to pause or resume. """ if not self.connect(): return False return self.hadouken_api.pause(release_download['id'], pause) def removeFailed(self, release_download): """ Removes a failed torrent and also remove the data associated with it. Keyword arguments: release_download -- The CouchPotato release_download to remove. 
""" log.info('%s failed downloading, deleting...', release_download['name']) if not self.connect(): return False return self.hadouken_api.remove(release_download['id'], remove_data = True) def processComplete(self, release_download, delete_files = False): """ Removes the completed torrent from Hadouken and optionally removes the data associated with it. Keyword arguments: release_download -- The CouchPotato release_download to remove. delete_files: Boolean indicating whether to remove the associated data. """ log.debug('Requesting Hadouken to remove the torrent %s%s.', (release_download['name'], ' and cleanup the downloaded files' if delete_files else '')) if not self.connect(): return False return self.hadouken_api.remove(release_download['id'], remove_data = delete_files) class JsonRpcClient(object): def __init__(self, url, auth_header = None): self.url = url self.requestId = 0 self.opener = urllib2.build_opener() self.opener.addheaders = [ ('User-Agent', 'couchpotato-hadouken-client/1.0'), ('Accept', 'application/json'), ('Content-Type', 'application/json') ] if auth_header: self.opener.addheaders.append(('Authorization', auth_header)) def invoke(self, method, params): self.requestId += 1 data = { 'jsonrpc': '2.0', 'id': self.requestId, 'method': method, 'params': params } request = urllib2.Request(self.url, data = json.dumps(data)) try: f = self.opener.open(request) response = f.read() f.close() obj = json.loads(response) if 'error' in obj.keys(): log.error('JSONRPC error, %s: %s', (obj['error']['code'], obj['error']['message'])) return False if 'result' in obj.keys(): return obj['result'] return True except httplib.InvalidURL as err: log.error('Invalid Hadouken host, check your config %s', err) except urllib2.HTTPError as err: if err.code == 401: log.error('Could not authenticate, check your config') else: log.error('Hadouken HTTPError: %s', err) except urllib2.URLError as err: log.error('Unable to connect to Hadouken %s', err) return False class 
HadoukenAPI(object): def __init__(self, rpc_client): self.rpc = rpc_client if not rpc_client: log.error('No JSONRPC client specified.') def add_file(self, data, params): """ Add a file to Hadouken with the specified parameters. Keyword arguments: filedata -- The binary torrent data. torrent_params -- Additional parameters for the file. """ pass def add_magnet_link(self, link, params): """ Add a magnet link to Hadouken with the specified parameters. Keyword arguments: magnetLink -- The magnet link to send. torrent_params -- Additional parameters for the magnet link. """ pass def get_by_hash_list(self, infoHashList): """ Gets a list of torrents filtered by the given info hash list. Keyword arguments: infoHashList -- A list of info hashes. """ pass def get_files_by_hash(self, infoHash): """ Gets a list of files for the torrent identified by the given info hash. Keyword arguments: infoHash -- The info hash of the torrent to return files for. """ pass def get_version(self): """ Gets the version, commitish and build date of Hadouken. """ pass def pause(self, infoHash, pause): """ Pauses/unpauses the torrent identified by the given info hash. Keyword arguments: infoHash -- The info hash of the torrent to operate on. pause -- If true, pauses the torrent. Otherwise resumes. """ pass def remove(self, infoHash, remove_data = False): """ Removes the torrent identified by the given info hash and optionally removes the data as well. Keyword arguments: infoHash -- The info hash of the torrent to remove. remove_data -- If true, removes the data associated with the torrent. 
""" pass class TorrentItem(object): @property def info_hash(self): pass @property def save_path(self): pass @property def name(self): pass @property def state(self): pass def get_status(self): """ Returns the CouchPotato status for a given torrent.""" pass def get_seed_ratio(self): """ Returns the seed ratio for a given torrent.""" pass class TorrentItemv5(TorrentItem): def __init__(self, obj): self.obj = obj def info_hash(self): return self.obj[0] def save_path(self): return self.obj[26] def name(self): return self.obj[2] def state(self): return self.obj[1] def get_status(self): if self.obj[1] == 32: return 'completed' if self.obj[1] == 1: return 'seeding' return 'busy' def get_seed_ratio(self): up = self.obj[6] down = self.obj[5] if up > 0 and down > 0: return up / down return 0 class HadoukenAPIv5(HadoukenAPI): def add_file(self, data, params): return self.rpc.invoke('webui.addTorrent', ['file', b64encode(data), params]) def add_magnet_link(self, link, params): return self.rpc.invoke('webui.addTorrent', ['url', link, params]) def get_by_hash_list(self, infoHashList): torrents = self.rpc.invoke('webui.list', None) result = [] for torrent in torrents['torrents']: if torrent[0] in infoHashList: result.append(TorrentItemv5(torrent)) return result def get_files_by_hash(self, infoHash): files = self.rpc.invoke('webui.getFiles', [infoHash]) result = [] for file in files['files'][1]: result.append(file[0]) return result def get_version(self): result = self.rpc.invoke('core.getSystemInfo', None) if not result: return False return result['versions']['hadouken'] def pause(self, infoHash, pause): if pause: return self.rpc.invoke('webui.perform', ['pause', infoHash]) return self.rpc.invoke('webui.perform', ['resume', infoHash]) def remove(self, infoHash, remove_data=False): if remove_data: return self.rpc.invoke('webui.perform', ['removedata', infoHash]) return self.rpc.invoke('webui.perform', ['remove', infoHash]) class TorrentItemv4(TorrentItem): def __init__(self, obj): 
self.obj = obj def info_hash(self): return self.obj['InfoHash'] def save_path(self): return self.obj['SavePath'] def name(self): return self.obj['Name'] def state(self): return self.obj['State'] def get_status(self): if self.obj['IsSeeding'] and self.obj['IsFinished'] and self.obj['Paused']: return 'completed' if self.obj['IsSeeding']: return 'seeding' return 'busy' def get_seed_ratio(self): up = self.obj['TotalUploadedBytes'] down = self.obj['TotalDownloadedBytes'] if up > 0 and down > 0: return up / down return 0 class HadoukenAPIv4(object): def add_file(self, data, params): return self.rpc.invoke('torrents.addFile', [b64encode(data), params]) def add_magnet_link(self, link, params): return self.rpc.invoke('torrents.addUrl', [link, params]) def get_by_hash_list(self, infoHashList): torrents = self.rpc.invoke('torrents.getByInfoHashList', [infoHashList]) result = [] for torrent in torrents: result.append(TorrentItemv4(torrent)) return result def get_files_by_hash(self, infoHash): files = self.rpc.invoke('torrents.getFiles', [infoHash]) result = [] for file in files: result.append(file['Path']) return result def get_version(self): result = self.rpc.invoke('core.getVersion', None) if not result: return False return result['Version'] def pause(self, infoHash, pause): if pause: return self.rpc.invoke('torrents.pause', [infoHash]) return self.rpc.invoke('torrents.resume', [infoHash]) def remove(self, infoHash, remove_data = False): return self.rpc.invoke('torrents.remove', [infoHash, remove_data]) config = [{ 'name': 'hadouken', 'groups': [ { 'tab': 'downloaders', 'list': 'download_providers', 'name': 'hadouken', 'label': 'Hadouken', 'description': 'Use <a href="http://www.hdkn.net" target="_blank">Hadouken</a> (>= v4.5.6) to download torrents.', 'wizard': True, 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', 'radio_group': 'torrent' }, { 'name': 'version', 'label': 'Version', 'type': 'dropdown', 'default': 'v4', 'values': [('v4.x', 'v4'), ('v5.x', 
'v5')], 'description': 'Hadouken version.', }, { 'name': 'host', 'default': 'localhost:7890' }, { 'name': 'auth_type', 'label': 'Auth. type', 'type': 'dropdown', 'default': 'api_key', 'values': [('None', 'none'), ('API key/Token', 'api_key'), ('Username/Password', 'user_pass')], 'description': 'Type of authentication', }, { 'name': 'api_key', 'label': 'API key (v4)/Token (v5)', 'type': 'password' }, { 'name': 'auth_user', 'label': 'Username', 'description': '(only for v5)' }, { 'name': 'auth_pass', 'label': 'Password', 'type': 'password', 'description': '(only for v5)' }, { 'name': 'label', 'description': 'Label to add torrent as.' } ] } ] }]
17,821
Python
.py
438
29.993151
126
0.572747
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,924
__init__.py
CouchPotato_CouchPotatoServer/couchpotato/core/downloaders/putio/__init__.py
from .main import PutIO def autoload(): return PutIO() config = [{ 'name': 'putio', 'groups': [ { 'tab': 'downloaders', 'list': 'download_providers', 'name': 'putio', 'label': 'Put.io', 'description': 'This will start a torrent download on <a href="https://put.io/" target="_blank">Put.io</a>.', 'wizard': True, 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', 'radio_group': 'torrent', }, { 'name': 'oauth_token', 'label': 'oauth_token', 'description': 'This is the OAUTH_TOKEN from your putio API', 'advanced': True, }, { 'name': 'folder', 'description': ('The folder on putio where you want the upload to go','Will find the first first folder that matches this name'), 'default': 0, }, { 'name': 'https', 'description': 'Set to true if your callback host accepts https instead of http', 'type': 'bool', 'default': 0, }, { 'name': 'callback_host', 'description': 'External reachable url to CP so put.io can do it\'s thing', }, { 'name': 'download', 'description': 'Set this to have CouchPotato download the file from Put.io', 'type': 'bool', 'default': 0, }, { 'name': 'delete_file', 'description': ('Set this to remove the file from putio after sucessful download','Does nothing if you don\'t select download'), 'type': 'bool', 'default': 0, }, { 'name': 'download_dir', 'type': 'directory', 'label': 'Download Directory', 'description': 'The Directory to download files to, does nothing if you don\'t select download', }, { 'name': 'manual', 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', }, ], } ], }]
2,735
Python
.py
70
22.357143
149
0.405111
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,925
main.py
CouchPotato_CouchPotatoServer/couchpotato/core/downloaders/putio/main.py
from couchpotato.api import addApiView from couchpotato.core.event import addEvent, fireEventAsync from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList from couchpotato.core.helpers.variable import cleanHost from couchpotato.core.logger import CPLog from couchpotato.environment import Env from pio import api as pio import datetime log = CPLog(__name__) autoload = 'Putiodownload' class PutIO(DownloaderBase): protocol = ['torrent', 'torrent_magnet'] downloading_list = [] oauth_authenticate = 'https://api.couchpota.to/authorize/putio/' def __init__(self): addApiView('downloader.putio.getfrom', self.getFromPutio, docs = { 'desc': 'Allows you to download file from prom Put.io', }) addApiView('downloader.putio.auth_url', self.getAuthorizationUrl) addApiView('downloader.putio.credentials', self.getCredentials) addEvent('putio.download', self.putioDownloader) return super(PutIO, self).__init__() # This is a recusive function to check for the folders def recursionFolder(self, client, folder = 0, tfolder = ''): files = client.File.list(folder) for f in files: if f.content_type == 'application/x-directory': if f.name == tfolder: return f.id else: result = self.recursionFolder(client, f.id, tfolder) if result != 0: return result return 0 # This will check the root for the folder, and kick of recusively checking sub folder def convertFolder(self, client, folder): if folder == 0: return 0 else: return self.recursionFolder(client, 0, folder) def download(self, data = None, media = None, filedata = None): if not media: media = {} if not data: data = {} log.info('Sending "%s" to put.io', data.get('name')) url = data.get('url') client = pio.Client(self.conf('oauth_token')) putioFolder = self.convertFolder(client, self.conf('folder')) log.debug('putioFolder ID is %s', putioFolder) # It might be possible to call getFromPutio from the renamer if we can then we don't need to do this. 
# Note callback_host is NOT our address, it's the internet host that putio can call too callbackurl = None if self.conf('download'): pre = 'http://' if self.conf('https'): pre = 'https://' callbackurl = pre + self.conf('callback_host') + '%sdownloader.putio.getfrom/' %Env.get('api_base'.strip('/')) log.debug('callbackurl is %s', callbackurl) resp = client.Transfer.add_url(url, callback_url = callbackurl, parent_id = putioFolder) log.debug('resp is %s', resp.id) return self.downloadReturnId(resp.id) def test(self): try: client = pio.Client(self.conf('oauth_token')) if client.File.list(): return True except: log.info('Failed to get file listing, check OAUTH_TOKEN') return False def getAuthorizationUrl(self, host = None, **kwargs): callback_url = cleanHost(host) + '%sdownloader.putio.credentials/' % (Env.get('api_base').lstrip('/')) log.debug('callback_url is %s', callback_url) target_url = self.oauth_authenticate + "?target=" + callback_url log.debug('target_url is %s', target_url) return { 'success': True, 'url': target_url, } def getCredentials(self, **kwargs): try: oauth_token = kwargs.get('oauth') except: return 'redirect', Env.get('web_base') + 'settings/downloaders/' log.debug('oauth_token is: %s', oauth_token) self.conf('oauth_token', value = oauth_token); return 'redirect', Env.get('web_base') + 'settings/downloaders/' def getAllDownloadStatus(self, ids): log.debug('Checking putio download status.') client = pio.Client(self.conf('oauth_token')) transfers = client.Transfer.list() log.debug(transfers); release_downloads = ReleaseDownloadList(self) for t in transfers: if t.id in ids: log.debug('downloading list is %s', self.downloading_list) if t.status == "COMPLETED" and self.conf('download') == False : status = 'completed' # So check if we are trying to download something elif t.status == "COMPLETED" and self.conf('download') == True: # Assume we are done status = 'completed' if not self.downloading_list: now = datetime.datetime.utcnow() date_time = 
datetime.datetime.strptime(t.finished_at,"%Y-%m-%dT%H:%M:%S") # We need to make sure a race condition didn't happen if (now - date_time) < datetime.timedelta(minutes=5): # 5 minutes haven't passed so we wait status = 'busy' else: # If we have the file_id in the downloading_list mark it as busy if str(t.file_id) in self.downloading_list: status = 'busy' else: status = 'busy' release_downloads.append({ 'id' : t.id, 'name': t.name, 'status': status, 'timeleft': t.estimated_time, }) return release_downloads def putioDownloader(self, fid): log.info('Put.io Real downloader called with file_id: %s',fid) client = pio.Client(self.conf('oauth_token')) log.debug('About to get file List') putioFolder = self.convertFolder(client, self.conf('folder')) log.debug('PutioFolderID is %s', putioFolder) files = client.File.list(parent_id=putioFolder) downloaddir = self.conf('download_dir') for f in files: if str(f.id) == str(fid): client.File.download(f, dest = downloaddir, delete_after_download = self.conf('delete_file')) # Once the download is complete we need to remove it from the running list. self.downloading_list.remove(fid) return True def getFromPutio(self, **kwargs): try: file_id = str(kwargs.get('file_id')) except: return { 'success' : False, } log.info('Put.io Download has been called file_id is %s', file_id) if file_id not in self.downloading_list: self.downloading_list.append(file_id) fireEventAsync('putio.download',fid = file_id) return { 'success': True, } return { 'success': False, }
7,093
Python
.py
151
34.907285
122
0.578025
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,926
desktop.py
CouchPotato_CouchPotatoServer/couchpotato/core/_base/desktop.py
from couchpotato.core.event import fireEvent, addEvent from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from couchpotato.environment import Env log = CPLog(__name__) autoload = 'Desktop' if Env.get('desktop'): class Desktop(Plugin): def __init__(self): desktop = Env.get('desktop') desktop.setSettings({ 'base_url': fireEvent('app.base_url', single = True), 'api_url': fireEvent('app.api_url', single = True), 'api': Env.setting('api'), }) # Events from desktop desktop.addEvents({ 'onClose': self.onClose, }) # Events to desktop addEvent('app.after_shutdown', desktop.afterShutdown) addEvent('app.load', desktop.onAppLoad, priority = 110) def onClose(self, event): return fireEvent('app.shutdown', single = True) else: class Desktop(Plugin): pass
1,018
Python
.py
27
28.259259
69
0.600613
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,927
scheduler.py
CouchPotato_CouchPotatoServer/couchpotato/core/_base/scheduler.py
from apscheduler.scheduler import Scheduler as Sched from couchpotato.core.event import addEvent from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin log = CPLog(__name__) autoload = 'Scheduler' class Scheduler(Plugin): crons = {} intervals = {} started = False def __init__(self): addEvent('schedule.cron', self.cron) addEvent('schedule.interval', self.interval) addEvent('schedule.remove', self.remove) addEvent('schedule.queue', self.queue) self.sched = Sched(misfire_grace_time = 60) self.sched.start() self.started = True def remove(self, identifier): for cron_type in ['intervals', 'crons']: try: self.sched.unschedule_job(getattr(self, cron_type)[identifier]['job']) log.debug('%s unscheduled %s', (cron_type.capitalize(), identifier)) except: pass def doShutdown(self, *args, **kwargs): self.stop() return super(Scheduler, self).doShutdown(*args, **kwargs) def stop(self): if self.started: log.debug('Stopping scheduler') self.sched.shutdown(wait = False) log.debug('Scheduler stopped') self.started = False def cron(self, identifier = '', handle = None, day = '*', hour = '*', minute = '*'): log.info('Scheduling "%s", cron: day = %s, hour = %s, minute = %s', (identifier, day, hour, minute)) self.remove(identifier) self.crons[identifier] = { 'handle': handle, 'day': day, 'hour': hour, 'minute': minute, 'job': self.sched.add_cron_job(handle, day = day, hour = hour, minute = minute) } def interval(self, identifier = '', handle = None, hours = 0, minutes = 0, seconds = 0): log.info('Scheduling %s, interval: hours = %s, minutes = %s, seconds = %s', (identifier, hours, minutes, seconds)) self.remove(identifier) self.intervals[identifier] = { 'handle': handle, 'hours': hours, 'minutes': minutes, 'seconds': seconds, 'job': self.sched.add_interval_job(handle, hours = hours, minutes = minutes, seconds = seconds) } return True def queue(self, handlers = None): if not handlers: handlers = [] for h in handlers: h() if self.shuttingDown(): break 
return True
2,528
Python
.py
62
31.387097
122
0.585037
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,928
clientscript.py
CouchPotato_CouchPotatoServer/couchpotato/core/_base/clientscript.py
import os from couchpotato.core.event import addEvent from couchpotato.core.helpers.variable import tryInt from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from couchpotato.environment import Env log = CPLog(__name__) autoload = 'ClientScript' class ClientScript(Plugin): paths = { 'style': [ 'style/combined.min.css', ], 'script': [ 'scripts/combined.vendor.min.js', 'scripts/combined.base.min.js', 'scripts/combined.plugins.min.js', ], } def __init__(self): addEvent('clientscript.get_styles', self.getStyles) addEvent('clientscript.get_scripts', self.getScripts) self.makeRelative() def makeRelative(self): for static_type in self.paths: updates_paths = [] for rel_path in self.paths.get(static_type): file_path = os.path.join(Env.get('app_dir'), 'couchpotato', 'static', rel_path) core_url = 'static/%s?%d' % (rel_path, tryInt(os.path.getmtime(file_path))) updates_paths.append(core_url) self.paths[static_type] = updates_paths def getStyles(self, *args, **kwargs): return self.get('style', *args, **kwargs) def getScripts(self, *args, **kwargs): return self.get('script', *args, **kwargs) def get(self, type): if type in self.paths: return self.paths[type] return []
1,500
Python
.py
39
30.128205
95
0.623701
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,929
_core.py
CouchPotato_CouchPotatoServer/couchpotato/core/_base/_core.py
from uuid import uuid4 import os import platform import signal import time import traceback import webbrowser import sys from couchpotato.api import addApiView from couchpotato.core.event import fireEvent, addEvent from couchpotato.core.helpers.variable import cleanHost, md5, isSubFolder, compareVersions from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from couchpotato.environment import Env from tornado.ioloop import IOLoop log = CPLog(__name__) autoload = 'Core' class Core(Plugin): ignore_restart = [ 'Core.restart', 'Core.shutdown', 'Updater.check', 'Updater.autoUpdate', ] shutdown_started = False def __init__(self): addApiView('app.shutdown', self.shutdown, docs = { 'desc': 'Shutdown the app.', 'return': {'type': 'string: shutdown'} }) addApiView('app.restart', self.restart, docs = { 'desc': 'Restart the app.', 'return': {'type': 'string: restart'} }) addApiView('app.available', self.available, docs = { 'desc': 'Check if app available.' }) addApiView('app.version', self.versionView, docs = { 'desc': 'Get version.' 
}) addEvent('app.shutdown', self.shutdown) addEvent('app.restart', self.restart) addEvent('app.load', self.launchBrowser, priority = 1) addEvent('app.base_url', self.createBaseUrl) addEvent('app.api_url', self.createApiUrl) addEvent('app.version', self.version) addEvent('app.load', self.checkDataDir) addEvent('app.load', self.cleanUpFolders) addEvent('app.load.after', self.dependencies) addEvent('setting.save.core.password', self.md5Password) addEvent('setting.save.core.api_key', self.checkApikey) # Make sure we can close-down with ctrl+c properly if not Env.get('desktop'): self.signalHandler() # Set default urlopen timeout import socket socket.setdefaulttimeout(30) # Don't check ssl by default try: if sys.version_info >= (2, 7, 9): import ssl ssl._create_default_https_context = ssl._create_unverified_context except: log.debug('Failed setting default ssl context: %s', traceback.format_exc()) def dependencies(self): # Check if lxml is available try: from lxml import etree except: log.error('LXML not available, please install for better/faster scraping support: `http://lxml.de/installation.html`') try: import OpenSSL v = OpenSSL.__version__ v_needed = '0.15' if compareVersions(OpenSSL.__version__, v_needed) < 0: log.error('OpenSSL installed but %s is needed while %s is installed. Run `pip install pyopenssl --upgrade`', (v_needed, v)) try: import ssl log.debug('OpenSSL detected: pyopenssl (%s) using OpenSSL (%s)', (v, ssl.OPENSSL_VERSION)) except: pass except: log.error('OpenSSL not available, please install for better requests validation: `https://pyopenssl.readthedocs.org/en/latest/install.html`: %s', traceback.format_exc()) def md5Password(self, value): return md5(value) if value else '' def checkApikey(self, value): return value if value and len(value) > 3 else uuid4().hex def checkDataDir(self): if isSubFolder(Env.get('data_dir'), Env.get('app_dir')): log.error('You should NOT use your CouchPotato directory to save your settings in. 
Files will get overwritten or be deleted.') return True def cleanUpFolders(self): only_clean = ['couchpotato', 'libs', 'init'] self.deleteEmptyFolder(Env.get('app_dir'), show_error = False, only_clean = only_clean) def available(self, **kwargs): return { 'success': True } def shutdown(self, **kwargs): if self.shutdown_started: return False def shutdown(): self.initShutdown() if IOLoop.current()._closing: shutdown() else: IOLoop.current().add_callback(shutdown) return 'shutdown' def restart(self, **kwargs): if self.shutdown_started: return False def restart(): self.initShutdown(restart = True) IOLoop.current().add_callback(restart) return 'restarting' def initShutdown(self, restart = False): if self.shutdown_started: log.info('Already shutting down') return log.info('Shutting down' if not restart else 'Restarting') self.shutdown_started = True fireEvent('app.do_shutdown', restart = restart) log.debug('Every plugin got shutdown event') loop = True starttime = time.time() while loop: log.debug('Asking who is running') still_running = fireEvent('plugin.running', merge = True) log.debug('Still running: %s', still_running) if len(still_running) == 0: break elif starttime < time.time() - 30: # Always force break after 30s wait break running = list(set(still_running) - set(self.ignore_restart)) if len(running) > 0: log.info('Waiting on plugins to finish: %s', running) else: loop = False time.sleep(1) log.debug('Safe to shutdown/restart') loop = IOLoop.current() try: if not loop._closing: loop.stop() except RuntimeError: pass except: log.error('Failed shutting down the server: %s', traceback.format_exc()) fireEvent('app.after_shutdown', restart = restart) def launchBrowser(self): if Env.setting('launch_browser'): log.info('Launching browser') url = self.createBaseUrl() try: webbrowser.open(url, 2, 1) except: try: webbrowser.open(url, 1, 1) except: log.error('Could not launch a browser.') def createBaseUrl(self): host = Env.setting('host') if host == '0.0.0.0' or host == '': 
host = 'localhost' port = Env.setting('port') ssl = Env.setting('ssl_cert') and Env.setting('ssl_key') return '%s:%d%s' % (cleanHost(host, ssl = ssl).rstrip('/'), int(port), Env.get('web_base')) def createApiUrl(self): return '%sapi/%s' % (self.createBaseUrl(), Env.setting('api_key')) def version(self): ver = fireEvent('updater.info', single = True) or {'version': {}} if os.name == 'nt': platf = 'windows' elif 'Darwin' in platform.platform(): platf = 'osx' else: platf = 'linux' return '%s - %s-%s - v2' % (platf, ver.get('version').get('type') or 'unknown', ver.get('version').get('hash') or 'unknown') def versionView(self, **kwargs): return { 'version': self.version() } def signalHandler(self): if Env.get('daemonized'): return def signal_handler(*args, **kwargs): fireEvent('app.shutdown', single = True) signal.signal(signal.SIGINT, signal_handler) signal.signal(signal.SIGTERM, signal_handler) config = [{ 'name': 'core', 'order': 1, 'groups': [ { 'tab': 'general', 'name': 'basics', 'description': 'Needs restart before changes take effect.', 'wizard': True, 'options': [ { 'name': 'username', 'default': '', 'ui-meta' : 'rw', }, { 'name': 'password', 'default': '', 'type': 'password', }, { 'name': 'port', 'default': 5050, 'type': 'int', 'description': 'The port I should listen to.', }, { 'name': 'ipv6', 'default': 0, 'type': 'bool', 'description': 'Also bind the WebUI to ipv6 address', }, { 'name': 'ssl_cert', 'description': 'Path to SSL server.crt', 'advanced': True, }, { 'name': 'ssl_key', 'description': 'Path to SSL server.key', 'advanced': True, }, { 'name': 'launch_browser', 'default': True, 'type': 'bool', 'description': 'Launch the browser when I start.', 'wizard': True, }, { 'name': 'dark_theme', 'default': False, 'type': 'bool', 'description': 'For people with sensitive skin', 'wizard': True, }, ], }, { 'tab': 'general', 'name': 'advanced', 'description': "For those who know what they're doing", 'advanced': True, 'options': [ { 'name': 'api_key', 'default': 
uuid4().hex, 'ui-meta' : 'ro', 'description': 'Let 3rd party app do stuff. <a href="../../docs/" target="_self">Docs</a>', }, { 'name': 'dereferer', 'default': 'http://www.nullrefer.com/?', 'description': 'Derefer links to external sites, keep empty for no dereferer. Example: http://www.dereferer.org/? or http://www.nullrefer.com/?.', }, { 'name': 'use_proxy', 'default': 0, 'type': 'bool', 'description': 'Route outbound connections via proxy. Currently, only <a href="https://en.wikipedia.org/wiki/Proxy_server#Web_proxy_servers" target=_"blank">HTTP(S) proxies</a> are supported. ', }, { 'name': 'proxy_server', 'description': 'Override system default proxy server. Currently, only <a href="https://en.wikipedia.org/wiki/Proxy_server#Web_proxy_servers" target=_"blank">HTTP(S) proxies</a> are supported. Ex. <i>\"127.0.0.1:8080\"</i>. Keep empty to use system default proxy server.', }, { 'name': 'proxy_username', 'description': 'Only HTTP Basic Auth is supported. Leave blank to disable authentication.', }, { 'name': 'proxy_password', 'type': 'password', 'description': 'Leave blank for no password.', }, { 'name': 'bookmarklet_host', 'description': 'Override default bookmarklet host. This can be useful in a reverse proxy environment. For example: "http://username:password@customHost:1020". Requires restart to take effect.', 'advanced': True, }, { 'name': 'debug', 'default': 0, 'type': 'bool', 'description': 'Enable debugging.', }, { 'name': 'development', 'default': 0, 'type': 'bool', 'description': 'Enable this if you\'re developing, and NOT in any other case, thanks.', }, { 'name': 'data_dir', 'type': 'directory', 'description': 'Where cache/logs/etc are stored. Keep empty for defaults.', }, { 'name': 'url_base', 'default': '', 'description': 'When using mod_proxy use this to append the url with this.', }, { 'name': 'permission_folder', 'default': '0755', 'label': 'Folder CHMOD', 'description': 'Can be either decimal (493) or octal (leading zero: 0755). 
<a href="http://permissions-calculator.org/" target="_blank">Calculate the correct value</a>', }, { 'name': 'permission_file', 'default': '0644', 'label': 'File CHMOD', 'description': 'See Folder CHMOD description, but for files', }, ], }, ], }]
13,193
Python
.py
318
28.144654
291
0.510142
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,930
__init__.py
CouchPotato_CouchPotatoServer/couchpotato/core/_base/downloader/__init__.py
from .main import Downloader def autoload(): return Downloader() config = [{ 'name': 'download_providers', 'groups': [ { 'label': 'Downloaders', 'description': 'You can select different downloaders for each type (usenet / torrent)', 'type': 'list', 'name': 'download_providers', 'tab': 'downloaders', 'options': [], }, ], }]
433
Python
.py
16
19.3125
99
0.525424
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,931
main.py
CouchPotato_CouchPotatoServer/couchpotato/core/_base/downloader/main.py
from base64 import b32decode, b16encode import random import re from couchpotato.api import addApiView from couchpotato.core.event import addEvent from couchpotato.core.helpers.variable import mergeDicts from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.base import Provider from couchpotato.core.plugins.base import Plugin log = CPLog(__name__) ## This is here to load the static files class Downloader(Plugin): pass class DownloaderBase(Provider): protocol = [] http_time_between_calls = 0 status_support = True torrent_sources = [ 'https://torcache.net/torrent/%s.torrent', 'https://itorrents.org/torrent/%s.torrent', ] torrent_trackers = [ 'udp://tracker.istole.it:80/announce', 'http://tracker.istole.it/announce', 'udp://fr33domtracker.h33t.com:3310/announce', 'http://tracker.publicbt.com/announce', 'udp://tracker.publicbt.com:80/announce', 'http://tracker.ccc.de/announce', 'udp://tracker.ccc.de:80/announce', 'http://exodus.desync.com/announce', 'http://exodus.desync.com:6969/announce', 'http://tracker.publichd.eu/announce', 'udp://tracker.publichd.eu:80/announce', 'http://tracker.openbittorrent.com/announce', 'udp://tracker.openbittorrent.com/announce', 'udp://tracker.openbittorrent.com:80/announce', 'udp://open.demonii.com:1337/announce', ] def __init__(self): addEvent('download', self._download) addEvent('download.enabled', self._isEnabled) addEvent('download.enabled_protocols', self.getEnabledProtocol) addEvent('download.status', self._getAllDownloadStatus) addEvent('download.remove_failed', self._removeFailed) addEvent('download.pause', self._pause) addEvent('download.process_complete', self._processComplete) addApiView('download.%s.test' % self.getName().lower(), self._test) def getEnabledProtocol(self): for download_protocol in self.protocol: if self.isEnabled(manual = True, data = {'protocol': download_protocol}): return self.protocol return [] def _download(self, data = None, media = None, manual = False, filedata = None): 
if not media: media = {} if not data: data = {} if self.isDisabled(manual, data): return return self.download(data = data, media = media, filedata = filedata) def download(self, *args, **kwargs): return False def _getAllDownloadStatus(self, download_ids): if self.isDisabled(manual = True, data = {}): return ids = [download_id['id'] for download_id in download_ids if download_id['downloader'] == self.getName()] if ids: return self.getAllDownloadStatus(ids) else: return def getAllDownloadStatus(self, ids): return [] def _removeFailed(self, release_download): if self.isDisabled(manual = True, data = {}): return if release_download and release_download.get('downloader') == self.getName(): if self.conf('delete_failed'): return self.removeFailed(release_download) return False return def removeFailed(self, release_download): return def _processComplete(self, release_download): if self.isDisabled(manual = True, data = {}): return if release_download and release_download.get('downloader') == self.getName(): if self.conf('remove_complete', default = False): return self.processComplete(release_download = release_download, delete_files = self.conf('delete_files', default = False)) return False return def processComplete(self, release_download, delete_files): return def isCorrectProtocol(self, protocol): is_correct = protocol in self.protocol if not is_correct: log.debug("Downloader doesn't support this protocol") return is_correct def magnetToTorrent(self, magnet_link): torrent_hash = re.findall('urn:btih:([\w]{32,40})', magnet_link)[0].upper() # Convert base 32 to hex if len(torrent_hash) == 32: torrent_hash = b16encode(b32decode(torrent_hash)) sources = self.torrent_sources random.shuffle(sources) for source in sources: try: filedata = self.urlopen(source % torrent_hash, headers = {'Referer': source % torrent_hash}, show_error = False) if 'torcache' in filedata and 'file not found' in filedata.lower(): continue return filedata except: log.debug('Torrent hash "%s" wasn\'t 
found on: %s', (torrent_hash, source)) log.error('Failed converting magnet url to torrent: %s', torrent_hash) return False def downloadReturnId(self, download_id): return { 'downloader': self.getName(), 'status_support': self.status_support, 'id': download_id } def isDisabled(self, manual = False, data = None): if not data: data = {} return not self.isEnabled(manual, data) def _isEnabled(self, manual, data = None): if not data: data = {} if not self.isEnabled(manual, data): return return True def isEnabled(self, manual = False, data = None): if not data: data = {} d_manual = self.conf('manual', default = False) return super(DownloaderBase, self).isEnabled() and \ (d_manual and manual or d_manual is False) and \ (not data or self.isCorrectProtocol(data.get('protocol'))) def _test(self, **kwargs): t = self.test() if isinstance(t, tuple): return {'success': t[0], 'msg': t[1]} return {'success': t} def test(self): return False def _pause(self, release_download, pause = True): if self.isDisabled(manual = True, data = {}): return if release_download and release_download.get('downloader') == self.getName(): self.pause(release_download, pause) return True return False def pause(self, release_download, pause): return class ReleaseDownloadList(list): provider = None def __init__(self, provider, **kwargs): self.provider = provider self.kwargs = kwargs super(ReleaseDownloadList, self).__init__() def extend(self, results): for r in results: self.append(r) def append(self, result): new_result = self.fillResult(result) super(ReleaseDownloadList, self).append(new_result) def fillResult(self, result): defaults = { 'id': 0, 'status': 'busy', 'downloader': self.provider.getName(), 'folder': '', 'files': [], } return mergeDicts(defaults, result)
7,112
Python
.py
169
33.023669
139
0.624473
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,932
__init__.py
CouchPotato_CouchPotatoServer/couchpotato/core/_base/updater/__init__.py
import os from .main import Updater from couchpotato.environment import Env def autoload(): return Updater() config = [{ 'name': 'updater', 'groups': [ { 'tab': 'general', 'name': 'updater', 'label': 'Updater', 'git_only': True, 'description': 'Enable periodic update checking', 'options': [ { 'name': 'enabled', 'default': True, 'type': 'enabler', }, { 'name': 'notification', 'type': 'bool', 'default': True, 'description': 'Send a notification if an update is available.', }, { 'name': 'automatic', 'default': True, 'type': 'bool', 'description': 'Automatically update when update is available', }, { 'name': 'git_command', 'default': 'git', 'hidden': not os.path.isdir(os.path.join(Env.get('app_dir'), '.git')), 'advanced': True }, ], }, ], }]
1,296
Python
.py
42
16.809524
90
0.3856
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,933
main.py
CouchPotato_CouchPotatoServer/couchpotato/core/_base/updater/main.py
import json import os import shutil import tarfile import time import traceback import zipfile from datetime import datetime from threading import RLock import re from couchpotato.api import addApiView from couchpotato.core.event import addEvent, fireEvent, fireEventAsync from couchpotato.core.helpers.encoding import sp from couchpotato.core.helpers.variable import removePyc, tryInt from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from couchpotato.environment import Env from dateutil.parser import parse from git.repository import LocalRepository import version from six.moves import filter log = CPLog(__name__) class Updater(Plugin): available_notified = False _lock = RLock() last_check = 'updater.last_checked' def __init__(self): if Env.get('desktop'): self.updater = DesktopUpdater() elif os.path.isdir(os.path.join(Env.get('app_dir'), '.git')): git_default = 'git' git_command = self.conf('git_command', default = git_default) git_command = git_command if git_command != git_default and (os.path.isfile(git_command) or re.match('^[a-zA-Z0-9_/\.\-]+$', git_command)) else git_default self.updater = GitUpdater(git_command) else: self.updater = SourceUpdater() addEvent('app.load', self.logVersion, priority = 10000) addEvent('app.load', self.setCrons) addEvent('updater.info', self.info) addApiView('updater.info', self.info, docs = { 'desc': 'Get updater information', 'return': { 'type': 'object', 'example': """{ 'last_check': "last checked for update", 'update_version': "available update version or empty", 'version': current_cp_version }"""} }) addApiView('updater.update', self.doUpdateView) addApiView('updater.check', self.checkView, docs = { 'desc': 'Check for available update', 'return': {'type': 'see updater.info'} }) addEvent('setting.save.updater.enabled.after', self.setCrons) def logVersion(self): info = self.info() log.info('=== VERSION %s, using %s ===', (info.get('version', {}).get('repr', 'UNKNOWN'), 
self.updater.getName())) def setCrons(self): fireEvent('schedule.remove', 'updater.check', single = True) if self.isEnabled(): fireEvent('schedule.interval', 'updater.check', self.autoUpdate, hours = 24) self.autoUpdate() # Check after enabling def autoUpdate(self): do_check = True try: last_check = tryInt(Env.prop(self.last_check, default = 0)) now = tryInt(time.time()) do_check = last_check < now - 43200 if do_check: Env.prop(self.last_check, value = now) except: log.error('Failed checking last time to update: %s', traceback.format_exc()) if do_check and self.isEnabled() and self.check() and self.conf('automatic') and not self.updater.update_failed: if self.updater.doUpdate(): # Notify before restarting try: if self.conf('notification'): info = self.updater.info() version_date = datetime.fromtimestamp(info['update_version']['date']) fireEvent('updater.updated', 'CouchPotato: Updated to a new version with hash "%s", this version is from %s' % (info['update_version']['hash'], version_date), data = info) except: log.error('Failed notifying for update: %s', traceback.format_exc()) fireEventAsync('app.restart') return True return False def check(self, force = False): if not force and self.isDisabled(): return if self.updater.check(): if not self.available_notified and self.conf('notification') and not self.conf('automatic'): info = self.updater.info() version_date = datetime.fromtimestamp(info['update_version']['date']) fireEvent('updater.available', message = 'A new update with hash "%s" is available, this version is from %s' % (info['update_version']['hash'], version_date), data = info) self.available_notified = True return True return False def info(self, **kwargs): self._lock.acquire() info = {} try: info = self.updater.info() except: log.error('Failed getting updater info: %s', traceback.format_exc()) self._lock.release() return info def checkView(self, **kwargs): return { 'update_available': self.check(force = True), 'info': self.updater.info() } def 
doUpdateView(self, **kwargs): self.check() if not self.updater.update_version: log.error('Trying to update when no update is available.') success = False else: success = self.updater.doUpdate() if success: fireEventAsync('app.restart') # Assume the updater handles things if not success: success = True return { 'success': success } def doShutdown(self, *args, **kwargs): if not Env.get('dev') and not Env.get('desktop'): removePyc(Env.get('app_dir'), show_logs = False) return super(Updater, self).doShutdown(*args, **kwargs) class BaseUpdater(Plugin): repo_user = 'CouchPotato' repo_name = 'CouchPotatoServer' branch = version.BRANCH version = None update_failed = False update_version = None last_check = 0 def doUpdate(self): pass def info(self): current_version = self.getVersion() return { 'last_check': self.last_check, 'update_version': self.update_version, 'version': current_version, 'repo_name': '%s/%s' % (self.repo_user, self.repo_name), 'branch': current_version.get('branch', self.branch), } def getVersion(self): pass def check(self): pass class GitUpdater(BaseUpdater): old_repo = 'RuudBurger/CouchPotatoServer' new_repo = 'CouchPotato/CouchPotatoServer' def __init__(self, git_command): self.repo = LocalRepository(Env.get('app_dir'), command = git_command) remote_name = 'origin' remote = self.repo.getRemoteByName(remote_name) if self.old_repo in remote.url: log.info('Changing repo to new github organization: %s -> %s', (self.old_repo, self.new_repo)) new_url = remote.url.replace(self.old_repo, self.new_repo) self.repo._executeGitCommandAssertSuccess("remote set-url %s %s" % (remote_name, new_url)) def doUpdate(self): try: log.info('Updating to latest version') self.repo.pull() return True except: log.error('Failed updating via GIT: %s', traceback.format_exc()) self.update_failed = True return False def getVersion(self): if not self.version: hash = None date = None branch = self.branch try: output = self.repo.getHead() # Yes, please log.debug('Git version output: 
%s', output.hash) hash = output.hash[:8] date = output.getDate() branch = self.repo.getCurrentBranch().name except Exception as e: log.error('Failed using GIT updater, running from source, you need to have GIT installed. %s', e) self.version = { 'repr': 'git:(%s:%s % s) %s (%s)' % (self.repo_user, self.repo_name, branch, hash or 'unknown_hash', datetime.fromtimestamp(date) if date else 'unknown_date'), 'hash': hash, 'date': date, 'type': 'git', 'branch': branch } return self.version def check(self): if self.update_version: return True log.info('Checking for new version on github for %s', self.repo_name) if not Env.get('dev'): self.repo.fetch() current_branch = self.repo.getCurrentBranch().name for branch in self.repo.getRemoteByName('origin').getBranches(): if current_branch == branch.name: local = self.repo.getHead() remote = branch.getHead() log.debug('Versions, local:%s, remote:%s', (local.hash[:8], remote.hash[:8])) if local.getDate() < remote.getDate(): self.update_version = { 'hash': remote.hash[:8], 'date': remote.getDate(), } return True self.last_check = time.time() return False class SourceUpdater(BaseUpdater): def __init__(self): # Create version file in cache self.version_file = os.path.join(Env.get('cache_dir'), 'version') if not os.path.isfile(self.version_file): self.createFile(self.version_file, json.dumps(self.latestCommit())) def doUpdate(self): try: download_data = fireEvent('cp.source_url', repo = self.repo_user, repo_name = self.repo_name, branch = self.branch, single = True) destination = os.path.join(Env.get('cache_dir'), self.update_version.get('hash')) + '.' 
+ download_data.get('type') extracted_path = os.path.join(Env.get('cache_dir'), 'temp_updater') destination = fireEvent('file.download', url = download_data.get('url'), dest = destination, single = True) # Cleanup leftover from last time if os.path.isdir(extracted_path): self.removeDir(extracted_path) self.makeDir(extracted_path) # Extract if download_data.get('type') == 'zip': zip_file = zipfile.ZipFile(destination) zip_file.extractall(extracted_path) zip_file.close() else: tar = tarfile.open(destination) tar.extractall(path = extracted_path) tar.close() os.remove(destination) if self.replaceWith(os.path.join(extracted_path, os.listdir(extracted_path)[0])): self.removeDir(extracted_path) # Write update version to file self.createFile(self.version_file, json.dumps(self.update_version)) return True except: log.error('Failed updating: %s', traceback.format_exc()) self.update_failed = True return False def replaceWith(self, path): path = sp(path) app_dir = Env.get('app_dir') data_dir = Env.get('data_dir') # Get list of files we want to overwrite removePyc(app_dir) existing_files = [] for root, subfiles, filenames in os.walk(app_dir): for filename in filenames: existing_files.append(os.path.join(root, filename)) for root, subfiles, filenames in os.walk(path): for filename in filenames: fromfile = os.path.join(root, filename) tofile = os.path.join(app_dir, fromfile.replace(path + os.path.sep, '')) if not Env.get('dev'): try: if os.path.isfile(tofile): os.remove(tofile) dirname = os.path.dirname(tofile) if not os.path.isdir(dirname): self.makeDir(dirname) shutil.move(fromfile, tofile) try: existing_files.remove(tofile) except ValueError: pass except: log.error('Failed overwriting file "%s": %s', (tofile, traceback.format_exc())) return False for still_exists in existing_files: if data_dir in still_exists: continue try: os.remove(still_exists) except: log.error('Failed removing non-used file: %s', traceback.format_exc()) return True def removeDir(self, path): try: if 
os.path.isdir(path): shutil.rmtree(path) except OSError as inst: os.chmod(inst.filename, 0o777) self.removeDir(path) def getVersion(self): if not self.version: try: f = open(self.version_file, 'r') output = json.loads(f.read()) f.close() log.debug('Source version output: %s', output) self.version = output self.version['type'] = 'source' self.version['repr'] = 'source:(%s:%s % s) %s (%s)' % (self.repo_user, self.repo_name, self.branch, output.get('hash', '')[:8], datetime.fromtimestamp(output.get('date', 0))) except Exception as e: log.error('Failed using source updater. %s', e) return {} return self.version def check(self): current_version = self.getVersion() try: latest = self.latestCommit() if latest.get('hash') != current_version.get('hash') and latest.get('date') >= current_version.get('date'): self.update_version = latest self.last_check = time.time() except: log.error('Failed updating via source: %s', traceback.format_exc()) return self.update_version is not None def latestCommit(self): try: url = 'https://api.github.com/repos/%s/%s/commits?per_page=1&sha=%s' % (self.repo_user, self.repo_name, self.branch) data = self.getCache('github.commit', url = url) commit = json.loads(data)[0] return { 'hash': commit['sha'], 'date': int(time.mktime(parse(commit['commit']['committer']['date']).timetuple())), } except: log.error('Failed getting latest request from github: %s', traceback.format_exc()) return {} class DesktopUpdater(BaseUpdater): def __init__(self): self.desktop = Env.get('desktop') def doUpdate(self): try: def do_restart(e): if e['status'] == 'done': fireEventAsync('app.restart') elif e['status'] == 'error': log.error('Failed updating desktop: %s', e['exception']) self.update_failed = True self.desktop._esky.auto_update(callback = do_restart) return except: self.update_failed = True return False def info(self): return { 'last_check': self.last_check, 'update_version': self.update_version, 'version': self.getVersion(), 'branch': self.branch, } def 
check(self): current_version = self.getVersion() try: latest = self.desktop._esky.find_update() if latest and latest != current_version.get('hash'): self.update_version = { 'hash': latest, 'date': None, 'changelog': self.desktop._changelogURL, } self.last_check = time.time() except: log.error('Failed updating desktop: %s', traceback.format_exc()) return self.update_version is not None def getVersion(self): return { 'repr': 'desktop: %s' % self.desktop._esky.active_version, 'hash': self.desktop._esky.active_version, 'date': None, 'type': 'desktop', }
16,210
Python
.py
373
31.479893
195
0.563765
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,934
request.py
CouchPotato_CouchPotatoServer/couchpotato/core/helpers/request.py
from urllib import unquote import re from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.helpers.variable import natsortKey def getParams(params): reg = re.compile('^[a-z0-9_\.]+$') # Sort keys param_keys = params.keys() param_keys.sort(key = natsortKey) temp = {} for param in param_keys: value = params[param] nest = re.split("([\[\]]+)", param) if len(nest) > 1: nested = [] for key in nest: if reg.match(key): nested.append(key) current = temp for item in nested: if item is nested[-1]: current[item] = toUnicode(unquote(value)) else: try: current[item] except: current[item] = {} current = current[item] else: temp[param] = toUnicode(unquote(value)) if temp[param].lower() in ['true', 'false']: temp[param] = temp[param].lower() != 'false' return dictToList(temp) non_decimal = re.compile(r'[^\d.]+') def dictToList(params): if type(params) is dict: new = {} for x, value in params.items(): try: convert = lambda text: int(text) if text.isdigit() else text.lower() alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)] sorted_keys = sorted(value.keys(), key = alphanum_key) all_ints = 0 for pnr in sorted_keys: all_ints += 1 if non_decimal.sub('', pnr) == pnr else 0 if all_ints == len(sorted_keys): new_value = [dictToList(value[k]) for k in sorted_keys] else: new_value = value except: new_value = value new[x] = new_value else: new = params return new
2,029
Python
.py
55
24.581818
90
0.498978
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,935
encoding.py
CouchPotato_CouchPotatoServer/couchpotato/core/helpers/encoding.py
from string import ascii_letters, digits from urllib import quote_plus import os import re import traceback import unicodedata from chardet import detect from couchpotato.core.logger import CPLog import six log = CPLog(__name__) def toSafeString(original): valid_chars = "-_.() %s%s" % (ascii_letters, digits) cleaned_filename = unicodedata.normalize('NFKD', toUnicode(original)).encode('ASCII', 'ignore') valid_string = ''.join(c for c in cleaned_filename if c in valid_chars) return ' '.join(valid_string.split()) def simplifyString(original): string = stripAccents(original.lower()) string = toSafeString(' '.join(re.split('\W+', string))) split = re.split('\W+|_', string.lower()) return toUnicode(' '.join(split)) def toUnicode(original, *args): try: if isinstance(original, unicode): return original else: try: return six.text_type(original, *args) except: try: from couchpotato.environment import Env return original.decode(Env.get("encoding")) except: try: detected = detect(original) try: if detected.get('confidence') > 0.8: return original.decode(detected.get('encoding')) except: pass return ek(original, *args) except: raise except: log.error('Unable to decode value "%s..." 
: %s ', (repr(original)[:20], traceback.format_exc())) return 'ERROR DECODING STRING' def ss(original, *args): u_original = toUnicode(original, *args) try: from couchpotato.environment import Env return u_original.encode(Env.get('encoding')) except Exception as e: log.debug('Failed ss encoding char, force UTF8: %s', e) try: return u_original.encode(Env.get('encoding'), 'replace') except: return u_original.encode('utf-8', 'replace') def sp(path, *args): # Standardise encoding, normalise case, path and strip trailing '/' or '\' if not path or len(path) == 0: return path # convert windows path (from remote box) to *nix path if os.path.sep == '/' and '\\' in path: path = '/' + path.replace(':', '').replace('\\', '/') path = os.path.normpath(ss(path, *args)) # Remove any trailing path separators if path != os.path.sep: path = path.rstrip(os.path.sep) # Add a trailing separator in case it is a root folder on windows (crashes guessit) if len(path) == 2 and path[1] == ':': path = path + os.path.sep # Replace *NIX ambiguous '//' at the beginning of a path with '/' (crashes guessit) path = re.sub('^//', '/', path) return path def ek(original, *args): if isinstance(original, (str, unicode)): try: from couchpotato.environment import Env return original.decode(Env.get('encoding'), 'ignore') except UnicodeDecodeError: raise return original def isInt(value): try: int(value) return True except ValueError: return False def stripAccents(s): return ''.join((c for c in unicodedata.normalize('NFD', toUnicode(s)) if unicodedata.category(c) != 'Mn')) def tryUrlencode(s): new = six.u('') if isinstance(s, dict): for key, value in s.items(): new += six.u('&%s=%s') % (key, tryUrlencode(value)) return new[1:] else: for letter in ss(s): try: new += quote_plus(letter) except: new += letter return new
3,827
Python
.py
102
28.313725
110
0.579632
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,936
rss.py
CouchPotato_CouchPotatoServer/couchpotato/core/helpers/rss.py
import xml.etree.ElementTree as XMLTree from couchpotato.core.logger import CPLog log = CPLog(__name__) class RSS(object): def getTextElements(self, xml, path): """ Find elements and return tree""" textelements = [] try: elements = xml.findall(path) except: return for element in elements: textelements.append(element.text) return textelements def getElements(self, xml, path): elements = None try: elements = xml.findall(path) except: pass return elements def getElement(self, xml, path): """ Find element and return text""" try: return xml.find(path) except: return def getTextElement(self, xml, path): """ Find element and return text""" try: return xml.find(path).text except: return def getItems(self, data, path = 'channel/item'): try: return XMLTree.parse(data).findall(path) except Exception as e: log.error('Error parsing RSS. %s', e) return []
1,177
Python
.py
39
20.897436
52
0.569012
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,937
variable.py
CouchPotato_CouchPotatoServer/couchpotato/core/helpers/variable.py
import collections import ctypes import hashlib import os import platform import random import re import string import sys import traceback from couchpotato.core.helpers.encoding import simplifyString, toSafeString, ss, sp, toUnicode from couchpotato.core.logger import CPLog import six from six.moves import map, zip, filter log = CPLog(__name__) def fnEscape(pattern): return pattern.replace('[', '[[').replace(']', '[]]').replace('[[', '[[]') def link(src, dst): if os.name == 'nt': import ctypes if ctypes.windll.kernel32.CreateHardLinkW(toUnicode(dst), toUnicode(src), 0) == 0: raise ctypes.WinError() else: os.link(toUnicode(src), toUnicode(dst)) def symlink(src, dst): if os.name == 'nt': import ctypes if ctypes.windll.kernel32.CreateSymbolicLinkW(toUnicode(dst), toUnicode(src), 1 if os.path.isdir(src) else 0) in [0, 1280]: raise ctypes.WinError() else: os.symlink(toUnicode(src), toUnicode(dst)) def getUserDir(): try: import pwd if not os.environ['HOME']: os.environ['HOME'] = sp(pwd.getpwuid(os.geteuid()).pw_dir) except: pass return sp(os.path.expanduser('~')) def getDownloadDir(): user_dir = getUserDir() # OSX if 'darwin' in platform.platform().lower(): return os.path.join(user_dir, 'Downloads') if os.name == 'nt': return os.path.join(user_dir, 'Downloads') return user_dir def getDataDir(): # Windows if os.name == 'nt': return os.path.join(os.environ['APPDATA'], 'CouchPotato') user_dir = getUserDir() # OSX if 'darwin' in platform.platform().lower(): return os.path.join(user_dir, 'Library', 'Application Support', 'CouchPotato') # FreeBSD if 'freebsd' in sys.platform: return os.path.join('/usr/local/', 'couchpotato', 'data') # Linux return os.path.join(user_dir, '.couchpotato') def isDict(obj): return isinstance(obj, dict) def mergeDicts(a, b, prepend_list = False): assert isDict(a), isDict(b) dst = a.copy() stack = [(dst, b)] while stack: current_dst, current_src = stack.pop() for key in current_src: if key not in current_dst: current_dst[key] = current_src[key] 
else: if isDict(current_src[key]) and isDict(current_dst[key]): stack.append((current_dst[key], current_src[key])) elif isinstance(current_src[key], list) and isinstance(current_dst[key], list): current_dst[key] = current_src[key] + current_dst[key] if prepend_list else current_dst[key] + current_src[key] current_dst[key] = removeListDuplicates(current_dst[key]) else: current_dst[key] = current_src[key] return dst def removeListDuplicates(seq): checked = [] for e in seq: if e not in checked: checked.append(e) return checked def flattenList(l): if isinstance(l, list): return sum(map(flattenList, l)) else: return l def md5(text): return hashlib.md5(ss(text)).hexdigest() def sha1(text): return hashlib.sha1(text).hexdigest() def isLocalIP(ip): ip = ip.lstrip('htps:/') regex = '/(^127\.)|(^192\.168\.)|(^10\.)|(^172\.1[6-9]\.)|(^172\.2[0-9]\.)|(^172\.3[0-1]\.)|(^::1)$/' return re.search(regex, ip) is not None or 'localhost' in ip or ip[:4] == '127.' def getExt(filename): return os.path.splitext(filename)[1][1:] def cleanHost(host, protocol = True, ssl = False, username = None, password = None): """Return a cleaned up host with given url options set Changes protocol to https if ssl is set to True and http if ssl is set to false. 
>>> cleanHost("localhost:80", ssl=True) 'https://localhost:80/' >>> cleanHost("localhost:80", ssl=False) 'http://localhost:80/' Username and password is managed with the username and password variables >>> cleanHost("localhost:80", username="user", password="passwd") 'http://user:passwd@localhost:80/' Output without scheme (protocol) can be forced with protocol=False >>> cleanHost("localhost:80", protocol=False) 'localhost:80' """ if not '://' in host and protocol: host = ('https://' if ssl else 'http://') + host if not protocol: host = host.split('://', 1)[-1] if protocol and username and password: try: auth = re.findall('^(?:.+?//)(.+?):(.+?)@(?:.+)$', host) if auth: log.error('Cleanhost error: auth already defined in url: %s, please remove BasicAuth from url.', host) else: host = host.replace('://', '://%s:%s@' % (username, password), 1) except: pass host = host.rstrip('/ ') if protocol: host += '/' return host def getImdb(txt, check_inside = False, multiple = False): if not check_inside: txt = simplifyString(txt) else: txt = ss(txt) if check_inside and os.path.isfile(txt): output = open(txt, 'r') txt = output.read() output.close() try: ids = re.findall('(tt\d{4,8})', txt) if multiple: return removeDuplicate(['tt%s' % str(tryInt(x[2:])).rjust(7, '0') for x in ids]) if len(ids) > 0 else [] return 'tt%s' % str(tryInt(ids[0][2:])).rjust(7, '0') except IndexError: pass return False def tryInt(s, default = 0): try: return int(s) except: return default def tryFloat(s): try: if isinstance(s, str): return float(s) if '.' 
in s else tryInt(s) else: return float(s) except: return 0 def natsortKey(string_): """See http://www.codinghorror.com/blog/archives/001018.html""" return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_)] def toIterable(value): if isinstance(value, collections.Iterable): return value return [value] def getIdentifier(media): return media.get('identifier') or media.get('identifiers', {}).get('imdb') def getTitle(media_dict): try: try: return media_dict['title'] except: try: return media_dict['titles'][0] except: try: return media_dict['info']['titles'][0] except: try: return media_dict['media']['info']['titles'][0] except: log.error('Could not get title for %s', getIdentifier(media_dict)) return None except: log.error('Could not get title for library item: %s', media_dict) return None def possibleTitles(raw_title): titles = [ toSafeString(raw_title).lower(), raw_title.lower(), simplifyString(raw_title) ] # replace some chars new_title = raw_title.replace('&', 'and') titles.append(simplifyString(new_title)) return removeDuplicate(titles) def randomString(size = 8, chars = string.ascii_uppercase + string.digits): return ''.join(random.choice(chars) for x in range(size)) def splitString(str, split_on = ',', clean = True): l = [x.strip() for x in str.split(split_on)] if str else [] return removeEmpty(l) if clean else l def removeEmpty(l): return list(filter(None, l)) def removeDuplicate(l): seen = set() return [x for x in l if x not in seen and not seen.add(x)] def dictIsSubset(a, b): return all([k in b and b[k] == v for k, v in a.items()]) # Returns True if sub_folder is the same as or inside base_folder def isSubFolder(sub_folder, base_folder): if base_folder and sub_folder: base = sp(os.path.realpath(base_folder)) + os.path.sep subfolder = sp(os.path.realpath(sub_folder)) + os.path.sep return os.path.commonprefix([subfolder, base]) == base return False # From SABNZBD re_password = [re.compile(r'(.+){{([^{}]+)}}$'), 
re.compile(r'(.+)\s+password\s*=\s*(.+)$', re.I)] def scanForPassword(name): m = None for reg in re_password: m = reg.search(name) if m: break if m: return m.group(1).strip('. '), m.group(2).strip() under_pat = re.compile(r'_([a-z])') def underscoreToCamel(name): return under_pat.sub(lambda x: x.group(1).upper(), name) def removePyc(folder, only_excess = True, show_logs = True): folder = sp(folder) for root, dirs, files in os.walk(folder): pyc_files = filter(lambda filename: filename.endswith('.pyc'), files) py_files = set(filter(lambda filename: filename.endswith('.py'), files)) excess_pyc_files = filter(lambda pyc_filename: pyc_filename[:-1] not in py_files, pyc_files) if only_excess else pyc_files for excess_pyc_file in excess_pyc_files: full_path = os.path.join(root, excess_pyc_file) if show_logs: log.debug('Removing old PYC file: %s', full_path) try: os.remove(full_path) except: log.error('Couldn\'t remove %s: %s', (full_path, traceback.format_exc())) for dir_name in dirs: full_path = os.path.join(root, dir_name) if len(os.listdir(full_path)) == 0: try: os.rmdir(full_path) except: log.error('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc())) def getFreeSpace(directories): single = not isinstance(directories, (tuple, list)) if single: directories = [directories] free_space = {} for folder in directories: size = None if os.path.isdir(folder): if os.name == 'nt': _, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), \ ctypes.c_ulonglong() if sys.version_info >= (3,) or isinstance(folder, unicode): fun = ctypes.windll.kernel32.GetDiskFreeSpaceExW #@UndefinedVariable else: fun = ctypes.windll.kernel32.GetDiskFreeSpaceExA #@UndefinedVariable ret = fun(folder, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free)) if ret == 0: raise ctypes.WinError() return [total.value, free.value] else: s = os.statvfs(folder) size = [s.f_blocks * s.f_frsize / (1024 * 1024), (s.f_bavail * s.f_frsize) / (1024 * 1024)] if single: return size 
free_space[folder] = size return free_space def getSize(paths): single = not isinstance(paths, (tuple, list)) if single: paths = [paths] total_size = 0 for path in paths: path = sp(path) if os.path.isdir(path): total_size = 0 for dirpath, _, filenames in os.walk(path): for f in filenames: total_size += os.path.getsize(sp(os.path.join(dirpath, f))) elif os.path.isfile(path): total_size += os.path.getsize(path) return total_size / 1048576 # MB def find(func, iterable): for item in iterable: if func(item): return item return None def compareVersions(version1, version2): def normalize(v): return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")] return cmp(normalize(version1), normalize(version2))
11,559
Python
.py
295
31.013559
155
0.598294
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,938
subtitle.py
CouchPotato_CouchPotatoServer/couchpotato/core/plugins/subtitle.py
import traceback from couchpotato.core.event import addEvent from couchpotato.core.helpers.encoding import toUnicode, sp from couchpotato.core.helpers.variable import splitString from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from couchpotato.environment import Env import subliminal log = CPLog(__name__) autoload = 'Subtitle' class Subtitle(Plugin): services = ['opensubtitles', 'thesubdb', 'subswiki', 'subscenter', 'wizdom'] def __init__(self): addEvent('renamer.before', self.searchSingle) def searchSingle(self, group): if self.isDisabled(): return try: available_languages = sum(group['subtitle_language'].values(), []) downloaded = [] files = [toUnicode(x) for x in group['files']['movie']] log.debug('Searching for subtitles for: %s', files) for lang in self.getLanguages(): if lang not in available_languages: download = subliminal.download_subtitles(files, multi = True, force = self.conf('force'), languages = [lang], services = self.services, cache_dir = Env.get('cache_dir')) for subtitle in download: downloaded.extend(download[subtitle]) for d_sub in downloaded: log.info('Found subtitle (%s): %s', (d_sub.language.alpha2, files)) group['files']['subtitle'].append(sp(d_sub.path)) group['before_rename'].append(sp(d_sub.path)) group['subtitle_language'][sp(d_sub.path)] = [d_sub.language.alpha2] return True except: log.error('Failed searching for subtitle: %s', (traceback.format_exc())) return False def getLanguages(self): return splitString(self.conf('languages')) config = [{ 'name': 'subtitle', 'groups': [ { 'tab': 'renamer', 'name': 'subtitle', 'label': 'Download subtitles', 'description': 'after rename', 'options': [ { 'name': 'enabled', 'label': 'Search and download subtitles', 'default': False, 'type': 'enabler', }, { 'name': 'languages', 'description': ('Comma separated, 2 letter country code.', 'Example: en, nl. 
See the codes at <a href="http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes" target="_blank">on Wikipedia</a>'), }, { 'advanced': True, 'name': 'force', 'label': 'Force', 'description': ('Force download all languages (including embedded).', 'This will also <strong>overwrite</strong> all existing subtitles.'), 'default': False, 'type': 'bool', }, ], }, ], }]
3,012
Python
.py
68
32.147059
212
0.55434
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,939
browser_test.py
CouchPotato_CouchPotatoServer/couchpotato/core/plugins/browser_test.py
#import sys import os import mock import unittest from unittest import TestCase from couchpotato.core.plugins.browser import FileBrowser from couchpotato.core.softchroot import SoftChroot CHROOT_DIR = '/tmp/' # 'couchpotato.core.plugins.browser.Env', @mock.patch('couchpotato.core.plugins.browser.Env', name='EnvMock') class FileBrowserChrootedTest(TestCase): def setUp(self): self.b = FileBrowser() def tuneMock(self, env): #set up mock: sc = SoftChroot() sc.initialize(CHROOT_DIR) env.get.return_value = sc def test_view__chrooted_path_none(self, env): #def view(self, path = '/', show_hidden = True, **kwargs): self.tuneMock(env) r = self.b.view(None) self.assertEqual(r['home'], '/') self.assertEqual(r['parent'], '/') self.assertTrue(r['is_root']) def test_view__chrooted_path_chroot(self, env): #def view(self, path = '/', show_hidden = True, **kwargs): self.tuneMock(env) for path, parent in [('/asdf','/'), (CHROOT_DIR, '/'), ('/mnk/123/t', '/mnk/123/')]: r = self.b.view(path) path_strip = path if (path.endswith(os.path.sep)): path_strip = path_strip.rstrip(os.path.sep) self.assertEqual(r['home'], '/') self.assertEqual(r['parent'], parent) self.assertFalse(r['is_root'])
1,416
Python
.py
36
31.861111
92
0.617302
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,940
browser.py
CouchPotato_CouchPotatoServer/couchpotato/core/plugins/browser.py
import ctypes import os import string import traceback import time from couchpotato import CPLog from couchpotato.api import addApiView from couchpotato.core.event import addEvent from couchpotato.core.helpers.encoding import sp, ss, toUnicode from couchpotato.core.helpers.variable import getUserDir from couchpotato.core.plugins.base import Plugin from couchpotato.environment import Env log = CPLog(__name__) if os.name == 'nt': import imp try: imp.find_module('win32file') except: # todo:: subclass ImportError for missing dependencies, vs. broken plugins? raise ImportError("Missing the win32file module, which is a part of the prerequisite \ pywin32 package. You can get it from http://sourceforge.net/projects/pywin32/files/pywin32/") else: # noinspection PyUnresolvedReferences import win32file autoload = 'FileBrowser' class FileBrowser(Plugin): def __init__(self): addApiView('directory.list', self.view, docs = { 'desc': 'Return the directory list of a given directory', 'params': { 'path': {'desc': 'The directory to scan'}, 'show_hidden': {'desc': 'Also show hidden files'} }, 'return': {'type': 'object', 'example': """{ 'is_root': bool, //is top most folder 'parent': string, //parent folder of requested path 'home': string, //user home folder 'empty': bool, //directory is empty 'dirs': array, //directory names }"""} }) def getDirectories(self, path = '/', show_hidden = True): # Return driveletters or root if path is empty if path == '/' or not path or path == '\\': if os.name == 'nt': return self.getDriveLetters() path = '/' dirs = [] path = sp(path) for f in os.listdir(path): p = sp(os.path.join(path, f)) if os.path.isdir(p) and ((self.is_hidden(p) and bool(int(show_hidden))) or not self.is_hidden(p)): dirs.append(toUnicode('%s%s' % (p, os.path.sep))) return sorted(dirs) def getFiles(self): pass def getDriveLetters(self): driveletters = [] for drive in string.ascii_uppercase: if win32file.GetDriveType(drive + ':') in [win32file.DRIVE_FIXED, win32file.DRIVE_REMOTE, 
win32file.DRIVE_RAMDISK, win32file.DRIVE_REMOVABLE]: driveletters.append(drive + ':\\') return driveletters def view(self, path = '/', show_hidden = True, **kwargs): soft_chroot = Env.get('softchroot') home = getUserDir() if soft_chroot.enabled: if not soft_chroot.is_subdir(home): home = soft_chroot.get_chroot() if not path: path = home if path.endswith(os.path.sep): path = path.rstrip(os.path.sep) else: path = soft_chroot.chroot2abs(path) try: dirs = self.getDirectories(path = path, show_hidden = show_hidden) except: log.error('Failed getting directory "%s" : %s', (path, traceback.format_exc())) dirs = [] if soft_chroot.enabled: dirs = map(soft_chroot.abs2chroot, dirs) parent = os.path.dirname(path.rstrip(os.path.sep)) if parent == path.rstrip(os.path.sep): parent = '/' elif parent != '/' and parent[-2:] != ':\\': parent += os.path.sep # TODO : check on windows: is_root = path == '/' if soft_chroot.enabled: is_root = soft_chroot.is_root_abs(path) # fix paths: if soft_chroot.is_subdir(parent): parent = soft_chroot.abs2chroot(parent) else: parent = os.path.sep home = soft_chroot.abs2chroot(home) return { 'is_root': is_root, 'empty': len(dirs) == 0, 'parent': parent, 'home': home, 'platform': os.name, 'dirs': dirs, } def is_hidden(self, filepath): name = ss(os.path.basename(os.path.abspath(filepath))) return name.startswith('.') or self.has_hidden_attribute(filepath) def has_hidden_attribute(self, filepath): result = False try: attrs = ctypes.windll.kernel32.GetFileAttributesW(sp(filepath)) #@UndefinedVariable assert attrs != -1 result = bool(attrs & 2) except (AttributeError, AssertionError): pass except: log.error('Failed getting hidden attribute: %s', traceback.format_exc()) return result
4,716
Python
.py
118
30.491525
154
0.590829
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,941
file.py
CouchPotato_CouchPotatoServer/couchpotato/core/plugins/file.py
import os.path import traceback from couchpotato import get_db from couchpotato.api import addApiView from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.helpers.encoding import toUnicode, ss, sp from couchpotato.core.helpers.variable import md5, getExt, isSubFolder from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from couchpotato.environment import Env from tornado.web import StaticFileHandler log = CPLog(__name__) autoload = 'FileManager' class FileManager(Plugin): def __init__(self): addEvent('file.download', self.download) addApiView('file.cache/(.*)', self.showCacheFile, static = True, docs = { 'desc': 'Return a file from the cp_data/cache directory', 'params': { 'filename': {'desc': 'path/filename of the wanted file'} }, 'return': {'type': 'file'} }) fireEvent('schedule.interval', 'file.cleanup', self.cleanup, hours = 24) addEvent('app.test', self.doSubfolderTest) def cleanup(self): # Wait a bit after starting before cleanup log.debug('Cleaning up unused files') try: db = get_db() cache_dir = Env.get('cache_dir') medias = db.all('media', with_doc = True) files = [] for media in medias: file_dict = media['doc'].get('files', {}) for x in file_dict.keys(): files.extend(file_dict[x]) for f in os.listdir(cache_dir): if os.path.splitext(f)[1] in ['.png', '.jpg', '.jpeg']: file_path = os.path.join(cache_dir, f) if toUnicode(file_path) not in files: os.remove(file_path) except: log.error('Failed removing unused file: %s', traceback.format_exc()) def showCacheFile(self, route, **kwargs): Env.get('app').add_handlers(".*$", [('%s%s' % (Env.get('api_base'), route), StaticFileHandler, {'path': toUnicode(Env.get('cache_dir'))})]) def download(self, url = '', dest = None, overwrite = False, urlopen_kwargs = None): if not urlopen_kwargs: urlopen_kwargs = {} # Return response object to stream download urlopen_kwargs['stream'] = True if not dest: # to Cache dest = os.path.join(Env.get('cache_dir'), ss('%s.%s' % 
(md5(url), getExt(url)))) dest = sp(dest) if not overwrite and os.path.isfile(dest): return dest try: filedata = self.urlopen(url, **urlopen_kwargs) except: log.error('Failed downloading file %s: %s', (url, traceback.format_exc())) return False self.createFile(dest, filedata, binary = True) return dest def doSubfolderTest(self): tests = { ('/test/subfolder', '/test/sub'): False, ('/test/sub/folder', '/test/sub'): True, ('/test/sub/folder', '/test/sub2'): False, ('/sub/fold', '/test/sub/fold'): False, ('/sub/fold', '/test/sub/folder'): False, ('/opt/couchpotato', '/var/opt/couchpotato'): False, ('/var/opt', '/var/opt/couchpotato'): False, ('/CapItaLs/Are/OK', '/CapItaLs/Are/OK'): True, ('/CapItaLs/Are/OK', '/CapItaLs/Are/OK2'): False, ('/capitals/are/not/OK', '/capitals/are/NOT'): False, ('\\\\Mounted\\Volume\\Test', '\\\\Mounted\\Volume'): True, ('C:\\\\test\\path', 'C:\\\\test2'): False } failed = 0 for x in tests: if isSubFolder(x[0], x[1]) is not tests[x]: log.error('Failed subfolder test %s %s', x) failed += 1 if failed > 0: log.error('Subfolder test failed %s tests', failed) else: log.info('Subfolder test succeeded') return failed == 0
3,976
Python
.py
87
35.321839
147
0.571355
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,942
renamer.py
CouchPotato_CouchPotatoServer/couchpotato/core/plugins/renamer.py
import fnmatch import os import re import shutil import time import traceback from couchpotato import get_db from couchpotato.api import addApiView from couchpotato.core.event import addEvent, fireEvent, fireEventAsync from couchpotato.core.helpers.encoding import toUnicode, ss, sp from couchpotato.core.helpers.variable import getExt, mergeDicts, getTitle, \ getImdb, link, symlink, tryInt, splitString, fnEscape, isSubFolder, \ getIdentifier, randomString, getFreeSpace, getSize from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from couchpotato.environment import Env from unrar2 import RarFile import six from six.moves import filter log = CPLog(__name__) autoload = 'Renamer' class Renamer(Plugin): renaming_started = False checking_snatched = False def __init__(self): addApiView('renamer.scan', self.scanView, docs = { 'desc': 'For the renamer to check for new files to rename in a folder', 'params': { 'async': {'desc': 'Optional: Set to 1 if you dont want to fire the renamer.scan asynchronous.'}, 'to_folder': {'desc': 'Optional: The folder to move releases to. Leave empty for default folder.'}, 'media_folder': {'desc': 'Optional: The folder of the media to scan. Keep empty for default renamer folder.'}, 'files': {'desc': 'Optional: Provide the release files if more releases are in the same media_folder, delimited with a \'|\'. Note that no dedicated release folder is expected for releases with one file.'}, 'base_folder': {'desc': 'Optional: The folder to find releases in. Leave empty for default folder.'}, 'downloader': {'desc': 'Optional: The downloader the release has been downloaded with. \'download_id\' is required with this option.'}, 'download_id': {'desc': 'Optional: The nzb/torrent ID of the release in media_folder. 
\'downloader\' is required with this option.'}, 'status': {'desc': 'Optional: The status of the release: \'completed\' (default) or \'seeding\''}, }, }) addApiView('renamer.progress', self.getProgress, docs = { 'desc': 'Get the progress of current renamer scan', 'return': {'type': 'object', 'example': """{ 'progress': False || True, }"""}, }) addEvent('renamer.scan', self.scan) addEvent('renamer.check_snatched', self.checkSnatched) addEvent('app.load', self.scan) addEvent('app.load', self.setCrons) # Enable / disable interval addEvent('setting.save.renamer.enabled.after', self.setCrons) addEvent('setting.save.renamer.run_every.after', self.setCrons) addEvent('setting.save.renamer.force_every.after', self.setCrons) def setCrons(self): fireEvent('schedule.remove', 'renamer.check_snatched') if self.isEnabled() and self.conf('run_every') > 0: fireEvent('schedule.interval', 'renamer.check_snatched', self.checkSnatched, minutes = self.conf('run_every'), single = True) fireEvent('schedule.remove', 'renamer.check_snatched_forced') if self.isEnabled() and self.conf('force_every') > 0: fireEvent('schedule.interval', 'renamer.check_snatched_forced', self.scan, hours = self.conf('force_every'), single = True) return True def getProgress(self, **kwargs): return { 'progress': self.renaming_started } def scanView(self, **kwargs): async = tryInt(kwargs.get('async', 0)) base_folder = kwargs.get('base_folder') media_folder = sp(kwargs.get('media_folder')) to_folder = kwargs.get('to_folder') # Backwards compatibility, to be removed after a few versions :) if not media_folder: media_folder = sp(kwargs.get('movie_folder')) downloader = kwargs.get('downloader') download_id = kwargs.get('download_id') files = [sp(filename) for filename in splitString(kwargs.get('files'), '|')] status = kwargs.get('status', 'completed') release_download = None if not base_folder and media_folder: release_download = {'folder': media_folder} if download_id: release_download.update({ 'id': download_id, 
'downloader': downloader, 'status': status, 'files': files }) fire_handle = fireEvent if not async else fireEventAsync fire_handle('renamer.scan', base_folder = base_folder, release_download = release_download, to_folder = to_folder) return { 'success': True } def scan(self, base_folder = None, release_download = None, to_folder = None): if not release_download: release_download = {} if self.isDisabled(): return if self.renaming_started is True: log.info('Renamer is already running, if you see this often, check the logs above for errors.') return if not base_folder: base_folder = sp(self.conf('from')) from_folder = sp(self.conf('from')) if not to_folder: to_folder = sp(self.conf('to')) # Get media folder to process media_folder = sp(release_download.get('folder')) # Get all folders that should not be processed no_process = [to_folder] cat_list = fireEvent('category.all', single = True) or [] no_process.extend([item['destination'] for item in cat_list]) # Don't continue if from-folder doesn't exist if not os.path.isdir(base_folder): log.error('The from folder "%s" doesn\'t exist. Please create it.', base_folder) return # Don't continue if to-folder doesn't exist elif not os.path.isdir(to_folder): log.error('The to folder "%s" doesn\'t exist. Please create it.', to_folder) return else: # Check to see if the no_process folders are inside the "from" folder. for item in no_process: if isSubFolder(item, base_folder): log.error('To protect your data, the media libraries can\'t be inside of or the same as the "from" folder. "%s" in "%s"', (item, base_folder)) return # Check to see if the no_process folders are inside the provided media_folder if media_folder and not os.path.isdir(media_folder): log.debug('The provided media folder %s does not exist. 
Trying to find it in the \'from\' folder.', media_folder) # Update to the from folder if len(release_download.get('files', [])) == 1: new_media_folder = sp(from_folder) else: new_media_folder = sp(os.path.join(from_folder, os.path.basename(media_folder))) if not os.path.isdir(new_media_folder): log.error('The provided media folder %s does not exist and could also not be found in the \'from\' folder.', media_folder) return # Update the files new_files = [os.path.join(new_media_folder, os.path.relpath(filename, media_folder)) for filename in release_download.get('files', [])] if new_files and not os.path.isfile(new_files[0]): log.error('The provided media folder %s does not exist and its files could also not be found in the \'from\' folder.', media_folder) return # Update release_download info to the from folder log.debug('Release %s found in the \'from\' folder.', media_folder) release_download['folder'] = new_media_folder release_download['files'] = new_files media_folder = new_media_folder if media_folder: for item in no_process: if isSubFolder(item, media_folder): log.error('To protect your data, the media libraries can\'t be inside of or the same as the provided media folder. 
"%s" in "%s"', (item, media_folder)) return # Make sure a checkSnatched marked all downloads/seeds as such if not release_download and self.conf('run_every') > 0: self.checkSnatched(fire_scan = False) self.renaming_started = True # make sure the media folder name is included in the search folder = None files = [] if media_folder: log.info('Scanning media folder %s...', media_folder) folder = os.path.dirname(media_folder) release_files = release_download.get('files', []) if release_files: files = release_files # If there is only one file in the torrent, the downloader did not create a subfolder if len(release_files) == 1: folder = media_folder else: # Get all files from the specified folder try: for root, folders, names in os.walk(media_folder): files.extend([sp(os.path.join(root, name)) for name in names]) except: log.error('Failed getting files from %s: %s', (media_folder, traceback.format_exc())) # post_filter files from configuration; this is a ":"-separated list of globs files = self.filesAfterIgnoring(files) db = get_db() # Extend the download info with info stored in the downloaded release keep_original = self.moveTypeIsLinked() is_torrent = False if release_download: release_download = self.extendReleaseDownload(release_download) is_torrent = self.downloadIsTorrent(release_download) keep_original = True if is_torrent and self.conf('file_action') not in ['move'] else keep_original # Unpack any archives extr_files = None if self.conf('unrar'): folder, media_folder, files, extr_files = self.extractFiles(folder = folder, media_folder = media_folder, files = files, cleanup = self.conf('cleanup') and not keep_original) groups = fireEvent('scanner.scan', folder = folder if folder else base_folder, files = files, release_download = release_download, return_ignored = False, single = True) or [] folder_name = self.conf('folder_name') file_name = self.conf('file_name') trailer_name = self.conf('trailer_name') nfo_name = self.conf('nfo_name') separator = 
self.conf('separator') if len(file_name) == 0: log.error('Please fill in the filename option under renamer settings. Forcing it on <original>.<ext> to keep the same name as source file.') file_name = '<original>.<ext>' cd_keys = ['<cd>','<cd_nr>', '<original>'] if not any(x in folder_name for x in cd_keys) and not any(x in file_name for x in cd_keys): log.error('Missing `cd` or `cd_nr` in the renamer. This will cause multi-file releases of being renamed to the same file. ' 'Please add it in the renamer settings. Force adding it for now.') file_name = '%s %s' % ('<cd>', file_name) # Tag release folder as failed_rename in case no groups were found. This prevents check_snatched from removing the release from the downloader. if not groups and self.statusInfoComplete(release_download): self.tagRelease(release_download = release_download, tag = 'failed_rename') for group_identifier in groups: group = groups[group_identifier] group['release_download'] = None rename_files = {} remove_files = [] remove_releases = [] media_title = getTitle(group) # Add _UNKNOWN_ if no library item is connected if not group.get('media') or not media_title: self.tagRelease(group = group, tag = 'unknown') continue # Rename the files using the library data else: # Media not in library, add it first if not group['media'].get('_id'): group['media'] = fireEvent('movie.add', params = { 'identifier': group['identifier'], 'profile_id': None }, search_after = False, status = 'done', single = True) else: group['media'] = fireEvent('movie.update', media_id = group['media'].get('_id'), single = True) if not group['media'] or not group['media'].get('_id'): log.error('Could not rename, no library item to work with: %s', group_identifier) continue media = group['media'] media_title = getTitle(media) # Overwrite destination when set in category destination = to_folder category_label = '' if media.get('category_id') and media.get('category_id') != '-1': try: category = db.get('id', media['category_id']) 
category_label = category['label'] if category['destination'] and len(category['destination']) > 0 and category['destination'] != 'None': destination = sp(category['destination']) log.debug('Setting category destination for "%s": %s' % (media_title, destination)) else: log.debug('No category destination found for "%s"' % media_title) except: log.error('Failed getting category label: %s', traceback.format_exc()) # Find subtitle for renaming group['before_rename'] = [] fireEvent('renamer.before', group) # Add extracted files to the before_rename list if extr_files: group['before_rename'].extend(extr_files) # Remove weird chars from movie name movie_name = re.sub(r"[\x00\/\\:\*\?\"<>\|]", '', media_title) # Put 'The' at the end name_the = movie_name for prefix in ['the ', 'an ', 'a ']: if prefix == movie_name[:len(prefix)].lower(): name_the = movie_name[len(prefix):] + ', ' + prefix.strip().capitalize() break replacements = { 'ext': 'mkv', 'namethe': name_the.strip(), 'thename': movie_name.strip(), 'year': media['info']['year'], 'first': name_the[0].upper(), 'quality': group['meta_data']['quality']['label'], 'quality_type': group['meta_data']['quality_type'], 'video': group['meta_data'].get('video'), 'audio': group['meta_data'].get('audio'), 'group': group['meta_data']['group'], 'source': group['meta_data']['source'], 'resolution_width': group['meta_data'].get('resolution_width'), 'resolution_height': group['meta_data'].get('resolution_height'), 'audio_channels': group['meta_data'].get('audio_channels'), 'imdb_id': group['identifier'], 'cd': '', 'cd_nr': '', 'mpaa': media['info'].get('mpaa', ''), 'mpaa_only': media['info'].get('mpaa', ''), 'category': category_label, '3d': '3D' if group['meta_data']['quality'].get('is_3d', 0) else '', '3d_type': group['meta_data'].get('3d_type'), '3d_type_short': group['meta_data'].get('3d_type'), } if replacements['mpaa_only'] not in ('G', 'PG', 'PG-13', 'R', 'NC-17'): replacements['mpaa_only'] = 'Not Rated' if 
replacements['3d_type_short']: replacements['3d_type_short'] = replacements['3d_type_short'].replace('Half ', 'H').replace('Full ', '') if self.conf('use_tab_threed') and replacements['3d_type']: if 'OU' in replacements['3d_type']: replacements['3d_type'] = replacements['3d_type'].replace('OU','TAB') if self.conf('use_tab_threed') and replacements['3d_type_short']: if 'OU' in replacements['3d_type_short']: replacements['3d_type_short'] = replacements['3d_type_short'].replace('OU','TAB') for file_type in group['files']: # Move nfo depending on settings if file_type is 'nfo' and not self.conf('rename_nfo'): log.debug('Skipping, renaming of %s disabled', file_type) for current_file in group['files'][file_type]: if self.conf('cleanup') and (not keep_original or self.fileIsAdded(current_file, group)): remove_files.append(current_file) continue # Subtitle extra if file_type is 'subtitle_extra': continue # Move other files multiple = len(group['files'][file_type]) > 1 and not group['is_dvd'] cd = 1 if multiple else 0 for current_file in sorted(list(group['files'][file_type])): current_file = sp(current_file) # Original filename replacements['original'] = os.path.splitext(os.path.basename(current_file))[0] replacements['original_folder'] = fireEvent('scanner.remove_cptag', group['dirname'], single = True) if not replacements['original_folder'] or len(replacements['original_folder']) == 0: replacements['original_folder'] = replacements['original'] # Extension replacements['ext'] = getExt(current_file) # cd # replacements['cd'] = ' cd%d' % cd if multiple else '' replacements['cd_nr'] = cd if multiple else '' # Naming final_folder_name = self.doReplace(folder_name, replacements, folder = True) final_file_name = self.doReplace(file_name, replacements) replacements['filename'] = final_file_name[:-(len(getExt(final_file_name)) + 1)] # Meta naming if file_type is 'trailer': final_file_name = self.doReplace(trailer_name, replacements, remove_multiple = True) elif file_type is 
'nfo': final_file_name = self.doReplace(nfo_name, replacements, remove_multiple = True) # Move DVD files (no structure renaming) if group['is_dvd'] and file_type is 'movie': found = False for top_dir in ['video_ts', 'audio_ts', 'bdmv', 'certificate']: has_string = current_file.lower().find(os.path.sep + top_dir + os.path.sep) if has_string >= 0: structure_dir = current_file[has_string:].lstrip(os.path.sep) rename_files[current_file] = os.path.join(destination, final_folder_name, structure_dir) found = True break if not found: log.error('Could not determine dvd structure for: %s', current_file) # Do rename others else: if file_type is 'leftover': if self.conf('move_leftover'): rename_files[current_file] = os.path.join(destination, final_folder_name, os.path.basename(current_file)) elif file_type not in ['subtitle']: rename_files[current_file] = os.path.join(destination, final_folder_name, final_file_name) # Check for extra subtitle files if file_type is 'subtitle': remove_multiple = False if len(group['files']['movie']) == 1: remove_multiple = True sub_langs = group['subtitle_language'].get(current_file, []) # rename subtitles with or without language sub_name = self.doReplace(file_name, replacements, remove_multiple = remove_multiple) rename_files[current_file] = os.path.join(destination, final_folder_name, sub_name) rename_extras = self.getRenameExtras( extra_type = 'subtitle_extra', replacements = replacements, folder_name = folder_name, file_name = file_name, destination = destination, group = group, current_file = current_file, remove_multiple = remove_multiple, ) # Don't add language if multiple languages in 1 subtitle file if len(sub_langs) == 1: sub_suffix = '%s.%s' % (sub_langs[0], replacements['ext']) # Don't add language to subtitle file it it's already there if not sub_name.endswith(sub_suffix): sub_name = sub_name.replace(replacements['ext'], sub_suffix) rename_files[current_file] = os.path.join(destination, final_folder_name, sub_name) rename_files = 
mergeDicts(rename_files, rename_extras) # Filename without cd etc elif file_type is 'movie': rename_extras = self.getRenameExtras( extra_type = 'movie_extra', replacements = replacements, folder_name = folder_name, file_name = file_name, destination = destination, group = group, current_file = current_file ) rename_files = mergeDicts(rename_files, rename_extras) group['filename'] = self.doReplace(file_name, replacements, remove_multiple = True)[:-(len(getExt(final_file_name)) + 1)] group['destination_dir'] = os.path.join(destination, final_folder_name) if multiple: cd += 1 # Before renaming, remove the lower quality files remove_leftovers = True # Get media quality profile profile = None if media.get('profile_id'): try: profile = db.get('id', media['profile_id']) except: # Set profile to None as it does not exist anymore mdia = db.get('id', media['_id']) mdia['profile_id'] = None db.update(mdia) log.error('Error getting quality profile for %s: %s', (media_title, traceback.format_exc())) else: log.debug('Media has no quality profile: %s', media_title) # Mark media for dashboard mark_as_recent = False # Go over current movie releases for release in fireEvent('release.for_media', media['_id'], single = True): # When a release already exists if release.get('status') == 'done': # This is where CP removes older, lesser quality releases or releases that are not wanted anymore is_higher = fireEvent('quality.ishigher', \ group['meta_data']['quality'], {'identifier': release['quality'], 'is_3d': release.get('is_3d', False)}, profile, single = True) if is_higher == 'higher': if self.conf('remove_lower_quality_copies'): log.info('Removing lesser or not wanted quality %s for %s.', (media_title, release.get('quality'))) for file_type in release.get('files', {}): for release_file in release['files'][file_type]: remove_files.append(release_file) remove_releases.append(release) # Same quality, but still downloaded, so maybe repack/proper/unrated/directors cut etc elif is_higher == 
'equal': if self.conf('remove_lower_quality_copies'): log.info('Same quality release already exists for %s, with quality %s. Assuming repack.', (media_title, release.get('quality'))) for file_type in release.get('files', {}): for release_file in release['files'][file_type]: remove_files.append(release_file) remove_releases.append(release) # Downloaded a lower quality, rename the newly downloaded files/folder to exclude them from scan else: log.info('Better quality release already exists for %s, with quality %s', (media_title, release.get('quality'))) # Add exists tag to the .ignore file self.tagRelease(group = group, tag = 'exists') # Notify on rename fail download_message = 'Renaming of %s (%s) cancelled, exists in %s already.' % (media_title, group['meta_data']['quality']['label'], release.get('quality')) fireEvent('movie.renaming.canceled', message = download_message, data = group) remove_leftovers = False break elif release.get('status') in ['snatched', 'seeding']: if release_download and release_download.get('release_id'): if release_download['release_id'] == release['_id']: if release_download['status'] == 'completed': # Set the release to downloaded fireEvent('release.update_status', release['_id'], status = 'downloaded', single = True) group['release_download'] = release_download mark_as_recent = True elif release_download['status'] == 'seeding': # Set the release to seeding fireEvent('release.update_status', release['_id'], status = 'seeding', single = True) mark_as_recent = True elif release.get('quality') == group['meta_data']['quality']['identifier']: # Set the release to downloaded fireEvent('release.update_status', release['_id'], status = 'downloaded', single = True) group['release_download'] = release_download mark_as_recent = True # Mark media for dashboard if mark_as_recent: fireEvent('media.tag', group['media'].get('_id'), 'recent', update_edited = True, single = True) # Remove leftover files if not remove_leftovers: # Don't remove anything 
continue log.debug('Removing leftover files') for current_file in group['files']['leftover']: if self.conf('cleanup') and not self.conf('move_leftover') and \ (not keep_original or self.fileIsAdded(current_file, group)): remove_files.append(current_file) if self.conf('check_space'): total_space, available_space = getFreeSpace(destination) renaming_size = getSize(rename_files.keys()) if renaming_size > available_space: log.error('Not enough space left, need %s MB but only %s MB available', (renaming_size, available_space)) self.tagRelease(group = group, tag = 'not_enough_space') continue # Remove files delete_folders = [] for src in remove_files: if rename_files.get(src): log.debug('Not removing file that will be renamed: %s', src) continue log.info('Removing "%s"', src) try: src = sp(src) if os.path.isfile(src): os.remove(src) parent_dir = os.path.dirname(src) if parent_dir not in delete_folders and os.path.isdir(parent_dir) and \ not isSubFolder(destination, parent_dir) and not isSubFolder(media_folder, parent_dir) and \ isSubFolder(parent_dir, base_folder): delete_folders.append(parent_dir) except: log.error('Failed removing %s: %s', (src, traceback.format_exc())) self.tagRelease(group = group, tag = 'failed_remove') # Delete leftover folder from older releases delete_folders = sorted(delete_folders, key = len, reverse = True) for delete_folder in delete_folders: try: self.deleteEmptyFolder(delete_folder, show_error = False) except Exception as e: log.error('Failed to delete folder: %s %s', (e, traceback.format_exc())) # Rename all files marked group['renamed_files'] = [] failed_rename = False for src in rename_files: if rename_files[src]: dst = rename_files[src] if dst in group['renamed_files']: log.error('File "%s" already renamed once, adding random string at the end to prevent data loss', dst) dst = '%s.random-%s' % (dst, randomString()) # Create dir self.makeDir(os.path.dirname(dst)) try: self.moveFile(src, dst, use_default = not is_torrent or 
self.fileIsAdded(src, group)) group['renamed_files'].append(dst) except: log.error('Failed renaming the file "%s" : %s', (os.path.basename(src), traceback.format_exc())) failed_rename = True break # If renaming failed tag the release folder as failed and continue with next group. Note that all old files have already been deleted. if failed_rename: self.tagRelease(group = group, tag = 'failed_rename') continue # If renaming succeeded, make sure it is not tagged as failed (scanner didn't return a group, but a download_ID was provided in an earlier attempt) else: self.untagRelease(group = group, tag = 'failed_rename') # Tag folder if it is in the 'from' folder and it will not be removed because it is a torrent if self.movieInFromFolder(media_folder) and keep_original: self.tagRelease(group = group, tag = 'renamed_already') # Remove matching releases for release in remove_releases: log.debug('Removing release %s', release.get('identifier')) try: db.delete(release) except: log.error('Failed removing %s: %s', (release, traceback.format_exc())) if group['dirname'] and group['parentdir'] and not keep_original: if media_folder: # Delete the movie folder group_folder = media_folder else: # Delete the first empty subfolder in the tree relative to the 'from' folder group_folder = sp(os.path.join(base_folder, toUnicode(os.path.relpath(group['parentdir'], base_folder)).split(os.path.sep)[0])) try: if self.conf('cleanup') or self.conf('move_leftover'): log.info('Deleting folder: %s', group_folder) self.deleteEmptyFolder(group_folder) except: log.error('Failed removing %s: %s', (group_folder, traceback.format_exc())) # Notify on download, search for trailers etc download_message = 'Downloaded %s (%s%s)' % (media_title, replacements['quality'], (' ' + replacements['3d']) if replacements['3d'] else '') try: fireEvent('renamer.after', message = download_message, group = group, in_order = True) except: log.error('Failed firing (some) of the renamer.after events: %s', 
traceback.format_exc()) # Break if CP wants to shut down if self.shuttingDown(): break self.renaming_started = False def getRenameExtras(self, extra_type = '', replacements = None, folder_name = '', file_name = '', destination = '', group = None, current_file = '', remove_multiple = False): if not group: group = {} if not replacements: replacements = {} replacements = replacements.copy() rename_files = {} def test(s): return current_file[:-len(replacements['ext'])] in sp(s) for extra in set(filter(test, group['files'][extra_type])): replacements['ext'] = getExt(extra) final_folder_name = self.doReplace(folder_name, replacements, remove_multiple = remove_multiple, folder = True) final_file_name = self.doReplace(file_name, replacements, remove_multiple = remove_multiple) rename_files[extra] = os.path.join(destination, final_folder_name, final_file_name) return rename_files # This adds a file to ignore / tag a release so it is ignored later def tagRelease(self, tag, group = None, release_download = None): if not tag: return text = """This file is from CouchPotato It has marked this release as "%s" This file hides the release from the renamer Remove it if you want it to be renamed (again, or at least let it try again) """ % tag tag_files = [] # Tag movie files if they are known if isinstance(group, dict): tag_files = [sorted(list(group['files']['movie']))[0]] elif isinstance(release_download, dict): # Tag download_files if they are known if release_download.get('files', []): tag_files = [filename for filename in release_download.get('files', []) if os.path.exists(filename)] # Tag all files in release folder elif release_download['folder']: for root, folders, names in os.walk(sp(release_download['folder'])): tag_files.extend([os.path.join(root, name) for name in names]) for filename in tag_files: # Don't tag .ignore files if os.path.splitext(filename)[1] == '.ignore': continue tag_filename = '%s.%s.ignore' % (os.path.splitext(filename)[0], tag) if not 
os.path.isfile(tag_filename): self.createFile(tag_filename, text) def untagRelease(self, group = None, release_download = None, tag = ''): if not release_download: return tag_files = [] folder = None # Tag movie files if they are known if isinstance(group, dict): tag_files = [sorted(list(group['files']['movie']))[0]] folder = sp(group['parentdir']) if not group.get('dirname') or not os.path.isdir(folder): return False elif isinstance(release_download, dict): folder = sp(release_download['folder']) if not os.path.isdir(folder): return False # Untag download_files if they are known if release_download.get('files'): tag_files = release_download.get('files', []) # Untag all files in release folder else: for root, folders, names in os.walk(folder): tag_files.extend([sp(os.path.join(root, name)) for name in names if not os.path.splitext(name)[1] == '.ignore']) if not folder: return False # Find all .ignore files in folder ignore_files = [] for root, dirnames, filenames in os.walk(folder): ignore_files.extend(fnmatch.filter([sp(os.path.join(root, filename)) for filename in filenames], '*%s.ignore' % tag)) # Match all found ignore files with the tag_files and delete if found for tag_file in tag_files: ignore_file = fnmatch.filter(ignore_files, fnEscape('%s.%s.ignore' % (os.path.splitext(tag_file)[0], tag if tag else '*'))) for filename in ignore_file: try: os.remove(filename) except: log.debug('Unable to remove ignore file: %s. Error: %s.' 
% (filename, traceback.format_exc())) def hastagRelease(self, release_download, tag = ''): if not release_download: return False folder = sp(release_download['folder']) if not os.path.isdir(folder): return False tag_files = [] ignore_files = [] # Find tag on download_files if they are known if release_download.get('files'): tag_files = release_download.get('files', []) # Find tag on all files in release folder else: for root, folders, names in os.walk(folder): tag_files.extend([sp(os.path.join(root, name)) for name in names if not os.path.splitext(name)[1] == '.ignore']) # Find all .ignore files in folder for root, dirnames, filenames in os.walk(folder): ignore_files.extend(fnmatch.filter([sp(os.path.join(root, filename)) for filename in filenames], '*%s.ignore' % tag)) # Match all found ignore files with the tag_files and return True found for tag_file in [tag_files] if isinstance(tag_files,str) else tag_files: ignore_file = fnmatch.filter(ignore_files, fnEscape('%s.%s.ignore' % (os.path.splitext(tag_file)[0], tag if tag else '*'))) if ignore_file: return True return False def moveFile(self, old, dest, use_default = False): dest = sp(dest) try: if os.path.exists(dest) and os.path.isfile(dest): raise Exception('Destination "%s" already exists' % dest) move_type = self.conf('file_action') if use_default: move_type = self.conf('default_file_action') if move_type not in ['copy', 'link', 'symlink_reversed']: try: log.info('Moving "%s" to "%s"', (old, dest)) shutil.move(old, dest) except: exists = os.path.exists(dest) if exists and os.path.getsize(old) == os.path.getsize(dest): log.error('Successfully moved file "%s", but something went wrong: %s', (dest, traceback.format_exc())) os.unlink(old) else: # remove faultly copied file if exists: os.unlink(dest) raise elif move_type == 'copy': log.info('Copying "%s" to "%s"', (old, dest)) shutil.copy(old, dest) elif move_type == 'symlink_reversed': log.info('Reverse symlink "%s" to "%s"', (old, dest)) try: shutil.move(old, 
dest) except: log.error('Moving "%s" to "%s" went wrong: %s', (old, dest, traceback.format_exc())) try: symlink(dest, old) except: log.error('Error while linking "%s" back to "%s": %s', (dest, old, traceback.format_exc())) else: log.info('Linking "%s" to "%s"', (old, dest)) # First try to hardlink try: log.debug('Hardlinking file "%s" to "%s"...', (old, dest)) link(old, dest) except: # Try to symlink next log.debug('Couldn\'t hardlink file "%s" to "%s". Symlinking instead. Error: %s.', (old, dest, traceback.format_exc())) shutil.copy(old, dest) try: old_link = '%s.link' % sp(old) symlink(dest, old_link) os.unlink(old) os.rename(old_link, old) except: log.error('Couldn\'t symlink file "%s" to "%s". Copied instead. Error: %s. ', (old, dest, traceback.format_exc())) try: os.chmod(dest, Env.getPermission('file')) if os.name == 'nt' and self.conf('ntfs_permission'): os.popen('icacls "' + dest + '"* /reset /T') except: log.debug('Failed setting permissions for file: %s, %s', (dest, traceback.format_exc(1))) except: log.error('Couldn\'t move file "%s" to "%s": %s', (old, dest, traceback.format_exc())) raise return True def doReplace(self, string, replacements, remove_multiple = False, folder = False): """ replace confignames with the real thing """ replacements = replacements.copy() if remove_multiple: replacements['cd'] = '' replacements['cd_nr'] = '' replaced = toUnicode(string) for x, r in replacements.items(): if x in ['thename', 'namethe']: continue if r is not None: replaced = replaced.replace(six.u('<%s>') % toUnicode(x), toUnicode(r)) else: #If information is not available, we don't want the tag in the filename replaced = replaced.replace('<' + x + '>', '') if self.conf('replace_doubles'): replaced = self.replaceDoubles(replaced.lstrip('. 
')) for x, r in replacements.items(): if x in ['thename', 'namethe']: replaced = replaced.replace(six.u('<%s>') % toUnicode(x), toUnicode(r)) replaced = re.sub(r"[\x00:\*\?\"<>\|]", '', replaced) sep = self.conf('foldersep') if folder else self.conf('separator') return ss(replaced.replace(' ', ' ' if not sep else sep)) def replaceDoubles(self, string): replaces = [ ('\.+', '.'), ('_+', '_'), ('-+', '-'), ('\s+', ' '), (' \\\\', '\\\\'), (' /', '/'), ('(\s\.)+', '.'), ('(-\.)+', '.'), ('(\s-[^\s])+', '-'), (' ]', ']'), ] for r in replaces: reg, replace_with = r string = re.sub(reg, replace_with, string) string = string.rstrip(',_-/\\ ') return string def checkSnatched(self, fire_scan = True): if self.checking_snatched: log.debug('Already checking snatched') return False self.checking_snatched = True try: db = get_db() rels = list(fireEvent('release.with_status', ['snatched', 'seeding', 'missing'], single = True)) if not rels: #No releases found that need status checking self.checking_snatched = False return True # Collect all download information with the download IDs from the releases download_ids = [] no_status_support = [] try: for rel in rels: if not rel.get('download_info'): continue if rel['download_info'].get('id') and rel['download_info'].get('downloader'): download_ids.append(rel['download_info']) ds = rel['download_info'].get('status_support') if ds is False or ds == 'False': no_status_support.append(ss(rel['download_info'].get('downloader'))) except: log.error('Error getting download IDs from database') self.checking_snatched = False return False release_downloads = fireEvent('download.status', download_ids, merge = True) if download_ids else [] if len(no_status_support) > 0: log.debug('Download status functionality is not implemented for one of the active downloaders: %s', list(set(no_status_support))) if not release_downloads: if fire_scan: self.scan() self.checking_snatched = False return True scan_releases = [] scan_required = False 
log.debug('Checking status snatched releases...') try: for rel in rels: if not rel.get('media_id'): continue movie_dict = db.get('id', rel.get('media_id')) download_info = rel.get('download_info') if not isinstance(download_info, dict): log.error('Faulty release found without any info, ignoring.') fireEvent('release.update_status', rel.get('_id'), status = 'ignored', single = True) continue # Check if download ID is available if not download_info.get('id') or not download_info.get('downloader'): log.debug('Download status functionality is not implemented for downloader (%s) of release %s.', (download_info.get('downloader', 'unknown'), rel['info']['name'])) scan_required = True # Continue with next release continue # Find release in downloaders nzbname = self.createNzbName(rel['info'], movie_dict) found_release = False for release_download in release_downloads: found_release = False if download_info.get('id'): if release_download['id'] == download_info['id'] and release_download['downloader'] == download_info['downloader']: log.debug('Found release by id: %s', release_download['id']) found_release = True break else: if release_download['name'] == nzbname or rel['info']['name'] in release_download['name'] or getImdb(release_download['name']) == getIdentifier(movie_dict): log.debug('Found release by release name or imdb ID: %s', release_download['name']) found_release = True break if not found_release: #Check status if already missing and for how long, if > 1 week, set to ignored else to missing if rel.get('status') == 'missing': if rel.get('last_edit') < int(time.time()) - 7 * 24 * 60 * 60: log.info('%s not found in downloaders after 7 days, setting status to ignored', nzbname) fireEvent('release.update_status', rel.get('_id'), status = 'ignored', single = True) else: # Set the release to missing log.info('%s not found in downloaders, setting status to missing', nzbname) fireEvent('release.update_status', rel.get('_id'), status = 'missing', single = True) # Continue 
with next release continue # Log that we found the release timeleft = 'N/A' if release_download['timeleft'] == -1 else release_download['timeleft'] log.debug('Found %s: %s, time to go: %s', (release_download['name'], release_download['status'].upper(), timeleft)) # Check status of release if release_download['status'] == 'busy': # Set the release to snatched if it was missing before fireEvent('release.update_status', rel.get('_id'), status = 'snatched', single = True) # Tag folder if it is in the 'from' folder and it will not be processed because it is still downloading if self.movieInFromFolder(release_download['folder']): self.tagRelease(release_download = release_download, tag = 'downloading') elif release_download['status'] == 'seeding': #If linking setting is enabled, process release if self.conf('file_action') != 'move' and not rel.get('status') == 'seeding' and self.statusInfoComplete(release_download): log.info('Download of %s completed! It is now being processed while leaving the original files alone for seeding. 
Current ratio: %s.', (release_download['name'], release_download['seed_ratio'])) # Remove the downloading tag self.untagRelease(release_download = release_download, tag = 'downloading') # Scan and set the torrent to paused if required release_download.update({'pause': True, 'scan': True, 'process_complete': False}) scan_releases.append(release_download) else: #let it seed log.debug('%s is seeding with ratio: %s', (release_download['name'], release_download['seed_ratio'])) # Set the release to seeding fireEvent('release.update_status', rel.get('_id'), status = 'seeding', single = True) elif release_download['status'] == 'failed': # Set the release to failed fireEvent('release.update_status', rel.get('_id'), status = 'failed', single = True) fireEvent('download.remove_failed', release_download, single = True) if self.conf('next_on_failed'): fireEvent('movie.searcher.try_next_release', media_id = rel.get('media_id')) elif release_download['status'] == 'completed': log.info('Download of %s completed!', release_download['name']) #Make sure the downloader sent over a path to look in if self.statusInfoComplete(release_download): # If the release has been seeding, process now the seeding is done if rel.get('status') == 'seeding': if self.conf('file_action') != 'move': # Set the release to done as the movie has already been renamed fireEvent('release.update_status', rel.get('_id'), status = 'downloaded', single = True) # Allow the downloader to clean-up release_download.update({'pause': False, 'scan': False, 'process_complete': True}) scan_releases.append(release_download) else: # Scan and Allow the downloader to clean-up release_download.update({'pause': False, 'scan': True, 'process_complete': True}) scan_releases.append(release_download) else: # Set the release to snatched if it was missing before fireEvent('release.update_status', rel.get('_id'), status = 'snatched', single = True) # Remove the downloading tag self.untagRelease(release_download = release_download, tag = 
'downloading') # Scan and Allow the downloader to clean-up release_download.update({'pause': False, 'scan': True, 'process_complete': True}) scan_releases.append(release_download) else: scan_required = True except: log.error('Failed checking for release in downloader: %s', traceback.format_exc()) # The following can either be done here, or inside the scanner if we pass it scan_items in one go for release_download in scan_releases: # Ask the renamer to scan the item if release_download['scan']: if release_download['pause'] and self.conf('file_action') in ['link', "symlink_reversed"]: fireEvent('download.pause', release_download = release_download, pause = True, single = True) self.scan(release_download = release_download) if release_download['pause'] and self.conf('file_action') in ['link', "symlink_reversed"]: fireEvent('download.pause', release_download = release_download, pause = False, single = True) if release_download['process_complete']: # First make sure the files were successfully processed if not self.hastagRelease(release_download = release_download, tag = 'failed_rename'): # Remove the seeding tag if it exists self.untagRelease(release_download = release_download, tag = 'renamed_already') # Ask the downloader to process the item fireEvent('download.process_complete', release_download = release_download, single = True) if fire_scan and (scan_required or len(no_status_support) > 0): self.scan() self.checking_snatched = False return True except: log.error('Failed checking snatched: %s', traceback.format_exc()) self.checking_snatched = False return False def extendReleaseDownload(self, release_download): rls = None db = get_db() if release_download and release_download.get('id'): try: rls = db.get('release_download', '%s-%s' % (release_download.get('downloader'), release_download.get('id')), with_doc = True)['doc'] except: log.error('Download ID %s from downloader %s not found in releases', (release_download.get('id'), release_download.get('downloader'))) if 
rls: media = db.get('id', rls['media_id']) release_download.update({ 'imdb_id': getIdentifier(media), 'quality': rls['quality'], 'is_3d': rls['is_3d'], 'protocol': rls.get('info', {}).get('protocol') or rls.get('info', {}).get('type'), 'release_id': rls['_id'], }) return release_download def downloadIsTorrent(self, release_download): return release_download and release_download.get('protocol') in ['torrent', 'torrent_magnet'] def fileIsAdded(self, src, group): if not group or not group.get('before_rename'): return False return src in group['before_rename'] def moveTypeIsLinked(self): return self.conf('default_file_action') in ['copy', 'link', "symlink_reversed"] def statusInfoComplete(self, release_download): return release_download.get('id') and release_download.get('downloader') and release_download.get('folder') def movieInFromFolder(self, media_folder): return media_folder and isSubFolder(media_folder, sp(self.conf('from'))) or not media_folder @property def ignored_in_path(self): return self.conf('ignored_in_path').split(":") if self.conf('ignored_in_path') else [] def filesAfterIgnoring(self, original_file_list): kept_files = [] for path in original_file_list: if self.keepFile(path): kept_files.append(path) else: log.debug('Ignored "%s" during renaming', path) return kept_files def keepFile(self, filename): # ignoredpaths for i in self.ignored_in_path: if i in filename.lower(): log.debug('Ignored "%s" contains "%s".', (filename, i)) return False # All is OK return True def extractFiles(self, folder = None, media_folder = None, files = None, cleanup = False): if not files: files = [] # RegEx for finding rar files archive_regex = '(?P<file>^(?P<base>(?:(?!\.part\d+\.rar$).)*)\.(?:(?:part0*1\.)?rar)$)' restfile_regex = '(^%s\.(?:part(?!0*1\.rar$)\d+\.rar$|[rstuvw]\d+$))' extr_files = [] from_folder = sp(self.conf('from')) # Check input variables if not folder: folder = from_folder check_file_date = True if media_folder: check_file_date = False if not files: for 
root, folders, names in os.walk(folder): files.extend([sp(os.path.join(root, name)) for name in names]) # Find all archive files archives = [re.search(archive_regex, name).groupdict() for name in files if re.search(archive_regex, name)] #Extract all found archives for archive in archives: # Check if it has already been processed by CPS if self.hastagRelease(release_download = {'folder': os.path.dirname(archive['file']), 'files': archive['file']}): continue # Find all related archive files archive['files'] = [name for name in files if re.search(restfile_regex % re.escape(archive['base']), name)] archive['files'].append(archive['file']) # Check if archive is fresh and maybe still copying/moving/downloading, ignore files newer than 1 minute if check_file_date: files_too_new, time_string = self.checkFilesChanged(archive['files']) if files_too_new: log.info('Archive seems to be still copying/moving/downloading or just copied/moved/downloaded (created on %s), ignoring for now: %s', (time_string, os.path.basename(archive['file']))) continue log.info('Archive %s found. 
Extracting...', os.path.basename(archive['file'])) try: unrar_path = self.conf('unrar_path') unrar_path = unrar_path if unrar_path and (os.path.isfile(unrar_path) or re.match('^[a-zA-Z0-9_/\.\-]+$', unrar_path)) else None rar_handle = RarFile(archive['file'], custom_path = unrar_path) extr_path = os.path.join(from_folder, os.path.relpath(os.path.dirname(archive['file']), folder)) self.makeDir(extr_path) for packedinfo in rar_handle.infolist(): extr_file_path = sp(os.path.join(extr_path, os.path.basename(packedinfo.filename))) if not packedinfo.isdir and not os.path.isfile(extr_file_path): log.debug('Extracting %s...', packedinfo.filename) rar_handle.extract(condition = [packedinfo.index], path = extr_path, withSubpath = False, overwrite = False) if self.conf('unrar_modify_date'): try: os.utime(extr_file_path, (os.path.getatime(archive['file']), os.path.getmtime(archive['file']))) except: log.error('Rar modify date enabled, but failed: %s', traceback.format_exc()) extr_files.append(extr_file_path) del rar_handle # Tag archive as extracted if no cleanup. 
if not cleanup and os.path.isfile(extr_file_path): self.tagRelease(release_download = {'folder': os.path.dirname(archive['file']), 'files': [archive['file']]}, tag = 'extracted') except Exception as e: log.error('Failed to extract %s: %s %s', (archive['file'], e, traceback.format_exc())) continue # Delete the archive files for filename in archive['files']: if cleanup: try: os.remove(filename) except Exception as e: log.error('Failed to remove %s: %s %s', (filename, e, traceback.format_exc())) continue files.remove(filename) # Move the rest of the files and folders if any files are extracted to the from folder (only if folder was provided) if extr_files and folder != from_folder: for leftoverfile in list(files): move_to = os.path.join(from_folder, os.path.relpath(leftoverfile, folder)) try: self.makeDir(os.path.dirname(move_to)) self.moveFile(leftoverfile, move_to, cleanup) except Exception as e: log.error('Failed moving left over file %s to %s: %s %s', (leftoverfile, move_to, e, traceback.format_exc())) # As we probably tried to overwrite the nfo file, check if it exists and then remove the original if os.path.isfile(move_to) and os.path.getsize(leftoverfile) == os.path.getsize(move_to): if cleanup: log.info('Deleting left over file %s instead...', leftoverfile) os.unlink(leftoverfile) else: continue files.remove(leftoverfile) extr_files.append(move_to) if cleanup: # Remove all left over folders log.debug('Removing old movie folder %s...', media_folder) self.deleteEmptyFolder(media_folder) media_folder = os.path.join(from_folder, os.path.relpath(media_folder, folder)) folder = from_folder if extr_files: files.extend(extr_files) # Cleanup files and folder if media_folder was not provided if not media_folder: files = [] folder = None return folder, media_folder, files, extr_files rename_options = { 'pre': '<', 'post': '>', 'choices': { 'ext': 'Extension (mkv)', 'namethe': 'Moviename, The', 'thename': 'The Moviename', 'year': 'Year (2011)', 'first': 'First letter 
(M)', 'quality': 'Quality (720p)', 'quality_type': '(HD) or (SD)', '3d': '3D', '3d_type': '3D Type (Full SBS)', '3d_type_short' : 'Short 3D Type (FSBS)', 'video': 'Video (x264)', 'audio': 'Audio (DTS)', 'group': 'Releasegroup name', 'source': 'Source media (Bluray)', 'resolution_width': 'resolution width (1280)', 'resolution_height': 'resolution height (720)', 'audio_channels': 'audio channels (7.1)', 'original': 'Original filename', 'original_folder': 'Original foldername', 'imdb_id': 'IMDB id (tt0123456)', 'cd': 'CD number (cd1)', 'cd_nr': 'Just the cd nr. (1)', 'mpaa': 'MPAA or other certification', 'mpaa_only': 'MPAA only certification (G|PG|PG-13|R|NC-17|Not Rated)', 'category': 'Category label', }, } config = [{ 'name': 'renamer', 'order': 40, 'description': 'Move and rename your downloaded movies to your movie directory.', 'groups': [ { 'tab': 'renamer', 'name': 'renamer', 'label': 'Rename downloaded movies', 'wizard': True, 'options': [ { 'name': 'enabled', 'default': False, 'type': 'enabler', }, { 'name': 'from', 'type': 'directory', 'description': 'Folder where CP searches for movies.', }, { 'name': 'to', 'type': 'directory', 'description': 'Default folder where the movies are moved/copied/linked to.', }, { 'name': 'folder_name', 'label': 'Folder naming', 'description': 'Name of the folder. 
Keep empty for no folder.', 'default': '<namethe> (<year>)', 'type': 'choice', 'options': rename_options }, { 'name': 'file_name', 'label': 'File naming', 'description': 'Name of the file', 'default': '<thename><cd>.<ext>', 'type': 'choice', 'options': rename_options }, { 'advanced': True, 'name': 'use_tab_threed', 'type': 'bool', 'label': 'Use TAB 3D', 'description': ('Use TAB (Top And Bottom) instead of OU (Over Under).','This will allow Kodi to recognize vertical formatted 3D movies properly.'), 'default': True }, { 'advanced': True, 'name': 'replace_doubles', 'type': 'bool', 'label': 'Clean Name', 'description': ('Attempt to clean up double separaters due to missing data for fields.','Sometimes this eliminates wanted white space (see <a href="https://github.com/CouchPotato/CouchPotatoServer/issues/2782" target="_blank">#2782</a>).'), 'default': True }, { 'name': 'ignored_in_path', 'label': 'Ignored file patterns', 'description': ('A list of globs to path match when scanning, separated by ":"', 'anything on this list will be skipped during rename operations'), 'default': '*/.sync/*', }, { 'name': 'unrar', 'type': 'bool', 'description': 'Extract rar files if found.', 'default': False, }, { 'advanced': True, 'name': 'unrar_path', 'description': 'Custom path to unrar bin', }, { 'advanced': True, 'name': 'unrar_modify_date', 'type': 'bool', 'description': ('Set modify date of unrar-ed files to the rar-file\'s date.', 'This will allow Kodi to recognize extracted files as recently added even if the movie was released some time ago.'), 'default': False, }, { 'name': 'cleanup', 'type': 'bool', 'description': 'Cleanup leftover files after successful rename.', 'default': False, }, { 'name': 'remove_lower_quality_copies', 'type': 'bool', 'label': 'Delete Others', 'description': 'Remove lower/equal quality copies of a release after downloading.', 'default': True, }, { 'advanced': True, 'name': 'run_every', 'label': 'Run every', 'default': 1, 'type': 'int', 'unit': 'min(s)', 
'description': ('Detect movie status every X minutes.', 'Will start the renamer if movie is <strong>completed</strong> or handle <strong>failed</strong> download if these options are enabled'), }, { 'advanced': True, 'name': 'force_every', 'label': 'Force every', 'default': 2, 'type': 'int', 'unit': 'hour(s)', 'description': 'Forces the renamer to scan every X hours', }, { 'advanced': True, 'name': 'next_on_failed', 'default': True, 'type': 'bool', 'description': 'Try the next best release for a movie after a download failed.', }, { 'name': 'move_leftover', 'type': 'bool', 'description': 'Move all leftover file after renaming, to the movie folder.', 'default': False, 'advanced': True, }, { 'advanced': True, 'name': 'separator', 'label': 'File-Separator', 'description': ('Replace all the spaces with a character.', 'Example: ".", "-" (without quotes). Leave empty to use spaces.'), }, { 'advanced': True, 'name': 'foldersep', 'label': 'Folder-Separator', 'description': ('Replace all the spaces with a character.', 'Example: ".", "-" (without quotes). Leave empty to use spaces.'), }, { 'name': 'check_space', 'label': 'Check space', 'default': True, 'type': 'bool', 'description': ('Check if there\'s enough available space to rename the files', 'Disable when the filesystem doesn\'t return the proper value'), 'advanced': True, }, { 'name': 'default_file_action', 'label': 'Default File Action', 'default': 'move', 'type': 'dropdown', 'values': [('Link', 'link'), ('Copy', 'copy'), ('Move', 'move'), ('Reverse Symlink', 'symlink_reversed')], 'description': ('<strong>Link</strong>, <strong>Copy</strong> or <strong>Move</strong> after download completed.', 'Link first tries <a href="http://en.wikipedia.org/wiki/Hard_link" target="_blank">hard link</a>, then <a href="http://en.wikipedia.org/wiki/Sym_link" target="_blank">sym link</a> and falls back to Copy. 
Reverse Symlink moves the file and creates symlink to it in the original location'), 'advanced': True, }, { 'name': 'file_action', 'label': 'Torrent File Action', 'default': 'link', 'type': 'dropdown', 'values': [('Link', 'link'), ('Copy', 'copy'), ('Move', 'move'), ('Reverse Symlink', 'symlink_reversed')], 'description': 'See above. It is prefered to use link when downloading torrents as it will save you space, while still being able to seed.', 'advanced': True, }, { 'advanced': True, 'name': 'ntfs_permission', 'label': 'NTFS Permission', 'type': 'bool', 'hidden': os.name != 'nt', 'description': 'Set permission of moved files to that of destination folder (Windows NTFS only).', 'default': False, }, ], }, { 'tab': 'renamer', 'name': 'meta_renamer', 'label': 'Advanced renaming', 'description': 'Meta data file renaming. Use &lt;filename&gt; to use the above "File naming" settings, without the file extension.', 'advanced': True, 'options': [ { 'name': 'rename_nfo', 'label': 'Rename .NFO', 'description': 'Rename original .nfo file', 'type': 'bool', 'default': True, }, { 'name': 'nfo_name', 'label': 'NFO naming', 'default': '<filename>.orig.<ext>', 'type': 'choice', 'options': rename_options }, ], }, ], }]
75,457
Python
.py
1,297
40.203547
324
0.518472
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,943
trailer.py
CouchPotato_CouchPotatoServer/couchpotato/core/plugins/trailer.py
import os from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.helpers.variable import getExt, getTitle from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin log = CPLog(__name__) autoload = 'Trailer' class Trailer(Plugin): def __init__(self): addEvent('renamer.after', self.searchSingle) def searchSingle(self, message = None, group = None): if not group: group = {} if self.isDisabled() or len(group['files']['trailer']) > 0: return trailers = fireEvent('trailer.search', group = group, merge = True) if not trailers or trailers == []: log.info('No trailers found for: %s', getTitle(group)) return False for trailer in trailers.get(self.conf('quality'), []): ext = getExt(trailer) filename = self.conf('name').replace('<filename>', group['filename']) + ('.%s' % ('mp4' if len(ext) > 5 else ext)) destination = os.path.join(group['destination_dir'], filename) if not os.path.isfile(destination): trailer_file = fireEvent('file.download', url = trailer, dest = destination, urlopen_kwargs = {'headers': {'User-Agent': 'Quicktime'}}, single = True) if trailer_file and os.path.getsize(trailer_file) < (1024 * 1024): # Don't trust small trailers (1MB), try next one os.unlink(trailer_file) continue else: log.debug('Trailer already exists: %s', destination) group['renamed_files'].append(destination) # Download first and break break return True config = [{ 'name': 'trailer', 'groups': [ { 'tab': 'renamer', 'name': 'trailer', 'label': 'Download trailer', 'description': 'after rename', 'options': [ { 'name': 'enabled', 'label': 'Search and download trailers', 'default': False, 'type': 'enabler', }, { 'name': 'quality', 'default': '720p', 'type': 'dropdown', 'values': [('1080p', '1080p'), ('720p', '720p'), ('480P', '480p')], }, { 'name': 'name', 'label': 'Naming', 'default': '<filename>-trailer', 'advanced': True, 'description': 'Use <strong>&lt;filename&gt;</strong> to use above settings.' }, ], }, ], }]
2,719
Python
.py
64
29.859375
166
0.514968
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,944
dashboard.py
CouchPotato_CouchPotatoServer/couchpotato/core/plugins/dashboard.py
import random as rndm import time from CodernityDB.database import RecordDeleted, RecordNotFound from couchpotato import get_db from couchpotato.api import addApiView from couchpotato.core.event import fireEvent from couchpotato.core.helpers.variable import splitString, tryInt from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin log = CPLog(__name__) autoload = 'Dashboard' class Dashboard(Plugin): def __init__(self): addApiView('dashboard.soon', self.getSoonView) def getSoonView(self, limit_offset = None, random = False, late = False, **kwargs): db = get_db() now = time.time() # Get profiles first, determine pre or post theater profiles = fireEvent('profile.all', single = True) pre_releases = fireEvent('quality.pre_releases', single = True) # See what the profile contain and cache it profile_pre = {} for profile in profiles: contains = {} for q_identifier in profile.get('qualities', []): contains['theater' if q_identifier in pre_releases else 'dvd'] = True profile_pre[profile.get('_id')] = contains # Add limit limit = 12 if limit_offset: splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset limit = tryInt(splt[0]) # Get all active medias active_ids = [x['_id'] for x in fireEvent('media.with_status', 'active', with_doc = False, single = True)] medias = [] if len(active_ids) > 0: # Order by title or randomize if not random: orders_ids = db.all('media_title') active_ids = [x['_id'] for x in orders_ids if x['_id'] in active_ids] else: rndm.shuffle(active_ids) for media_id in active_ids: try: media = db.get('id', media_id) except RecordDeleted: log.debug('Record already deleted: %s', media_id) continue except RecordNotFound: log.debug('Record not found: %s', media_id) continue pp = profile_pre.get(media.get('profile_id')) if not pp: continue eta = media['info'].get('release_date', {}) or {} coming_soon = False # Theater quality if pp.get('theater') and 
fireEvent('movie.searcher.could_be_released', True, eta, media['info']['year'], single = True): coming_soon = 'theater' elif pp.get('dvd') and fireEvent('movie.searcher.could_be_released', False, eta, media['info']['year'], single = True): coming_soon = 'dvd' if coming_soon: # Don't list older movies eta_date = eta.get(coming_soon) eta_3month_passed = eta_date < (now - 7862400) # Release was more than 3 months ago if (not late and not eta_3month_passed) or \ (late and eta_3month_passed): add = True # Check if it doesn't have any releases if late: media['releases'] = fireEvent('release.for_media', media['_id'], single = True) for release in media.get('releases', []): if release.get('status') in ['snatched', 'available', 'seeding', 'downloaded']: add = False break if add: medias.append(media) if len(medias) >= limit: break return { 'success': True, 'empty': len(medias) == 0, 'movies': medias, } getLateView = getSoonView
4,048
Python
.py
84
33.428571
136
0.532553
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,945
base.py
CouchPotato_CouchPotatoServer/couchpotato/core/plugins/base.py
import threading
from urllib import quote, getproxies
from urlparse import urlparse
import os.path
import time
import traceback

from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import ss, toSafeString, \
    toUnicode, sp
from couchpotato.core.helpers.variable import md5, isLocalIP, scanForPassword, tryInt, getIdentifier, \
    randomString
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
import requests
from requests.packages.urllib3 import Timeout
from requests.packages.urllib3.exceptions import MaxRetryError
from tornado import template

log = CPLog(__name__)


class Plugin(object):
    """Base class for all CouchPotato plugins.

    Provides settings access, HTTP fetching (with per-host throttling and
    failure backoff), caching, file/folder helpers, locking and shutdown
    bookkeeping. Subclasses get registered via __new__.
    """

    _class_name = None
    _database = None
    plugin_path = None

    enabled_option = 'enabled'

    _needs_shutdown = False
    _running = None

    _locks = {}

    user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:45.0) Gecko/20100101 Firefox/45.0'
    http_last_use = {}
    http_last_use_queue = {}
    http_time_between_calls = 0
    http_failed_request = {}
    http_failed_disabled = {}

    def __new__(cls, *args, **kwargs):
        # Register every plugin instance on creation, before __init__ runs.
        new_plugin = super(Plugin, cls).__new__(cls)
        new_plugin.registerPlugin()

        return new_plugin

    def registerPlugin(self):
        addEvent('app.do_shutdown', self.doShutdown)
        addEvent('plugin.running', self.isRunning)
        self._running = []

        # Setup database
        if self._database:
            addEvent('database.setup', self.databaseSetup)

    def databaseSetup(self):
        for index_name in self._database:
            klass = self._database[index_name]
            fireEvent('database.setup_index', index_name, klass)

    def conf(self, attr, value = None, default = None, section = None):
        # Section defaults to the plugin's own (lowercased) class name.
        class_name = self.getName().lower().split(':')[0].lower()
        return Env.setting(attr, section = section if section else class_name, value = value, default = default)

    def deleteConf(self, attr):
        return Env._settings.delete(attr, section = self.getName().lower().split(':')[0].lower())

    def getName(self):
        return self._class_name or self.__class__.__name__

    def setName(self, name):
        self._class_name = name

    def renderTemplate(self, parent_file, templ, **params):
        # Template path is resolved relative to the calling plugin's file.
        t = template.Template(open(os.path.join(os.path.dirname(parent_file), templ), 'r').read())
        return t.generate(**params)

    def createFile(self, path, content, binary = False):
        path = sp(path)

        self.makeDir(os.path.dirname(path))

        if os.path.exists(path):
            log.debug('%s already exists, overwriting file with new version', path)

        write_type = 'w+' if not binary else 'w+b'

        # Stream file using response object
        if isinstance(content, requests.models.Response):

            # Write file to temp
            with open('%s.tmp' % path, write_type) as f:
                for chunk in content.iter_content(chunk_size = 1048576):
                    if chunk:  # filter out keep-alive new chunks
                        f.write(chunk)
                        f.flush()

            # Rename to destination
            os.rename('%s.tmp' % path, path)
        else:
            try:
                f = open(path, write_type)
                f.write(content)
                f.close()

                try:
                    os.chmod(path, Env.getPermission('file'))
                except:
                    log.error('Failed writing permission to file "%s": %s', (path, traceback.format_exc()))
            except:
                log.error('Unable to write file "%s": %s', (path, traceback.format_exc()))

                # Remove the partially written file on failure.
                if os.path.isfile(path):
                    os.remove(path)

    def makeDir(self, path):
        path = sp(path)
        try:
            if not os.path.isdir(path):
                os.makedirs(path, Env.getPermission('folder'))
                os.chmod(path, Env.getPermission('folder'))
            return True
        except Exception as e:
            log.error('Unable to create folder "%s": %s', (path, e))

        return False

    def deleteEmptyFolder(self, folder, show_error = True, only_clean = None):
        folder = sp(folder)

        for item in os.listdir(folder):
            full_folder = sp(os.path.join(folder, item))

            if not only_clean or (item in only_clean and os.path.isdir(full_folder)):

                # Bottom-up walk so empty leaves are removed before their parents.
                for subfolder, dirs, files in os.walk(full_folder, topdown = False):

                    try:
                        os.rmdir(subfolder)
                    except:
                        if show_error:
                            log.info2('Couldn\'t remove directory %s: %s', (subfolder, traceback.format_exc()))

        try:
            os.rmdir(folder)
        except:
            if show_error:
                log.error('Couldn\'t remove empty directory %s: %s', (folder, traceback.format_exc()))

    # http request
    def urlopen(self, url, timeout = 30, data = None, headers = None, files = None, show_error = True, stream = False):
        url = quote(ss(url), safe = "%/:=&?~#+!$,;'@()*[]")

        if not headers: headers = {}
        if not data: data = {}

        # Fill in some headers
        parsed_url = urlparse(url)
        host = '%s%s' % (parsed_url.hostname, (':' + str(parsed_url.port) if parsed_url.port else ''))

        headers['Referer'] = headers.get('Referer', '%s://%s' % (parsed_url.scheme, host))
        headers['Host'] = headers.get('Host', None)
        headers['User-Agent'] = headers.get('User-Agent', self.user_agent)
        headers['Accept-encoding'] = headers.get('Accept-encoding', 'gzip')
        headers['Connection'] = headers.get('Connection', 'keep-alive')
        headers['Cache-Control'] = headers.get('Cache-Control', 'max-age=0')

        use_proxy = Env.setting('use_proxy')
        proxy_url = None

        if use_proxy:
            proxy_server = Env.setting('proxy_server')
            proxy_username = Env.setting('proxy_username')
            proxy_password = Env.setting('proxy_password')

            if proxy_server:
                loc = "{0}:{1}@{2}".format(proxy_username, proxy_password, proxy_server) if proxy_username else proxy_server
                proxy_url = {
                    "http": "http://"+loc,
                    "https": "https://"+loc,
                }
            else:
                # Fall back to system proxy settings.
                proxy_url = getproxies()

        r = Env.get('http_opener')

        # Don't try for failed requests
        if self.http_failed_disabled.get(host, 0) > 0:

            if self.http_failed_disabled[host] > (time.time() - 900):
                log.info2('Disabled calls to %s for 15 minutes because so many failed requests.', host)
                if not show_error:
                    raise Exception('Disabled calls to %s for 15 minutes because so many failed requests' % host)
                else:
                    return ''
            else:
                # Cool-down expired; reset the failure bookkeeping.
                del self.http_failed_request[host]
                del self.http_failed_disabled[host]

        self.wait(host, url)
        status_code = None
        try:

            kwargs = {
                'headers': headers,
                'data': data if len(data) > 0 else None,
                'timeout': timeout,
                'files': files,
                'verify': False, #verify_ssl, Disable for now as to many wrongly implemented certificates..
                'stream': stream,
                'proxies': proxy_url,
            }
            method = 'post' if len(data) > 0 or files else 'get'

            log.info('Opening url: %s %s, data: %s', (method, url, [x for x in data.keys()] if isinstance(data, dict) else 'with data'))
            response = r.request(method, url, **kwargs)

            status_code = response.status_code
            if response.status_code == requests.codes.ok:
                data = response if stream else response.content
            else:
                response.raise_for_status()

            self.http_failed_request[host] = 0
        except (IOError, MaxRetryError, Timeout):
            if show_error:
                log.error('Failed opening url in %s: %s %s', (self.getName(), url, traceback.format_exc(0)))

            # Save failed requests by hosts
            try:

                # To many requests
                if status_code in [429]:
                    self.http_failed_request[host] = 1
                    self.http_failed_disabled[host] = time.time()

                if not self.http_failed_request.get(host):
                    self.http_failed_request[host] = 1
                else:
                    self.http_failed_request[host] += 1

                    # Disable temporarily
                    if self.http_failed_request[host] > 5 and not isLocalIP(host):
                        self.http_failed_disabled[host] = time.time()

            except:
                log.debug('Failed logging failed requests for %s: %s', (url, traceback.format_exc()))

            raise

        self.http_last_use[host] = time.time()

        return data

    def wait(self, host = '', url = ''):
        """Block until this host's rate limit allows the given url its turn."""
        if self.http_time_between_calls == 0:
            return

        try:
            if host not in self.http_last_use_queue:
                self.http_last_use_queue[host] = []

            self.http_last_use_queue[host].append(url)

            while True and not self.shuttingDown():

                wait = (self.http_last_use.get(host, 0) - time.time()) + self.http_time_between_calls

                # Not our turn yet; poll until we're at the queue head.
                if self.http_last_use_queue[host][0] != url:
                    time.sleep(.1)
                    continue

                if wait > 0:
                    log.debug('Waiting for %s, %d seconds', (self.getName(), max(1, wait)))
                    time.sleep(min(wait, 30))
                else:
                    self.http_last_use_queue[host] = self.http_last_use_queue[host][1:]
                    self.http_last_use[host] = time.time()
                    break
        except:
            log.error('Failed handling waiting call: %s', traceback.format_exc())
            time.sleep(self.http_time_between_calls)

    def beforeCall(self, handler):
        self.isRunning('%s.%s' % (self.getName(), handler.__name__))

    def afterCall(self, handler):
        self.isRunning('%s.%s' % (self.getName(), handler.__name__), False)

    def doShutdown(self, *args, **kwargs):
        self.shuttingDown(True)
        return True

    def shuttingDown(self, value = None):
        # Acts as getter when called without a value.
        if value is None:
            return self._needs_shutdown

        self._needs_shutdown = value

    def isRunning(self, value = None, boolean = True):
        if value is None:
            return self._running

        if boolean:
            self._running.append(value)
        else:
            try:
                self._running.remove(value)
            except:
                log.error("Something went wrong when finishing the plugin function. Could not find the 'is_running' key")

    def getCache(self, cache_key, url = None, **kwargs):
        # Only GET-style requests (no payload, no files) are cacheable.
        use_cache = not len(kwargs.get('data', {})) > 0 and not kwargs.get('files')

        if use_cache:
            cache_key_md5 = md5(cache_key)
            cache = Env.get('cache').get(cache_key_md5)
            if cache:
                if not Env.get('dev'): log.debug('Getting cache %s', cache_key)
                return cache

        if url:
            try:

                cache_timeout = 300
                if 'cache_timeout' in kwargs:
                    cache_timeout = kwargs.get('cache_timeout')
                    del kwargs['cache_timeout']

                data = self.urlopen(url, **kwargs)
                if data and cache_timeout > 0 and use_cache:
                    self.setCache(cache_key, data, timeout = cache_timeout)
                return data
            except:
                if not kwargs.get('show_error', True):
                    raise

                log.debug('Failed getting cache: %s', (traceback.format_exc(0)))
                return ''

    def setCache(self, cache_key, value, timeout = 300):
        cache_key_md5 = md5(cache_key)
        log.debug('Setting cache %s', cache_key)
        Env.get('cache').set(cache_key_md5, value, timeout)
        return value

    def createNzbName(self, data, media, unique_tag = False):
        release_name = data.get('name')
        tag = self.cpTag(media, unique_tag = unique_tag)

        # Check if password is filename
        name_password = scanForPassword(data.get('name'))
        if name_password:
            release_name, password = name_password
            tag += '{{%s}}' % password
        elif data.get('password'):
            tag += '{{%s}}' % data.get('password')

        max_length = 127 - len(tag)  # Some filesystems don't support 128+ long filenames
        return '%s%s' % (toSafeString(toUnicode(release_name)[:max_length]), tag)

    def createFileName(self, data, filedata, media, unique_tag = False):
        name = self.createNzbName(data, media, unique_tag = unique_tag)
        if data.get('protocol') == 'nzb' and 'DOCTYPE nzb' not in filedata and '</nzb>' not in filedata:
            return '%s.%s' % (name, 'rar')
        return '%s.%s' % (name, data.get('protocol'))

    def cpTag(self, media, unique_tag = False):

        tag = ''
        if Env.setting('enabled', 'renamer') or unique_tag:
            identifier = getIdentifier(media) or ''
            unique_tag = ', ' + randomString() if unique_tag else ''

            tag = '.cp('
            tag += identifier
            tag += ', ' if unique_tag and identifier else ''
            tag += randomString() if unique_tag else ''
            tag += ')'

        # '.cp()' alone (7 chars) carries no information, so drop it.
        return tag if len(tag) > 7 else ''

    def checkFilesChanged(self, files, unchanged_for = 60):
        now = time.time()
        file_too_new = False

        file_time = []
        for cur_file in files:

            # File got removed while checking
            if not os.path.isfile(cur_file):
                file_too_new = now
                break

            # File has changed in last 60 seconds
            file_time = self.getFileTimes(cur_file)
            for t in file_time:
                if t > now - unchanged_for:
                    file_too_new = tryInt(time.time() - t)
                    break

            if file_too_new:
                break

        if file_too_new:
            try:
                time_string = time.ctime(file_time[0])
            except:
                try:
                    time_string = time.ctime(file_time[1])
                except:
                    time_string = 'unknown'

            return file_too_new, time_string

        return False, None

    def getFileTimes(self, file_path):
        # ctime isn't creation time on posix, so ignore it there.
        return [os.path.getmtime(file_path), os.path.getctime(file_path) if os.name != 'posix' else 0]

    def isDisabled(self):
        return not self.isEnabled()

    def isEnabled(self):
        # Missing setting counts as enabled.
        return self.conf(self.enabled_option) or self.conf(self.enabled_option) is None

    def acquireLock(self, key):

        lock = self._locks.get(key)
        if not lock:
            self._locks[key] = threading.RLock()

        log.debug('Acquiring lock: %s', key)
        self._locks.get(key).acquire()

    def releaseLock(self, key):

        lock = self._locks.get(key)
        if lock:
            log.debug('Releasing lock: %s', key)
            self._locks.get(key).release()
15,483
Python
.py
339
33.566372
136
0.558497
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,946
automation.py
CouchPotato_CouchPotatoServer/couchpotato/core/plugins/automation.py
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env

log = CPLog(__name__)

autoload = 'Automation'


class Automation(Plugin):
    """Periodically imports movies suggested by the automation providers."""

    def __init__(self):

        addEvent('app.load', self.setCrons)

        # Skip the startup scan in dev mode.
        if not Env.get('dev'):
            addEvent('app.load', self.addMovies)

        addApiView('automation.add_movies', self.addMoviesFromApi, docs = {
            'desc': 'Manually trigger the automation scan. Hangs until scan is complete. Useful for webhooks.',
            'return': {'type': 'object: {"success": true}'},
        })

        addEvent('setting.save.automation.hour.after', self.setCrons)

    def setCrons(self):
        fireEvent('schedule.interval', 'automation.add_movies', self.addMovies, hours = self.conf('hour', default = 12))

    def addMoviesFromApi(self, **kwargs):
        self.addMovies()
        return {
            'success': True
        }

    def addMovies(self):
        """Fetch provider suggestions, add unseen IMDB ids and kick off searches."""

        movies = fireEvent('automation.get_movies', merge = True)
        movie_ids = []

        for imdb_id in movies:

            if self.shuttingDown():
                break

            # Remember every id we've seen so it is only added once, ever.
            prop_name = 'automation.added.%s' % imdb_id
            added = Env.prop(prop_name, default = False)
            if not added:
                added_movie = fireEvent('movie.add', params = {'identifier': imdb_id}, force_readd = False, search_after = False, update_after = True, single = True)
                if added_movie:
                    movie_ids.append(added_movie['_id'])
                Env.prop(prop_name, True)

        for movie_id in movie_ids:

            if self.shuttingDown():
                break

            movie_dict = fireEvent('media.get', movie_id, single = True)
            if movie_dict:
                fireEvent('movie.searcher.single', movie_dict)

        return True


config = [{
    'name': 'automation',
    'order': 101,
    'groups': [
        {
            'tab': 'automation',
            'name': 'automation',
            'label': 'Minimal movie requirements',
            'options': [
                {
                    'name': 'year',
                    'default': 2011,
                    'type': 'int',
                },
                {
                    'name': 'votes',
                    'default': 1000,
                    'type': 'int',
                },
                {
                    'name': 'rating',
                    'default': 7.0,
                    'type': 'float',
                },
                {
                    'name': 'hour',
                    'advanced': True,
                    'default': 12,
                    'label': 'Check every',
                    'type': 'int',
                    'unit': 'hours',
                    'description': 'hours',
                },
                {
                    'name': 'required_genres',
                    'label': 'Required Genres',
                    'default': '',
                    'placeholder': 'Example: Action, Crime & Drama',
                    'description': ('Ignore movies that don\'t contain at least one set of genres.', 'Sets are separated by "," and each word within a set must be separated with "&"')
                },
                {
                    'name': 'ignored_genres',
                    'label': 'Ignored Genres',
                    'default': '',
                    'placeholder': 'Example: Horror, Comedy & Drama & Romance',
                    'description': 'Ignore movies that contain at least one set of genres. Sets work the same as above.'
                },
            ],
        },
    ],
}]
3,770
Python
.py
95
26.042105
183
0.488232
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,947
custom.py
CouchPotato_CouchPotatoServer/couchpotato/core/plugins/custom.py
import os from couchpotato.core.event import addEvent from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from couchpotato.environment import Env log = CPLog(__name__) autoload = 'Custom' class Custom(Plugin): def __init__(self): addEvent('app.load', self.createStructure) def createStructure(self): custom_dir = os.path.join(Env.get('data_dir'), 'custom_plugins') if not os.path.isdir(custom_dir): self.makeDir(custom_dir) self.createFile(os.path.join(custom_dir, '__init__.py'), '# Don\'t remove this file')
615
Python
.py
15
35.6
97
0.705085
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,948
scanner.py
CouchPotato_CouchPotatoServer/couchpotato/core/plugins/scanner.py
import os import re import threading import time import traceback from couchpotato import get_db from couchpotato.core.event import fireEvent, addEvent from couchpotato.core.helpers.encoding import toUnicode, simplifyString, sp, ss from couchpotato.core.helpers.variable import getExt, getImdb, tryInt, \ splitString, getIdentifier from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from guessit import guess_movie_info from subliminal.videos import Video import enzyme from six.moves import filter, map, zip log = CPLog(__name__) autoload = 'Scanner' class Scanner(Plugin): ignored_in_path = [os.path.sep + 'extracted' + os.path.sep, 'extracting', '_unpack', '_failed_', '_unknown_', '_exists_', '_failed_remove_', '_failed_rename_', '.appledouble', '.appledb', '.appledesktop', os.path.sep + '._', '.ds_store', 'cp.cpnfo', 'thumbs.db', 'ehthumbs.db', 'desktop.ini'] # unpacking, smb-crap, hidden files ignore_names = ['extract', 'extracting', 'extracted', 'movie', 'movies', 'film', 'films', 'download', 'downloads', 'video_ts', 'audio_ts', 'bdmv', 'certificate'] ignored_extensions = ['ignore', 'lftp-pget-status'] extensions = { 'movie': ['mkv', 'wmv', 'avi', 'mpg', 'mpeg', 'mp4', 'm2ts', 'iso', 'img', 'mdf', 'ts', 'm4v', 'flv'], 'movie_extra': ['mds'], 'dvd': ['vts_*', 'vob'], 'nfo': ['nfo', 'txt', 'tag'], 'subtitle': ['sub', 'srt', 'ssa', 'ass'], 'subtitle_extra': ['idx'], 'trailer': ['mov', 'mp4', 'flv'] } threed_types = { 'Half SBS': [('half', 'sbs'), ('h', 'sbs'), 'hsbs'], 'Full SBS': [('full', 'sbs'), ('f', 'sbs'), 'fsbs'], 'SBS': ['sbs'], 'Half OU': [('half', 'ou'), ('h', 'ou'), ('half', 'tab'), ('h', 'tab'), 'htab', 'hou'], 'Full OU': [('full', 'ou'), ('f', 'ou'), ('full', 'tab'), ('f', 'tab'), 'ftab', 'fou'], 'OU': ['ou', 'tab'], 'Frame Packed': ['mvc', ('complete', 'bluray')], '3D': ['3d'] } file_types = { 'subtitle': ('subtitle', 'subtitle'), 'subtitle_extra': ('subtitle', 'subtitle_extra'), 'trailer': ('video', 'trailer'), 
'nfo': ('nfo', 'nfo'), 'movie': ('video', 'movie'), 'movie_extra': ('movie', 'movie_extra'), 'backdrop': ('image', 'backdrop'), 'poster': ('image', 'poster'), 'thumbnail': ('image', 'thumbnail'), 'leftover': ('leftover', 'leftover'), } file_sizes = { # in MB 'movie': {'min': 200}, 'trailer': {'min': 2, 'max': 199}, 'backdrop': {'min': 0, 'max': 5}, } codecs = { 'audio': ['DTS', 'AC3', 'AC3D', 'MP3'], 'video': ['x264', 'H264', 'x265', 'H265', 'DivX', 'Xvid'] } resolutions = { '2160p': {'resolution_width': 3840, 'resolution_height': 2160, 'aspect': 1.78}, '1080p': {'resolution_width': 1920, 'resolution_height': 1080, 'aspect': 1.78}, '1080i': {'resolution_width': 1920, 'resolution_height': 1080, 'aspect': 1.78}, '720p': {'resolution_width': 1280, 'resolution_height': 720, 'aspect': 1.78}, '720i': {'resolution_width': 1280, 'resolution_height': 720, 'aspect': 1.78}, '480p': {'resolution_width': 640, 'resolution_height': 480, 'aspect': 1.33}, '480i': {'resolution_width': 640, 'resolution_height': 480, 'aspect': 1.33}, 'default': {'resolution_width': 0, 'resolution_height': 0, 'aspect': 1}, } audio_codec_map = { 0x2000: 'AC3', 0x2001: 'DTS', 0x0055: 'MP3', 0x0050: 'MP2', 0x0001: 'PCM', 0x003: 'WAV', 0x77a1: 'TTA1', 0x5756: 'WAV', 0x6750: 'Vorbis', 0xF1AC: 'FLAC', 0x00ff: 'AAC', } source_media = { 'Blu-ray': ['bluray', 'blu-ray', 'brrip', 'br-rip'], 'HD DVD': ['hddvd', 'hd-dvd'], 'DVD': ['dvd'], 'HDTV': ['hdtv'] } clean = '([ _\,\.\(\)\[\]\-]|^)(3d|hsbs|sbs|half.sbs|full.sbs|ou|half.ou|full.ou|extended|extended.cut|directors.cut|french|fr|swedisch|sw|danish|dutch|nl|swesub|subs|spanish|german|ac3|dts|custom|dc|divx|divx5|dsr|dsrip|dutch|dvd|dvdr|dvdrip|dvdscr|dvdscreener|screener|dvdivx|cam|fragment|fs|hdtv|hdrip' \ 
'|hdtvrip|webdl|web.dl|webrip|web.rip|internal|limited|multisubs|ntsc|ogg|ogm|pal|pdtv|proper|repack|rerip|retail|r3|r5|bd5|se|svcd|swedish|german|read.nfo|nfofix|unrated|ws|telesync|ts|telecine|tc|brrip|bdrip|video_ts|audio_ts|480p|480i|576p|576i|720p|720i|1080p|1080i|hrhd|hrhdtv|hddvd|bluray|x264|h264|x265|h265|xvid|xvidvd|xxx|www.www|hc|\[.*\])(?=[ _\,\.\(\)\[\]\-]|$)' multipart_regex = [ '[ _\.-]+cd[ _\.-]*([0-9a-d]+)', #*cd1 '[ _\.-]+dvd[ _\.-]*([0-9a-d]+)', #*dvd1 '[ _\.-]+part[ _\.-]*([0-9a-d]+)', #*part1 '[ _\.-]+dis[ck][ _\.-]*([0-9a-d]+)', #*disk1 'cd[ _\.-]*([0-9a-d]+)$', #cd1.ext 'dvd[ _\.-]*([0-9a-d]+)$', #dvd1.ext 'part[ _\.-]*([0-9a-d]+)$', #part1.mkv 'dis[ck][ _\.-]*([0-9a-d]+)$', #disk1.mkv '()[ _\.-]+([0-9]*[abcd]+)(\.....?)$', '([a-z])([0-9]+)(\.....?)$', '()([ab])(\.....?)$' #*a.mkv ] cp_imdb = '\.cp\((?P<id>tt[0-9]+),?\s?(?P<random>[A-Za-z0-9]+)?\)' def __init__(self): addEvent('scanner.create_file_identifier', self.createStringIdentifier) addEvent('scanner.remove_cptag', self.removeCPTag) addEvent('scanner.scan', self.scan) addEvent('scanner.name_year', self.getReleaseNameYear) addEvent('scanner.partnumber', self.getPartNumber) def scan(self, folder = None, files = None, release_download = None, simple = False, newer_than = 0, return_ignored = True, check_file_date = True, on_found = None): folder = sp(folder) if not folder or not os.path.isdir(folder): log.error('Folder doesn\'t exists: %s', folder) return {} # Get movie "master" files movie_files = {} leftovers = [] # Scan all files of the folder if no files are set if not files: try: files = [] for root, dirs, walk_files in os.walk(folder, followlinks=True): files.extend([sp(os.path.join(sp(root), ss(filename))) for filename in walk_files]) # Break if CP wants to shut down if self.shuttingDown(): break except: log.error('Failed getting files from %s: %s', (folder, traceback.format_exc())) log.debug('Found %s files to scan and group in %s', (len(files), folder)) else: check_file_date = False 
files = [sp(x) for x in files] for file_path in files: if not os.path.exists(file_path): continue # Remove ignored files if self.isSampleFile(file_path): leftovers.append(file_path) continue elif not self.keepFile(file_path): continue is_dvd_file = self.isDVDFile(file_path) if self.filesizeBetween(file_path, self.file_sizes['movie']) or is_dvd_file: # Minimal 300MB files or is DVD file # Normal identifier identifier = self.createStringIdentifier(file_path, folder, exclude_filename = is_dvd_file) identifiers = [identifier] # Identifier with quality quality = fireEvent('quality.guess', files = [file_path], size = self.getFileSize(file_path), single = True) if not is_dvd_file else {'identifier':'dvdr'} if quality: identifier_with_quality = '%s %s' % (identifier, quality.get('identifier', '')) identifiers = [identifier_with_quality, identifier] if not movie_files.get(identifier): movie_files[identifier] = { 'unsorted_files': [], 'identifiers': identifiers, 'is_dvd': is_dvd_file, } movie_files[identifier]['unsorted_files'].append(file_path) else: leftovers.append(file_path) # Break if CP wants to shut down if self.shuttingDown(): break # Cleanup del files # Sort reverse, this prevents "Iron man 2" from getting grouped with "Iron man" as the "Iron Man 2" # files will be grouped first. 
leftovers = set(sorted(leftovers, reverse = True)) # Group files minus extension ignored_identifiers = [] for identifier, group in movie_files.items(): if identifier not in group['identifiers'] and len(identifier) > 0: group['identifiers'].append(identifier) log.debug('Grouping files: %s', identifier) has_ignored = 0 for file_path in list(group['unsorted_files']): ext = getExt(file_path) wo_ext = file_path[:-(len(ext) + 1)] found_files = set([i for i in leftovers if wo_ext in i]) group['unsorted_files'].extend(found_files) leftovers = leftovers - found_files has_ignored += 1 if ext in self.ignored_extensions else 0 if has_ignored == 0: for file_path in list(group['unsorted_files']): ext = getExt(file_path) has_ignored += 1 if ext in self.ignored_extensions else 0 if has_ignored > 0: ignored_identifiers.append(identifier) # Break if CP wants to shut down if self.shuttingDown(): break # Create identifiers for all leftover files path_identifiers = {} for file_path in leftovers: identifier = self.createStringIdentifier(file_path, folder) if not path_identifiers.get(identifier): path_identifiers[identifier] = [] path_identifiers[identifier].append(file_path) # Group the files based on the identifier delete_identifiers = [] for identifier, found_files in path_identifiers.items(): log.debug('Grouping files on identifier: %s', identifier) group = movie_files.get(identifier) if group: group['unsorted_files'].extend(found_files) delete_identifiers.append(identifier) # Remove the found files from the leftover stack leftovers = leftovers - set(found_files) # Break if CP wants to shut down if self.shuttingDown(): break # Cleaning up used for identifier in delete_identifiers: if path_identifiers.get(identifier): del path_identifiers[identifier] del delete_identifiers # Group based on folder delete_identifiers = [] for identifier, found_files in path_identifiers.items(): log.debug('Grouping files on foldername: %s', identifier) for ff in found_files: new_identifier = 
self.createStringIdentifier(os.path.dirname(ff), folder) group = movie_files.get(new_identifier) if group: group['unsorted_files'].extend([ff]) delete_identifiers.append(identifier) # Remove the found files from the leftover stack leftovers -= leftovers - set([ff]) # Break if CP wants to shut down if self.shuttingDown(): break # leftovers should be empty if leftovers: log.debug('Some files are still left over: %s', leftovers) # Cleaning up used for identifier in delete_identifiers: if path_identifiers.get(identifier): del path_identifiers[identifier] del delete_identifiers # Make sure we remove older / still extracting files valid_files = {} while True and not self.shuttingDown(): try: identifier, group = movie_files.popitem() except: break # Check if movie is fresh and maybe still unpacking, ignore files newer than 1 minute if check_file_date: files_too_new, time_string = self.checkFilesChanged(group['unsorted_files']) if files_too_new: log.info('Files seem to be still unpacking or just unpacked (created on %s), ignoring for now: %s', (time_string, identifier)) # Delete the unsorted list del group['unsorted_files'] continue # Only process movies newer than x if newer_than and newer_than > 0: has_new_files = False for cur_file in group['unsorted_files']: file_time = self.getFileTimes(cur_file) if file_time[0] > newer_than or file_time[1] > newer_than: has_new_files = True break if not has_new_files: log.debug('None of the files have changed since %s for %s, skipping.', (time.ctime(newer_than), identifier)) # Delete the unsorted list del group['unsorted_files'] continue valid_files[identifier] = group del movie_files total_found = len(valid_files) # Make sure only one movie was found if a download ID is provided if release_download and total_found == 0: log.info('Download ID provided (%s), but no groups found! 
Make sure the download contains valid media files (fully extracted).', release_download.get('imdb_id')) elif release_download and total_found > 1: log.info('Download ID provided (%s), but more than one group found (%s). Ignoring Download ID...', (release_download.get('imdb_id'), len(valid_files))) release_download = None # Determine file types processed_movies = {} while True and not self.shuttingDown(): try: identifier, group = valid_files.popitem() except: break if return_ignored is False and identifier in ignored_identifiers: log.debug('Ignore file found, ignoring release: %s', identifier) total_found -= 1 continue # Group extra (and easy) files first group['files'] = { 'movie_extra': self.getMovieExtras(group['unsorted_files']), 'subtitle': self.getSubtitles(group['unsorted_files']), 'subtitle_extra': self.getSubtitlesExtras(group['unsorted_files']), 'nfo': self.getNfo(group['unsorted_files']), 'trailer': self.getTrailers(group['unsorted_files']), 'leftover': set(group['unsorted_files']), } # Media files if group['is_dvd']: group['files']['movie'] = self.getDVDFiles(group['unsorted_files']) else: group['files']['movie'] = self.getMediaFiles(group['unsorted_files']) if len(group['files']['movie']) == 0: log.error('Couldn\'t find any movie files for %s', identifier) total_found -= 1 continue log.debug('Getting metadata for %s', identifier) group['meta_data'] = self.getMetaData(group, folder = folder, release_download = release_download) # Subtitle meta group['subtitle_language'] = self.getSubtitleLanguage(group) if not simple else {} # Get parent dir from movie files for movie_file in group['files']['movie']: group['parentdir'] = os.path.dirname(movie_file) group['dirname'] = None folder_names = group['parentdir'].replace(folder, '').split(os.path.sep) folder_names.reverse() # Try and get a proper dirname, so no "A", "Movie", "Download" etc for folder_name in folder_names: if folder_name.lower() not in self.ignore_names and len(folder_name) > 2: group['dirname'] 
= folder_name break break # Leftover "sorted" files for file_type in group['files']: if not file_type is 'leftover': group['files']['leftover'] -= set(group['files'][file_type]) group['files'][file_type] = list(group['files'][file_type]) group['files']['leftover'] = list(group['files']['leftover']) # Delete the unsorted list del group['unsorted_files'] # Determine movie group['media'] = self.determineMedia(group, release_download = release_download) if not group['media']: log.error('Unable to determine media: %s', group['identifiers']) else: group['identifier'] = getIdentifier(group['media']) or group['media']['info'].get('imdb') processed_movies[identifier] = group # Notify parent & progress on something found if on_found: on_found(group, total_found, len(valid_files)) # Wait for all the async events calm down a bit while threading.activeCount() > 100 and not self.shuttingDown(): log.debug('Too many threads active, waiting a few seconds') time.sleep(10) if len(processed_movies) > 0: log.info('Found %s movies in the folder %s', (len(processed_movies), folder)) else: log.debug('Found no movies in the folder %s', folder) return processed_movies def getMetaData(self, group, folder = '', release_download = None): data = {} files = list(group['files']['movie']) for cur_file in files: if not self.filesizeBetween(cur_file, self.file_sizes['movie']): continue # Ignore smaller files if not data.get('audio'): # Only get metadata from first media file meta = self.getMeta(cur_file) try: data['titles'] = meta.get('titles', []) data['video'] = meta.get('video', self.getCodec(cur_file, self.codecs['video'])) data['audio'] = meta.get('audio', self.getCodec(cur_file, self.codecs['audio'])) data['audio_channels'] = meta.get('audio_channels', 2.0) if meta.get('resolution_width'): data['resolution_width'] = meta.get('resolution_width') data['resolution_height'] = meta.get('resolution_height') data['aspect'] = round(float(meta.get('resolution_width')) / meta.get('resolution_height', 
1), 2) else: data.update(self.getResolution(cur_file)) except: log.debug('Error parsing metadata: %s %s', (cur_file, traceback.format_exc())) pass data['size'] = data.get('size', 0) + self.getFileSize(cur_file) data['quality'] = None quality = fireEvent('quality.guess', size = data.get('size'), files = files, extra = data, single = True) # Use the quality that we snatched but check if it matches our guess if release_download and release_download.get('quality'): data['quality'] = fireEvent('quality.single', release_download.get('quality'), single = True) data['quality']['is_3d'] = release_download.get('is_3d', 0) if data['quality']['identifier'] != quality['identifier']: log.info('Different quality snatched than detected for %s: %s vs. %s. Assuming snatched quality is correct.', (files[0], data['quality']['identifier'], quality['identifier'])) if data['quality']['is_3d'] != quality['is_3d']: log.info('Different 3d snatched than detected for %s: %s vs. %s. Assuming snatched 3d is correct.', (files[0], data['quality']['is_3d'], quality['is_3d'])) if not data['quality']: data['quality'] = quality if not data['quality']: data['quality'] = fireEvent('quality.single', 'dvdr' if group['is_dvd'] else 'dvdrip', single = True) data['quality_type'] = 'HD' if data.get('resolution_width', 0) >= 1280 or data['quality'].get('hd') else 'SD' filename = re.sub(self.cp_imdb, '', files[0]) data['group'] = self.getGroup(filename[len(folder):]) data['source'] = self.getSourceMedia(filename) if data['quality'].get('is_3d', 0): data['3d_type'] = self.get3dType(filename) return data def get3dType(self, filename): filename = ss(filename) words = re.split('\W+', filename.lower()) for key in self.threed_types: tags = self.threed_types.get(key, []) for tag in tags: if (isinstance(tag, tuple) and '.'.join(tag) in '.'.join(words)) or (isinstance(tag, (str, unicode)) and ss(tag.lower()) in words): log.debug('Found %s in %s', (tag, filename)) return key return '' def getMeta(self, filename): try: p 
= enzyme.parse(filename) # Video codec vc = ('H264' if p.video[0].codec == 'AVC1' else 'x265' if p.video[0].codec == 'HEVC' else p.video[0].codec) # Audio codec ac = p.audio[0].codec try: ac = self.audio_codec_map.get(p.audio[0].codec) except: pass # Find title in video headers titles = [] try: if p.title and self.findYear(p.title): titles.append(ss(p.title)) except: log.error('Failed getting title from meta: %s', traceback.format_exc()) for video in p.video: try: if video.title and self.findYear(video.title): titles.append(ss(video.title)) except: log.error('Failed getting title from meta: %s', traceback.format_exc()) return { 'titles': list(set(titles)), 'video': vc, 'audio': ac, 'resolution_width': tryInt(p.video[0].width), 'resolution_height': tryInt(p.video[0].height), 'audio_channels': p.audio[0].channels, } except enzyme.exceptions.ParseError: log.debug('Failed to parse meta for %s', filename) except enzyme.exceptions.NoParserError: log.debug('No parser found for %s', filename) except: log.debug('Failed parsing %s', filename) return {} def getSubtitleLanguage(self, group): detected_languages = {} # Subliminal scanner paths = None try: paths = group['files']['movie'] scan_result = [] for p in paths: if not group['is_dvd']: video = Video.from_path(toUnicode(sp(p))) video_result = [(video, video.scan())] scan_result.extend(video_result) for video, detected_subtitles in scan_result: for s in detected_subtitles: if s.language and s.path not in paths: detected_languages[s.path] = [s.language] except: log.debug('Failed parsing subtitle languages for %s: %s', (paths, traceback.format_exc())) # IDX for extra in group['files']['subtitle_extra']: try: if os.path.isfile(extra): output = open(extra, 'r') txt = output.read() output.close() idx_langs = re.findall('\nid: (\w+)', txt) sub_file = '%s.sub' % os.path.splitext(extra)[0] if len(idx_langs) > 0 and os.path.isfile(sub_file): detected_languages[sub_file] = idx_langs except: log.error('Failed parsing subtitle idx for 
%s: %s', (extra, traceback.format_exc())) return detected_languages def determineMedia(self, group, release_download = None): # Get imdb id from downloader imdb_id = release_download and release_download.get('imdb_id') if imdb_id: log.debug('Found movie via imdb id from it\'s download id: %s', release_download.get('imdb_id')) files = group['files'] # Check for CP(imdb_id) string in the file paths if not imdb_id: for cur_file in files['movie']: imdb_id = self.getCPImdb(cur_file) if imdb_id: log.debug('Found movie via CP tag: %s', cur_file) break # Check and see if nfo contains the imdb-id nfo_file = None if not imdb_id: try: for nf in files['nfo']: imdb_id = getImdb(nf, check_inside = True) if imdb_id: log.debug('Found movie via nfo file: %s', nf) nfo_file = nf break except: pass # Check and see if filenames contains the imdb-id if not imdb_id: try: for filetype in files: for filetype_file in files[filetype]: imdb_id = getImdb(filetype_file) if imdb_id: log.debug('Found movie via imdb in filename: %s', nfo_file) break except: pass # Search based on identifiers if not imdb_id: for identifier in group['identifiers']: if len(identifier) > 2: try: filename = list(group['files'].get('movie'))[0] except: filename = None name_year = self.getReleaseNameYear(identifier, file_name = filename if not group['is_dvd'] else None) if name_year.get('name') and name_year.get('year'): search_q = '%(name)s %(year)s' % name_year movie = fireEvent('movie.search', q = search_q, merge = True, limit = 1) # Try with other if len(movie) == 0 and name_year.get('other') and name_year['other'].get('name') and name_year['other'].get('year'): search_q2 = '%(name)s %(year)s' % name_year.get('other') if search_q2 != search_q: movie = fireEvent('movie.search', q = search_q2, merge = True, limit = 1) if len(movie) > 0: imdb_id = movie[0].get('imdb') log.debug('Found movie via search: %s', identifier) if imdb_id: break else: log.debug('Identifier to short to use for search: %s', identifier) if imdb_id: 
try: db = get_db() return db.get('media', 'imdb-%s' % imdb_id, with_doc = True)['doc'] except: log.debug('Movie "%s" not in library, just getting info', imdb_id) return { 'identifier': imdb_id, 'info': fireEvent('movie.info', identifier = imdb_id, merge = True, extended = False) } log.error('No imdb_id found for %s. Add a NFO file with IMDB id or add the year to the filename.', group['identifiers']) return {} def getCPImdb(self, string): try: m = re.search(self.cp_imdb, string.lower()) id = m.group('id') if id: return id except AttributeError: pass return False def removeCPTag(self, name): try: return re.sub(self.cp_imdb, '', name).strip() except: pass return name def getSamples(self, files): return set(filter(lambda s: self.isSampleFile(s), files)) def getMediaFiles(self, files): def test(s): return self.filesizeBetween(s, self.file_sizes['movie']) and getExt(s.lower()) in self.extensions['movie'] and not self.isSampleFile(s) return set(filter(test, files)) def getMovieExtras(self, files): return set(filter(lambda s: getExt(s.lower()) in self.extensions['movie_extra'], files)) def getDVDFiles(self, files): def test(s): return self.isDVDFile(s) return set(filter(test, files)) def getSubtitles(self, files): return set(filter(lambda s: getExt(s.lower()) in self.extensions['subtitle'], files)) def getSubtitlesExtras(self, files): return set(filter(lambda s: getExt(s.lower()) in self.extensions['subtitle_extra'], files)) def getNfo(self, files): return set(filter(lambda s: getExt(s.lower()) in self.extensions['nfo'], files)) def getTrailers(self, files): def test(s): return re.search('(^|[\W_])trailer\d*[\W_]', s.lower()) and self.filesizeBetween(s, self.file_sizes['trailer']) return set(filter(test, files)) def getImages(self, files): def test(s): return getExt(s.lower()) in ['jpg', 'jpeg', 'png', 'gif', 'bmp', 'tbn'] files = set(filter(test, files)) images = { 'backdrop': set(filter(lambda s: re.search('(^|[\W_])fanart|backdrop\d*[\W_]', s.lower()) and 
self.filesizeBetween(s, self.file_sizes['backdrop']), files)) } # Rest images['rest'] = files - images['backdrop'] return images def isDVDFile(self, file_name): if list(set(file_name.lower().split(os.path.sep)) & set(['video_ts', 'audio_ts'])): return True for needle in ['vts_', 'video_ts', 'audio_ts', 'bdmv', 'certificate']: if needle in file_name.lower(): return True return False def keepFile(self, filename): # ignoredpaths for i in self.ignored_in_path: if i in filename.lower(): log.debug('Ignored "%s" contains "%s".', (filename, i)) return False # All is OK return True def isSampleFile(self, filename): is_sample = re.search('(^|[\W_])sample\d*[\W_]', filename.lower()) if is_sample: log.debug('Is sample file: %s', filename) return is_sample def filesizeBetween(self, file, file_size = None): if not file_size: file_size = [] try: return file_size.get('min', 0) < self.getFileSize(file) < file_size.get('max', 100000) except: log.error('Couldn\'t get filesize of %s.', file) return False def getFileSize(self, file): try: return os.path.getsize(file) / 1024 / 1024 except: return None def createStringIdentifier(self, file_path, folder = '', exclude_filename = False): identifier = file_path.replace(folder, '').lstrip(os.path.sep) # root folder identifier = os.path.splitext(identifier)[0] # ext # Exclude file name path if needed (f.e. 
for DVD files) if exclude_filename: identifier = identifier[:len(identifier) - len(os.path.split(identifier)[-1])] # Make sure the identifier is lower case as all regex is with lower case tags identifier = identifier.lower() try: path_split = splitString(identifier, os.path.sep) identifier = path_split[-2] if len(path_split) > 1 and len(path_split[-2]) > len(path_split[-1]) else path_split[-1] # Only get filename except: pass # multipart identifier = self.removeMultipart(identifier) # remove cptag identifier = self.removeCPTag(identifier) # simplify the string identifier = simplifyString(identifier) year = self.findYear(file_path) # groups, release tags, scenename cleaner identifier = re.sub(self.clean, '::', identifier).strip(':') # Year if year and identifier[:4] != year: split_by = ':::' if ':::' in identifier else year identifier = '%s %s' % (identifier.split(split_by)[0].strip(), year) else: identifier = identifier.split('::')[0] # Remove duplicates out = [] for word in identifier.split(): if not word in out: out.append(word) identifier = ' '.join(out) return simplifyString(identifier) def removeMultipart(self, name): for regex in self.multipart_regex: try: found = re.sub(regex, '', name) if found != name: name = found except: pass return name def getPartNumber(self, name): for regex in self.multipart_regex: try: found = re.search(regex, name) if found: return found.group(1) return 1 except: pass return 1 def getCodec(self, filename, codecs): codecs = map(re.escape, codecs) try: codec = re.search('[^A-Z0-9](?P<codec>' + '|'.join(codecs) + ')[^A-Z0-9]', filename, re.I) return (codec and codec.group('codec')) or '' except: return '' def getResolution(self, filename): try: for key in self.resolutions: if key in filename.lower() and key != 'default': return self.resolutions[key] except: pass return self.resolutions['default'] def getGroup(self, file): try: match = re.findall('\-([A-Z0-9]+)[\.\/]', file, re.I) return match[-1] or '' except: return '' def 
getSourceMedia(self, file): for media in self.source_media: for alias in self.source_media[media]: if alias in file.lower(): return media return None def findYear(self, text): # Search year inside () or [] first matches = re.findall('(\(|\[)(?P<year>19[0-9]{2}|20[0-9]{2})(\]|\))', text) if matches: return matches[-1][1] # Search normal matches = re.findall('(?P<year>19[0-9]{2}|20[0-9]{2})', text) if matches: return matches[-1] return '' def getReleaseNameYear(self, release_name, file_name = None): release_name = release_name.strip(' .-_') # Use guessit first guess = {} if file_name: try: guessit = guess_movie_info(toUnicode(file_name)) if guessit.get('title') and guessit.get('year'): guess = { 'name': guessit.get('title'), 'year': guessit.get('year'), } except: log.debug('Could not detect via guessit "%s": %s', (file_name, traceback.format_exc())) # Backup to simple release_name = os.path.basename(release_name.replace('\\', '/')) cleaned = ' '.join(re.split('\W+', simplifyString(release_name))) cleaned = re.sub(self.clean, ' ', cleaned) year = None for year_str in [file_name, release_name, cleaned]: if not year_str: continue year = self.findYear(year_str) if year: break cp_guess = {} if year: # Split name on year try: movie_name = cleaned.rsplit(year, 1).pop(0).strip() if movie_name: cp_guess = { 'name': movie_name, 'year': int(year), } except: pass if not cp_guess: # Split name on multiple spaces try: movie_name = cleaned.split(' ').pop(0).strip() cp_guess = { 'name': movie_name, 'year': int(year) if movie_name[:4] != year else 0, } except: pass if cp_guess.get('year') == guess.get('year') and len(cp_guess.get('name', '')) > len(guess.get('name', '')): cp_guess['other'] = guess return cp_guess elif guess == {}: cp_guess['other'] = guess return cp_guess guess['other'] = cp_guess return guess
37,247
Python
.py
763
35.72346
386
0.539161
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,949
manage.py
CouchPotato_CouchPotatoServer/couchpotato/core/plugins/manage.py
import os import time import traceback from couchpotato import get_db from couchpotato.api import addApiView from couchpotato.core.event import fireEvent, addEvent, fireEventAsync from couchpotato.core.helpers.encoding import sp from couchpotato.core.helpers.variable import splitString, getTitle, tryInt, getIdentifier, getFreeSpace from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from couchpotato.environment import Env log = CPLog(__name__) autoload = 'Manage' class Manage(Plugin): in_progress = False def __init__(self): fireEvent('scheduler.interval', identifier = 'manage.update_library', handle = self.updateLibrary, hours = 2) addEvent('manage.update', self.updateLibrary) addEvent('manage.diskspace', self.getDiskSpace) # Add files after renaming def after_rename(message = None, group = None): if not group: group = {} return self.scanFilesToLibrary(folder = group['destination_dir'], files = group['renamed_files'], release_download = group['release_download']) addEvent('renamer.after', after_rename, priority = 110) addApiView('manage.update', self.updateLibraryView, docs = { 'desc': 'Update the library by scanning for new movies', 'params': { 'full': {'desc': 'Do a full update or just recently changed/added movies.'}, } }) addApiView('manage.progress', self.getProgress, docs = { 'desc': 'Get the progress of current manage update', 'return': {'type': 'object', 'example': """{ 'progress': False || object, total & to_go, }"""}, }) if not Env.get('dev') and self.conf('startup_scan'): addEvent('app.load', self.updateLibraryQuick) addEvent('app.load', self.setCrons) # Enable / disable interval addEvent('setting.save.manage.library_refresh_interval.after', self.setCrons) def setCrons(self): fireEvent('schedule.remove', 'manage.update_library') refresh = tryInt(self.conf('library_refresh_interval')) if refresh > 0: fireEvent('schedule.interval', 'manage.update_library', self.updateLibrary, hours = refresh, single = True) return 
True def getProgress(self, **kwargs): return { 'progress': self.in_progress } def updateLibraryView(self, full = 1, **kwargs): fireEventAsync('manage.update', full = True if full == '1' else False) return { 'progress': self.in_progress, 'success': True } def updateLibraryQuick(self): return self.updateLibrary(full = False) def updateLibrary(self, full = True): last_update_key = 'manage.last_update%s' % ('_full' if full else '') last_update = float(Env.prop(last_update_key, default = 0)) if self.in_progress: log.info('Already updating library: %s', self.in_progress) return elif self.isDisabled() or (last_update > time.time() - 20): return self.in_progress = {} fireEvent('notify.frontend', type = 'manage.updating', data = True) try: directories = self.directories() directories.sort() added_identifiers = [] # Add some progress for directory in directories: self.in_progress[os.path.normpath(directory)] = { 'started': False, 'eta': -1, 'total': None, 'to_go': None, } for directory in directories: folder = os.path.normpath(directory) self.in_progress[os.path.normpath(directory)]['started'] = tryInt(time.time()) if not os.path.isdir(folder): if len(directory) > 0: log.error('Directory doesn\'t exist: %s', folder) continue log.info('Updating manage library: %s', folder) fireEvent('notify.frontend', type = 'manage.update', data = True, message = 'Scanning for movies in "%s"' % folder) onFound = self.createAddToLibrary(folder, added_identifiers) fireEvent('scanner.scan', folder = folder, simple = True, newer_than = last_update if not full else 0, check_file_date = False, on_found = onFound, single = True) # Break if CP wants to shut down if self.shuttingDown(): break # If cleanup option is enabled, remove offline files from database if self.conf('cleanup') and full and not self.shuttingDown(): # Get movies with done status total_movies, done_movies = fireEvent('media.list', types = 'movie', status = 'done', release_status = 'done', status_or = True, single = True) 
deleted_releases = [] for done_movie in done_movies: if getIdentifier(done_movie) not in added_identifiers: fireEvent('media.delete', media_id = done_movie['_id'], delete_from = 'all') else: releases = done_movie.get('releases', []) for release in releases: if release.get('files'): brk = False for file_type in release.get('files', {}): for release_file in release['files'][file_type]: # Remove release not available anymore if not os.path.isfile(sp(release_file)): fireEvent('release.clean', release['_id']) brk = True break if brk: break # Check if there are duplicate releases (different quality) use the last one, delete the rest if len(releases) > 1: used_files = {} for release in releases: for file_type in release.get('files', {}): for release_file in release['files'][file_type]: already_used = used_files.get(release_file) if already_used: release_id = release['_id'] if already_used.get('last_edit', 0) > release.get('last_edit', 0) else already_used['_id'] if release_id not in deleted_releases: fireEvent('release.delete', release_id, single = True) deleted_releases.append(release_id) break else: used_files[release_file] = release del used_files # Break if CP wants to shut down if self.shuttingDown(): break if not self.shuttingDown(): db = get_db() db.reindex() Env.prop(last_update_key, time.time()) except: log.error('Failed updating library: %s', (traceback.format_exc())) while self.in_progress and len(self.in_progress) > 0 and not self.shuttingDown(): delete_me = {} # noinspection PyTypeChecker for folder in self.in_progress: if self.in_progress[folder]['to_go'] <= 0: delete_me[folder] = True for delete in delete_me: del self.in_progress[delete] time.sleep(1) fireEvent('notify.frontend', type = 'manage.updating', data = False) self.in_progress = False # noinspection PyDefaultArgument def createAddToLibrary(self, folder, added_identifiers = []): def addToLibrary(group, total_found, to_go): if self.in_progress[folder]['total'] is None: 
self.in_progress[folder].update({ 'total': total_found, 'to_go': total_found, }) self.updateProgress(folder, to_go) if group['media'] and group['identifier']: added_identifiers.append(group['identifier']) # Add it to release and update the info fireEvent('release.add', group = group, update_info = False) fireEvent('movie.update', identifier = group['identifier'], on_complete = self.createAfterUpdate(folder, group['identifier'])) return addToLibrary def createAfterUpdate(self, folder, identifier): # Notify frontend def afterUpdate(): if not self.in_progress or self.shuttingDown(): return total = self.in_progress[folder]['total'] movie_dict = fireEvent('media.get', identifier, single = True) if movie_dict: fireEvent('notify.frontend', type = 'movie.added', data = movie_dict, message = None if total > 5 else 'Added "%s" to manage.' % getTitle(movie_dict)) return afterUpdate def updateProgress(self, folder, to_go): pr = self.in_progress[folder] if to_go < pr['to_go']: pr['to_go'] = to_go avg = (time.time() - pr['started']) / (pr['total'] - pr['to_go']) pr['eta'] = tryInt(avg * pr['to_go']) def directories(self): try: return self.conf('library', default = []) except: pass return [] def scanFilesToLibrary(self, folder = None, files = None, release_download = None): folder = os.path.normpath(folder) groups = fireEvent('scanner.scan', folder = folder, files = files, single = True) if groups: for group in groups.values(): if group.get('media'): if release_download and release_download.get('release_id'): fireEvent('release.add', group = group, update_id = release_download.get('release_id')) else: fireEvent('release.add', group = group) def getDiskSpace(self): return getFreeSpace(self.directories()) config = [{ 'name': 'manage', 'groups': [ { 'tab': 'manage', 'label': 'Movie Library Manager', 'description': 'Add your existing movie folders.', 'options': [ { 'name': 'enabled', 'default': False, 'type': 'enabler', }, { 'name': 'library', 'type': 'directories', 'description': 
'Folder where the movies should be moved to.', }, { 'label': 'Cleanup After', 'name': 'cleanup', 'type': 'bool', 'description': 'Remove movie from db if it can\'t be found after re-scan.', 'default': True, }, { 'label': 'Scan at startup', 'name': 'startup_scan', 'type': 'bool', 'default': True, 'advanced': True, 'description': 'Do a quick scan on startup. On slow systems better disable this.', }, { 'label': 'Full library refresh', 'name': 'library_refresh_interval', 'type': 'int', 'default': 0, 'advanced': True, 'description': 'Do a full scan every X hours. (0 is disabled)', }, ], }, ], }]
12,274
Python
.py
247
33.603239
178
0.517821
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,950
__init__.py
CouchPotato_CouchPotatoServer/couchpotato/core/plugins/wizard/__init__.py
from .main import Wizard def autoload(): return Wizard() config = [{ 'name': 'core', 'groups': [ { 'tab': 'general', 'name': 'advanced', 'options': [ { 'name': 'show_wizard', 'label': 'Run the wizard', 'default': 1, 'type': 'bool', }, ], }, ], }]
442
Python
.py
20
11.35
46
0.331742
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,951
main.py
CouchPotato_CouchPotatoServer/couchpotato/core/plugins/wizard/main.py
from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin log = CPLog(__name__) class Wizard(Plugin): pass
147
Python
.py
5
27
48
0.798561
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,952
main.py
CouchPotato_CouchPotatoServer/couchpotato/core/plugins/userscript/main.py
import os import traceback import time from base64 import b64encode, b64decode from couchpotato import index from couchpotato.api import addApiView from couchpotato.core.event import fireEvent, addEvent from couchpotato.core.helpers.variable import isDict from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from couchpotato.environment import Env from tornado.web import RequestHandler log = CPLog(__name__) class Userscript(Plugin): version = 8 def __init__(self): addApiView('userscript.get/(.*)/(.*)', self.getUserScript, static = True) addApiView('userscript', self.iFrame) addApiView('userscript.add_via_url', self.getViaUrl) addApiView('userscript.includes', self.getIncludes) addApiView('userscript.bookmark', self.bookmark) addEvent('userscript.get_version', self.getVersion) addEvent('app.test', self.doTest) def bookmark(self, host = None, **kwargs): params = { 'includes': fireEvent('userscript.get_includes', merge = True), 'excludes': fireEvent('userscript.get_excludes', merge = True), 'host': host, } return self.renderTemplate(__file__, 'bookmark.js_tmpl', **params) def getIncludes(self, **kwargs): return { 'includes': fireEvent('userscript.get_includes', merge = True), 'excludes': fireEvent('userscript.get_excludes', merge = True), } def getUserScript(self, script_route, **kwargs): klass = self class UserscriptHandler(RequestHandler): def get(self, random, route): bookmarklet_host = Env.setting('bookmarklet_host') loc = bookmarklet_host if bookmarklet_host else "{0}://{1}".format(self.request.protocol, self.request.headers.get('X-Forwarded-Host') or self.request.headers.get('host')) params = { 'includes': fireEvent('userscript.get_includes', merge = True), 'excludes': fireEvent('userscript.get_excludes', merge = True), 'version': klass.getVersion(), 'api': '%suserscript/' % Env.get('api_base'), 'host': loc, } script = klass.renderTemplate(__file__, 'template.js_tmpl', **params) 
klass.createFile(os.path.join(Env.get('cache_dir'), 'couchpotato.user.js'), script) self.redirect(Env.get('api_base') + 'file.cache/couchpotato.user.js') Env.get('app').add_handlers(".*$", [('%s%s' % (Env.get('api_base'), script_route), UserscriptHandler)]) def getVersion(self): versions = fireEvent('userscript.get_provider_version') version = self.version for v in versions: version += v return version def iFrame(self, **kwargs): return index() def getViaUrl(self, url = None, **kwargs): params = { 'url': url, 'movie': fireEvent('userscript.get_movie_via_url', url = url, single = True) } if not isDict(params['movie']): log.error('Failed adding movie via url: %s', url) params['error'] = params['movie'] if params['movie'] else 'Failed getting movie info' return params def doTest(self): time.sleep(1) tests = [ 'aHR0cDovL3d3dy5hbGxvY2luZS5mci9maWxtL2ZpY2hlZmlsbV9nZW5fY2ZpbG09MjAxMTA1Lmh0bWw=', 'aHR0cDovL3RyYWlsZXJzLmFwcGxlLmNvbS90cmFpbGVycy9wYXJhbW91bnQvbWlzc2lvbmltcG9zc2libGVyb2d1ZW5hdGlvbi8=', 'aHR0cDovL3d3dy55b3V0aGVhdGVyLmNvbS92aWV3LnBocD9pZD0xMTI2Mjk5', 'aHR0cDovL3RyYWt0LnR2L21vdmllcy9taXNzaW9uLWltcG9zc2libGUtcm9ndWUtbmF0aW9uLTIwMTU=', 'aHR0cHM6Ly93d3cucmVkZGl0LmNvbS9yL0lqdXN0d2F0Y2hlZC9jb21tZW50cy8zZjk3bzYvaWp3X21pc3Npb25faW1wb3NzaWJsZV9yb2d1ZV9uYXRpb25fMjAxNS8=', 'aHR0cDovL3d3dy5yb3R0ZW50b21hdG9lcy5jb20vbS9taXNzaW9uX2ltcG9zc2libGVfcm9ndWVfbmF0aW9uLw==', 'aHR0cHM6Ly93d3cudGhlbW92aWVkYi5vcmcvbW92aWUvMTc3Njc3LW1pc3Npb24taW1wb3NzaWJsZS01', 'aHR0cDovL3d3dy5jcml0aWNrZXIuY29tL2ZpbG0vTWlzc2lvbl9JbXBvc3NpYmxlX1JvZ3VlLw==', 'aHR0cDovL2ZpbG1jZW50cnVtLm5sL2ZpbG1zLzE4MzIzL21pc3Npb24taW1wb3NzaWJsZS1yb2d1ZS1uYXRpb24v', 'aHR0cDovL3d3dy5maWxtc3RhcnRzLmRlL2tyaXRpa2VuLzIwMTEwNS5odG1s', 'aHR0cDovL3d3dy5maWxtd2ViLnBsL2ZpbG0vTWlzc2lvbiUzQStJbXBvc3NpYmxlKy0rUm9ndWUrTmF0aW9uLTIwMTUtNjU1MDQ4', 'aHR0cDovL3d3dy5mbGlja2NoYXJ0LmNvbS9tb3ZpZS8zM0NFMzEyNUJB', 'aHR0cDovL3d3dy5pbWRiLmNvbS90aXRsZS90dDIzODEyNDkv', 'aHR0cDovL2xldHRlcmJveGQuY29tL2ZpbG0vbWlzc2lvbi1pbXBvc3NpYmxlLXJvZ3VlLW5hdGlvbi8=', 
'aHR0cDovL3d3dy5tb3ZpZW1ldGVyLm5sL2ZpbG0vMTA0MTcw', 'aHR0cDovL21vdmllcy5pby9tLzMxL2Vu', ] success = 0 for x in tests: x = b64decode(x) try: movie = self.getViaUrl(x) movie = movie.get('movie', {}) or {} imdb = movie.get('imdb') if imdb and b64encode(imdb) in ['dHQxMjI5MjM4', 'dHQyMzgxMjQ5']: success += 1 continue except: log.error('Failed userscript test "%s": %s', (x, traceback.format_exc())) log.error('Failed userscript test "%s"', x) if success == len(tests): log.debug('All userscript tests successful') else: log.error('Failed userscript tests, %s out of %s', (success, len(tests)))
5,591
Python
.py
106
41.849057
187
0.658957
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,953
scores.py
CouchPotato_CouchPotatoServer/couchpotato/core/plugins/score/scores.py
import re import traceback from couchpotato.core.event import fireEvent from couchpotato.core.helpers.encoding import simplifyString from couchpotato.core.helpers.variable import tryInt from couchpotato.core.logger import CPLog from couchpotato.environment import Env log = CPLog(__name__) name_scores = [ # Tags 'proper:15', 'repack:15', 'directors cut:15', 'extended:7', 'unrated:2', # Video 'x264:1', 'h264:1', # Audio 'dts:4', 'ac3:2', # Quality '720p:10', '1080p:10', '2160p:10', 'bluray:10', 'dvd:1', 'dvdrip:1', 'brrip:1', 'bdrip:1', 'bd50:1', 'bd25:1', # Language / Subs 'german:-10', 'french:-10', 'spanish:-10', 'swesub:-20', 'danish:-10', 'dutch:-10', # Release groups 'imbt:1', 'cocain:1', 'vomit:1', 'fico:1', 'arrow:1', 'pukka:1', 'prism:1', 'devise:1', 'esir:1', 'ctrlhd:1', 'metis:10', 'diamond:10', 'wiki:10', 'cbgb:10', 'crossbow:1', 'sinners:10', 'amiable:10', 'refined:1', 'twizted:1', 'felony:1', 'hubris:1', 'machd:1', # Extras 'extras:-40', 'trilogy:-40', ] def nameScore(name, year, preferred_words): """ Calculate score for words in the NZB name """ try: score = 0 name = name.lower() # give points for the cool stuff for value in name_scores: v = value.split(':') add = int(v.pop()) if v.pop() in name: score += add # points if the year is correct if str(year) in name: score += 5 # Contains preferred word nzb_words = re.split('\W+', simplifyString(name)) score += 100 * len(list(set(nzb_words) & set(preferred_words))) return score except: log.error('Failed doing nameScore: %s', traceback.format_exc()) return 0 def nameRatioScore(nzb_name, movie_name): try: nzb_words = re.split('\W+', fireEvent('scanner.create_file_identifier', nzb_name, single = True)) movie_words = re.split('\W+', simplifyString(movie_name)) left_over = set(nzb_words) - set(movie_words) return 10 - len(left_over) except: log.error('Failed doing nameRatioScore: %s', traceback.format_exc()) return 0 def namePositionScore(nzb_name, movie_name): score = 0 nzb_words = re.split('\W+', 
simplifyString(nzb_name)) qualities = fireEvent('quality.all', single = True) try: nzb_name = re.search(r'([\'"])[^\1]*\1', nzb_name).group(0) except: pass name_year = fireEvent('scanner.name_year', nzb_name, single = True) # Give points for movies beginning with the correct name split_by = simplifyString(movie_name) name_split = [] if len(split_by) > 0: name_split = simplifyString(nzb_name).split(split_by) if name_split[0].strip() == '': score += 10 # If year is second in line, give more points if len(name_split) > 1 and name_year: after_name = name_split[1].strip() if tryInt(after_name[:4]) == name_year.get('year', None): score += 10 after_name = after_name[4:] # Give -point to crap between year and quality found_quality = None for quality in qualities: # Main in words if quality['identifier'] in nzb_words: found_quality = quality['identifier'] # Alt in words for alt in quality['alternative']: if alt in nzb_words: found_quality = alt break if not found_quality: return score - 20 allowed = [] for value in name_scores: name, sc = value.split(':') allowed.append(name) inbetween = re.split('\W+', after_name.split(found_quality)[0].strip()) score -= (10 * len(set(inbetween) - set(allowed))) return score def sizeScore(size): return 0 if size else -20 def providerScore(provider): try: score = tryInt(Env.setting('extra_score', section = provider.lower(), default = 0)) except: score = 0 return score def duplicateScore(nzb_name, movie_name): try: nzb_words = re.split('\W+', simplifyString(nzb_name)) movie_words = re.split('\W+', simplifyString(movie_name)) # minus for duplicates duplicates = [x for i, x in enumerate(nzb_words) if nzb_words[i:].count(x) > 1] return len(list(set(duplicates) - set(movie_words))) * -4 except: log.error('Failed doing duplicateScore: %s', traceback.format_exc()) return 0 def partialIgnoredScore(nzb_name, movie_name, ignored_words): try: nzb_name = nzb_name.lower() movie_name = movie_name.lower() score = 0 for ignored_word in ignored_words: if 
ignored_word in nzb_name and ignored_word not in movie_name: score -= 5 return score except: log.error('Failed doing partialIgnoredScore: %s', traceback.format_exc()) return 0 def halfMultipartScore(nzb_name): try: wrong_found = 0 for nr in [1, 2, 3, 4, 5, 'i', 'ii', 'iii', 'iv', 'v', 'a', 'b', 'c', 'd', 'e']: for wrong in ['cd', 'part', 'dis', 'disc', 'dvd']: if '%s%s' % (wrong, nr) in nzb_name.lower(): wrong_found += 1 if wrong_found == 1: return -30 return 0 except: log.error('Failed doing halfMultipartScore: %s', traceback.format_exc()) return 0 def sceneScore(nzb_name): check_names = [nzb_name] # Match names between " try: check_names.append(re.search(r'([\'"])[^\1]*\1', nzb_name).group(0)) except: pass # Match longest name between [] try: check_names.append(max(re.findall(r'[^[]*\[([^]]*)\]', nzb_name), key = len).strip()) except: pass for name in check_names: # Strip twice, remove possible file extensions name = name.lower().strip(' "\'\.-_\[\]') name = re.sub('\.([a-z0-9]{0,4})$', '', name) name = name.strip(' "\'\.-_\[\]') # Make sure year and groupname is in there year = re.findall('(?P<year>19[0-9]{2}|20[0-9]{2})', name) group = re.findall('\-([a-z0-9]+)$', name) if len(year) > 0 and len(group) > 0: try: validate = fireEvent('release.validate', name, single = True) if validate and tryInt(validate.get('score')) != 0: log.debug('Release "%s" scored %s, reason: %s', (nzb_name, validate['score'], validate['reasons'])) return tryInt(validate.get('score')) except: log.error('Failed scoring scene: %s', traceback.format_exc()) return 0
6,753
Python
.py
165
32.624242
154
0.580408
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,954
main.py
CouchPotato_CouchPotatoServer/couchpotato/core/plugins/score/main.py
from couchpotato.core.event import addEvent from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.helpers.variable import getTitle, splitString, removeDuplicate from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from couchpotato.core.plugins.score.scores import nameScore, nameRatioScore, \ sizeScore, providerScore, duplicateScore, partialIgnoredScore, namePositionScore, \ halfMultipartScore, sceneScore from couchpotato.environment import Env log = CPLog(__name__) class Score(Plugin): def __init__(self): addEvent('score.calculate', self.calculate) def calculate(self, nzb, movie): """ Calculate the score of a NZB, used for sorting later """ # Merge global and category preferred_words = splitString(Env.setting('preferred_words', section = 'searcher').lower()) try: preferred_words = removeDuplicate(preferred_words + splitString(movie['category']['preferred'].lower())) except: pass score = nameScore(toUnicode(nzb['name']), movie['info']['year'], preferred_words) for movie_title in movie['info']['titles']: score += nameRatioScore(toUnicode(nzb['name']), toUnicode(movie_title)) score += namePositionScore(toUnicode(nzb['name']), toUnicode(movie_title)) score += sizeScore(nzb['size']) # Torrents only if nzb.get('seeders'): try: score += nzb.get('seeders') * 100 / 15 score += nzb.get('leechers') * 100 / 30 except: pass # Provider score score += providerScore(nzb['provider']) # Duplicates in name score += duplicateScore(nzb['name'], getTitle(movie)) # Merge global and category ignored_words = splitString(Env.setting('ignored_words', section = 'searcher').lower()) try: ignored_words = removeDuplicate(ignored_words + splitString(movie['category']['ignored'].lower())) except: pass # Partial ignored words score += partialIgnoredScore(nzb['name'], getTitle(movie), ignored_words) # Ignore single downloads from multipart score += halfMultipartScore(nzb['name']) # Extra provider specific check extra_score = 
nzb.get('extra_score') if extra_score: score += extra_score(nzb) # Scene / Nuke scoring score += sceneScore(nzb['name']) return score
2,494
Python
.py
50
41.4
117
0.66488
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,955
index.py
CouchPotato_CouchPotatoServer/couchpotato/core/plugins/category/index.py
from CodernityDB.tree_index import TreeBasedIndex class CategoryIndex(TreeBasedIndex): _version = 1 def __init__(self, *args, **kwargs): kwargs['key_format'] = 'i' super(CategoryIndex, self).__init__(*args, **kwargs) def make_key(self, key): return key def make_key_value(self, data): if data.get('_t') == 'category': return data.get('order', -99), None class CategoryMediaIndex(TreeBasedIndex): _version = 1 def __init__(self, *args, **kwargs): kwargs['key_format'] = '32s' super(CategoryMediaIndex, self).__init__(*args, **kwargs) def make_key(self, key): return str(key) def make_key_value(self, data): if data.get('_t') == 'media' and data.get('category_id'): return str(data.get('category_id')), None
836
Python
.py
21
32.619048
65
0.608696
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,956
main.py
CouchPotato_CouchPotatoServer/couchpotato/core/plugins/category/main.py
import traceback from couchpotato import get_db from couchpotato.api import addApiView from couchpotato.core.event import addEvent from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from .index import CategoryIndex, CategoryMediaIndex log = CPLog(__name__) class CategoryPlugin(Plugin): _database = { 'category': CategoryIndex, 'category_media': CategoryMediaIndex, } def __init__(self): addApiView('category.save', self.save) addApiView('category.save_order', self.saveOrder) addApiView('category.delete', self.delete) addApiView('category.list', self.allView, docs = { 'desc': 'List all available categories', 'return': {'type': 'object', 'example': """{ 'success': True, 'categories': array, categories }"""} }) addEvent('category.all', self.all) def allView(self, **kwargs): return { 'success': True, 'categories': self.all() } def all(self): db = get_db() categories = db.all('category', with_doc = True) return [x['doc'] for x in categories] def save(self, **kwargs): try: db = get_db() category = { '_t': 'category', 'order': kwargs.get('order', 999), 'label': toUnicode(kwargs.get('label', '')), 'ignored': toUnicode(kwargs.get('ignored', '')), 'preferred': toUnicode(kwargs.get('preferred', '')), 'required': toUnicode(kwargs.get('required', '')), 'destination': toUnicode(kwargs.get('destination', '')), } try: c = db.get('id', kwargs.get('id')) category['order'] = c.get('order', category['order']) c.update(category) db.update(c) except: c = db.insert(category) c.update(category) return { 'success': True, 'category': c } except: log.error('Failed: %s', traceback.format_exc()) return { 'success': False, 'category': None } def saveOrder(self, **kwargs): try: db = get_db() order = 0 for category_id in kwargs.get('ids', []): c = db.get('id', category_id) c['order'] = order db.update(c) order += 1 return { 'success': True } except: log.error('Failed: %s', traceback.format_exc()) return { 
'success': False } def delete(self, id = None, **kwargs): try: db = get_db() success = False message = '' try: c = db.get('id', id) db.delete(c) # Force defaults on all empty category movies self.removeFromMovie(id) success = True except: message = log.error('Failed deleting category: %s', traceback.format_exc()) return { 'success': success, 'message': message } except: log.error('Failed: %s', traceback.format_exc()) return { 'success': False } def removeFromMovie(self, category_id): try: db = get_db() movies = [x['doc'] for x in db.get_many('category_media', category_id, with_doc = True)] if len(movies) > 0: for movie in movies: movie['category_id'] = None db.update(movie) except: log.error('Failed: %s', traceback.format_exc())
3,963
Python
.py
114
23.026316
100
0.501442
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,957
index.py
CouchPotato_CouchPotatoServer/couchpotato/core/plugins/release/index.py
from hashlib import md5 from CodernityDB.hash_index import HashIndex from CodernityDB.tree_index import TreeBasedIndex class ReleaseIndex(TreeBasedIndex): _version = 1 def __init__(self, *args, **kwargs): kwargs['key_format'] = '32s' super(ReleaseIndex, self).__init__(*args, **kwargs) def make_key(self, key): return key def make_key_value(self, data): if data.get('_t') == 'release' and data.get('media_id'): return data['media_id'], None class ReleaseStatusIndex(TreeBasedIndex): _version = 1 def __init__(self, *args, **kwargs): kwargs['key_format'] = '32s' super(ReleaseStatusIndex, self).__init__(*args, **kwargs) def make_key(self, key): return md5(key).hexdigest() def make_key_value(self, data): if data.get('_t') == 'release' and data.get('status'): return md5(data.get('status')).hexdigest(), {'media_id': data.get('media_id')} class ReleaseIDIndex(HashIndex): _version = 1 def __init__(self, *args, **kwargs): kwargs['key_format'] = '32s' super(ReleaseIDIndex, self).__init__(*args, **kwargs) def make_key(self, key): return md5(key).hexdigest() def make_key_value(self, data): if data.get('_t') == 'release' and data.get('identifier'): return md5(data.get('identifier')).hexdigest(), {'media_id': data.get('media_id')} class ReleaseDownloadIndex(HashIndex): _version = 2 def __init__(self, *args, **kwargs): kwargs['key_format'] = '32s' super(ReleaseDownloadIndex, self).__init__(*args, **kwargs) def make_key(self, key): return md5(key.lower()).hexdigest() def make_key_value(self, data): if data.get('_t') == 'release' and data.get('download_info') and data['download_info']['id'] and data['download_info']['downloader']: return md5(('%s-%s' % (data['download_info']['downloader'], data['download_info']['id'])).lower()).hexdigest(), None
2,015
Python
.py
43
39.790698
141
0.619682
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,958
main.py
CouchPotato_CouchPotatoServer/couchpotato/core/plugins/release/main.py
from inspect import ismethod, isfunction import os import time import traceback from CodernityDB.database import RecordDeleted, RecordNotFound from couchpotato import md5, get_db from couchpotato.api import addApiView from couchpotato.core.event import fireEvent, addEvent from couchpotato.core.helpers.encoding import toUnicode, sp from couchpotato.core.helpers.variable import getTitle, tryInt from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from .index import ReleaseIndex, ReleaseStatusIndex, ReleaseIDIndex, ReleaseDownloadIndex from couchpotato.environment import Env log = CPLog(__name__) class Release(Plugin): _database = { 'release': ReleaseIndex, 'release_status': ReleaseStatusIndex, 'release_identifier': ReleaseIDIndex, 'release_download': ReleaseDownloadIndex } def __init__(self): addApiView('release.manual_download', self.manualDownload, docs = { 'desc': 'Send a release manually to the downloaders', 'params': { 'id': {'type': 'id', 'desc': 'ID of the release object in release-table'} } }) addApiView('release.delete', self.deleteView, docs = { 'desc': 'Delete releases', 'params': { 'id': {'type': 'id', 'desc': 'ID of the release object in release-table'} } }) addApiView('release.ignore', self.ignore, docs = { 'desc': 'Toggle ignore, for bad or wrong releases', 'params': { 'id': {'type': 'id', 'desc': 'ID of the release object in release-table'} } }) addEvent('release.add', self.add) addEvent('release.download', self.download) addEvent('release.try_download_result', self.tryDownloadResult) addEvent('release.create_from_search', self.createFromSearch) addEvent('release.delete', self.delete) addEvent('release.clean', self.clean) addEvent('release.update_status', self.updateStatus) addEvent('release.with_status', self.withStatus) addEvent('release.for_media', self.forMedia) # Clean releases that didn't have activity in the last week addEvent('app.load', self.cleanDone, priority = 1000) fireEvent('schedule.interval', 
'movie.clean_releases', self.cleanDone, hours = 12) def cleanDone(self): log.debug('Removing releases from dashboard') now = time.time() week = 604800 db = get_db() # Get (and remove) parentless releases releases = db.all('release', with_doc = False) media_exist = [] reindex = 0 for release in releases: if release.get('key') in media_exist: continue try: try: doc = db.get('id', release.get('_id')) except RecordDeleted: reindex += 1 continue db.get('id', release.get('key')) media_exist.append(release.get('key')) try: if doc.get('status') == 'ignore': doc['status'] = 'ignored' db.update(doc) except: log.error('Failed fixing mis-status tag: %s', traceback.format_exc()) except ValueError: fireEvent('database.delete_corrupted', release.get('key'), traceback_error = traceback.format_exc(0)) reindex += 1 except RecordDeleted: db.delete(doc) log.debug('Deleted orphaned release: %s', doc) reindex += 1 except: log.debug('Failed cleaning up orphaned releases: %s', traceback.format_exc()) if reindex > 0: db.reindex() del media_exist # get movies last_edit more than a week ago medias = fireEvent('media.with_status', ['done', 'active'], single = True) for media in medias: if media.get('last_edit', 0) > (now - week): continue for rel in self.forMedia(media['_id']): # Remove all available releases if rel['status'] in ['available']: self.delete(rel['_id']) # Set all snatched and downloaded releases to ignored to make sure they are ignored when re-adding the media elif rel['status'] in ['snatched', 'downloaded']: self.updateStatus(rel['_id'], status = 'ignored') if 'recent' in media.get('tags', []): fireEvent('media.untag', media.get('_id'), 'recent', single = True) def add(self, group, update_info = True, update_id = None): try: db = get_db() release_identifier = '%s.%s.%s' % (group['identifier'], group['meta_data'].get('audio', 'unknown'), group['meta_data']['quality']['identifier']) # Add movie if it doesn't exist try: media = db.get('media', 'imdb-%s' % group['identifier'], 
with_doc = True)['doc'] except: media = fireEvent('movie.add', params = { 'identifier': group['identifier'], 'profile_id': None, }, search_after = False, update_after = update_info, notify_after = False, status = 'done', single = True) release = None if update_id: try: release = db.get('id', update_id) release.update({ 'identifier': release_identifier, 'last_edit': int(time.time()), 'status': 'done', }) except: log.error('Failed updating existing release: %s', traceback.format_exc()) else: # Add Release if not release: release = { '_t': 'release', 'media_id': media['_id'], 'identifier': release_identifier, 'quality': group['meta_data']['quality'].get('identifier'), 'is_3d': group['meta_data']['quality'].get('is_3d', 0), 'last_edit': int(time.time()), 'status': 'done' } try: r = db.get('release_identifier', release_identifier, with_doc = True)['doc'] r['media_id'] = media['_id'] except: log.debug('Failed updating release by identifier "%s". Inserting new.', release_identifier) r = db.insert(release) # Update with ref and _id release.update({ '_id': r['_id'], '_rev': r['_rev'], }) # Empty out empty file groups release['files'] = dict((k, [toUnicode(x) for x in v]) for k, v in group['files'].items() if v) db.update(release) fireEvent('media.restatus', media['_id'], allowed_restatus = ['done'], single = True) return True except: log.error('Failed: %s', traceback.format_exc()) return False def deleteView(self, id = None, **kwargs): return { 'success': self.delete(id) } def delete(self, release_id): try: db = get_db() rel = db.get('id', release_id) db.delete(rel) return True except RecordDeleted: log.debug('Already deleted: %s', release_id) return True except: log.error('Failed: %s', traceback.format_exc()) return False def clean(self, release_id): try: db = get_db() rel = db.get('id', release_id) raw_files = rel.get('files') if len(raw_files) == 0: self.delete(rel['_id']) else: files = {} for file_type in raw_files: for release_file in raw_files.get(file_type, []): if 
os.path.isfile(sp(release_file)): if file_type not in files: files[file_type] = [] files[file_type].append(release_file) rel['files'] = files db.update(rel) return True except: log.error('Failed: %s', traceback.format_exc()) return False def ignore(self, id = None, **kwargs): db = get_db() try: if id: rel = db.get('id', id, with_doc = True) self.updateStatus(id, 'available' if rel['status'] in ['ignored', 'failed'] else 'ignored') return { 'success': True } except: log.error('Failed: %s', traceback.format_exc()) return { 'success': False } def manualDownload(self, id = None, **kwargs): db = get_db() try: release = db.get('id', id) item = release['info'] movie = db.get('id', release['media_id']) fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Snatching "%s"' % item['name']) # Get matching provider provider = fireEvent('provider.belongs_to', item['url'], provider = item.get('provider'), single = True) if item.get('protocol') != 'torrent_magnet': item['download'] = provider.loginDownload if provider.urls.get('login') else provider.download success = self.download(data = item, media = movie, manual = True) if success: fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Successfully snatched "%s"' % item['name']) return { 'success': success == True } except: log.error('Couldn\'t find release with id: %s: %s', (id, traceback.format_exc())) return { 'success': False } def download(self, data, media, manual = False): # Test to see if any downloaders are enabled for this type downloader_enabled = fireEvent('download.enabled', manual, data, single = True) if not downloader_enabled: log.info('Tried to download, but none of the "%s" downloaders are enabled or gave an error', data.get('protocol')) return False # Download NZB or torrent file filedata = None if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))): try: filedata = data.get('download')(url = 
data.get('url'), nzb_id = data.get('id')) except: log.error('Tried to download, but the "%s" provider gave an error: %s', (data.get('protocol'), traceback.format_exc())) return False if filedata == 'try_next': return filedata elif not filedata: return False # Send NZB or torrent file to downloader download_result = fireEvent('download', data = data, media = media, manual = manual, filedata = filedata, single = True) if not download_result: log.info('Tried to download, but the "%s" downloader gave an error', data.get('protocol')) return False log.debug('Downloader result: %s', download_result) try: db = get_db() try: rls = db.get('release_identifier', md5(data['url']), with_doc = True)['doc'] except: log.error('No release found to store download information in') return False renamer_enabled = Env.setting('enabled', 'renamer') # Save download-id info if returned if isinstance(download_result, dict): rls['download_info'] = download_result db.update(rls) log_movie = '%s (%s) in %s' % (getTitle(media), media['info'].get('year'), rls['quality']) snatch_message = 'Snatched "%s": %s from %s' % (data.get('name'), log_movie, (data.get('provider', '') + data.get('provider_extra', ''))) log.info(snatch_message) fireEvent('%s.snatched' % data['type'], message = snatch_message, data = media) # Mark release as snatched if renamer_enabled: self.updateStatus(rls['_id'], status = 'snatched') # If renamer isn't used, mark media done if finished or release downloaded else: if media['status'] == 'active': profile = db.get('id', media['profile_id']) if fireEvent('quality.isfinish', {'identifier': rls['quality'], 'is_3d': rls.get('is_3d', False)}, profile, single = True): log.info('Renamer disabled, marking media as finished: %s', log_movie) # Mark release done self.updateStatus(rls['_id'], status = 'done') # Mark media done fireEvent('media.restatus', media['_id'], single = True) return True # Assume release downloaded self.updateStatus(rls['_id'], status = 'downloaded') except: 
log.error('Failed storing download status: %s', traceback.format_exc()) return False return True def tryDownloadResult(self, results, media, quality_custom): wait_for = False let_through = False filtered_results = [] minimum_seeders = tryInt(Env.setting('minimum_seeders', section = 'torrent', default = 1)) # Filter out ignored and other releases we don't want for rel in results: if rel['status'] in ['ignored', 'failed']: log.info('Ignored: %s', rel['name']) continue if rel['score'] < quality_custom.get('minimum_score'): log.info('Ignored, score "%s" too low, need at least "%s": %s', (rel['score'], quality_custom.get('minimum_score'), rel['name'])) continue if rel['size'] <= 50: log.info('Ignored, size "%sMB" too low: %s', (rel['size'], rel['name'])) continue if 'seeders' in rel and rel.get('seeders') < minimum_seeders: log.info('Ignored, not enough seeders, has %s needs %s: %s', (rel.get('seeders'), minimum_seeders, rel['name'])) continue # If a single release comes through the "wait for", let through all rel['wait_for'] = False if quality_custom.get('index') != 0 and quality_custom.get('wait_for', 0) > 0 and rel.get('age') <= quality_custom.get('wait_for', 0): rel['wait_for'] = True else: let_through = True filtered_results.append(rel) # Loop through filtered results for rel in filtered_results: # Only wait if not a single release is old enough if rel.get('wait_for') and not let_through: log.info('Ignored, waiting %s days: %s', (quality_custom.get('wait_for') - rel.get('age'), rel['name'])) wait_for = True continue downloaded = fireEvent('release.download', data = rel, media = media, single = True) if downloaded is True: return True elif downloaded != 'try_next': break return wait_for def createFromSearch(self, search_results, media, quality): try: db = get_db() found_releases = [] is_3d = False try: is_3d = quality['custom']['3d'] except: pass for rel in search_results: rel_identifier = md5(rel['url']) release = { '_t': 'release', 'identifier': rel_identifier, 
'media_id': media.get('_id'), 'quality': quality.get('identifier'), 'is_3d': is_3d, 'status': rel.get('status', 'available'), 'last_edit': int(time.time()), 'info': {} } # Add downloader info if provided try: release['download_info'] = rel['download_info'] del rel['download_info'] except: pass try: rls = db.get('release_identifier', rel_identifier, with_doc = True)['doc'] except: rls = db.insert(release) rls.update(release) # Update info, but filter out functions for info in rel: try: if not isinstance(rel[info], (str, unicode, int, long, float)): continue rls['info'][info] = toUnicode(rel[info]) if isinstance(rel[info], (str, unicode)) else rel[info] except: log.debug('Couldn\'t add %s to ReleaseInfo: %s', (info, traceback.format_exc())) db.update(rls) # Update release in search_results rel['status'] = rls.get('status') if rel['status'] == 'available': found_releases.append(rel_identifier) return found_releases except: log.error('Failed: %s', traceback.format_exc()) return [] def updateStatus(self, release_id, status = None): if not status: return False try: db = get_db() rel = db.get('id', release_id) if rel and rel.get('status') != status: release_name = None if rel.get('files'): for file_type in rel.get('files', {}): if file_type == 'movie': for release_file in rel['files'][file_type]: release_name = os.path.basename(release_file) break if not release_name and rel.get('info'): release_name = rel['info'].get('name') #update status in Db log.debug('Marking release %s as %s', (release_name, status)) rel['status'] = status rel['last_edit'] = int(time.time()) db.update(rel) #Update all movie info as there is no release update function fireEvent('notify.frontend', type = 'release.update_status', data = rel) return True except: log.error('Failed: %s', traceback.format_exc()) return False def withStatus(self, status, with_doc = True): db = get_db() status = list(status if isinstance(status, (list, tuple)) else [status]) for s in status: for ms in 
db.get_many('release_status', s): if with_doc: try: doc = db.get('id', ms['_id']) yield doc except RecordNotFound: log.debug('Record not found, skipping: %s', ms['_id']) else: yield ms def forMedia(self, media_id): db = get_db() raw_releases = db.get_many('release', media_id) releases = [] for r in raw_releases: try: doc = db.get('id', r.get('_id')) releases.append(doc) except RecordDeleted: pass except (ValueError, EOFError): fireEvent('database.delete_corrupted', r.get('_id'), traceback_error = traceback.format_exc(0)) releases = sorted(releases, key = lambda k: k.get('info', {}).get('score', 0), reverse = True) # Sort based on preferred search method download_preference = self.conf('preferred_method', section = 'searcher') if download_preference != 'both': releases = sorted(releases, key = lambda k: k.get('info', {}).get('protocol', '')[:3], reverse = (download_preference == 'torrent')) return releases or []
20,736
Python
.py
435
33.177011
156
0.519536
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,959
index.py
CouchPotato_CouchPotatoServer/couchpotato/core/plugins/profile/index.py
from CodernityDB.tree_index import TreeBasedIndex class ProfileIndex(TreeBasedIndex): _version = 1 def __init__(self, *args, **kwargs): kwargs['key_format'] = 'i' super(ProfileIndex, self).__init__(*args, **kwargs) def make_key(self, key): return key def make_key_value(self, data): if data.get('_t') == 'profile': return data.get('order', 99), None
415
Python
.py
11
30.818182
59
0.614035
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,960
main.py
CouchPotato_CouchPotatoServer/couchpotato/core/plugins/profile/main.py
import traceback from couchpotato import get_db, tryInt from couchpotato.api import addApiView from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from .index import ProfileIndex log = CPLog(__name__) class ProfilePlugin(Plugin): _database = { 'profile': ProfileIndex } def __init__(self): addEvent('profile.all', self.all) addEvent('profile.default', self.default) addApiView('profile.save', self.save) addApiView('profile.save_order', self.saveOrder) addApiView('profile.delete', self.delete) addApiView('profile.list', self.allView, docs = { 'desc': 'List all available profiles', 'return': {'type': 'object', 'example': """{ 'success': True, 'list': array, profiles }"""} }) addEvent('app.initialize', self.fill, priority = 90) addEvent('app.load', self.forceDefaults, priority = 110) def forceDefaults(self): db = get_db() # Fill qualities and profiles if they are empty somehow.. 
if db.count(db.all, 'profile') == 0: if db.count(db.all, 'quality') == 0: fireEvent('quality.fill', single = True) self.fill() # Get all active movies without profile try: medias = fireEvent('media.with_status', 'active', single = True) profile_ids = [x.get('_id') for x in self.all()] default_id = profile_ids[0] for media in medias: if media.get('profile_id') not in profile_ids: media['profile_id'] = default_id db.update(media) except: log.error('Failed: %s', traceback.format_exc()) # Cleanup profiles that have empty qualites profiles = self.all() for profile in profiles: try: if '' in profile.get('qualities') or '-1' in profile.get('qualities'): log.warning('Found profile with empty qualities, cleaning it up') p = db.get('id', profile.get('_id')) p['qualities'] = [x for x in p['qualities'] if (x != '' and x != '-1')] db.update(p) except: log.error('Failed: %s', traceback.format_exc()) def allView(self, **kwargs): return { 'success': True, 'list': self.all() } def all(self): db = get_db() profiles = db.all('profile', with_doc = True) return [x['doc'] for x in profiles] def save(self, **kwargs): try: db = get_db() profile = { '_t': 'profile', 'label': toUnicode(kwargs.get('label')), 'order': tryInt(kwargs.get('order', 999)), 'core': kwargs.get('core', False), 'minimum_score': tryInt(kwargs.get('minimum_score', 1)), 'qualities': [], 'wait_for': [], 'stop_after': [], 'finish': [], '3d': [] } # Update types order = 0 for type in kwargs.get('types', []): profile['qualities'].append(type.get('quality')) profile['wait_for'].append(tryInt(kwargs.get('wait_for', 0))) profile['stop_after'].append(tryInt(kwargs.get('stop_after', 0))) profile['finish'].append((tryInt(type.get('finish')) == 1) if order > 0 else True) profile['3d'].append(tryInt(type.get('3d'))) order += 1 id = kwargs.get('id') try: p = db.get('id', id) profile['order'] = tryInt(kwargs.get('order', p.get('order', 999))) except: p = db.insert(profile) p.update(profile) db.update(p) return { 'success': True, 
'profile': p } except: log.error('Failed: %s', traceback.format_exc()) return { 'success': False } def default(self): db = get_db() return list(db.all('profile', limit = 1, with_doc = True))[0]['doc'] def saveOrder(self, **kwargs): try: db = get_db() order = 0 for profile_id in kwargs.get('ids', []): p = db.get('id', profile_id) p['hide'] = tryInt(kwargs.get('hidden')[order]) == 1 p['order'] = order db.update(p) order += 1 return { 'success': True } except: log.error('Failed: %s', traceback.format_exc()) return { 'success': False } def delete(self, id = None, **kwargs): try: db = get_db() success = False message = '' try: p = db.get('id', id) db.delete(p) # Force defaults on all empty profile movies self.forceDefaults() success = True except Exception as e: message = log.error('Failed deleting Profile: %s', e) return { 'success': success, 'message': message } except: log.error('Failed: %s', traceback.format_exc()) return { 'success': False } def fill(self): try: db = get_db() profiles = [{ 'label': 'Best', 'qualities': ['720p', '1080p', 'brrip', 'dvdrip'] }, { 'label': 'HD', 'qualities': ['720p', '1080p'] }, { 'label': 'SD', 'qualities': ['dvdrip', 'dvdr'] }, { 'label': 'Prefer 3D HD', 'qualities': ['1080p', '720p', '720p', '1080p'], '3d': [True, True] }, { 'label': '3D HD', 'qualities': ['1080p', '720p'], '3d': [True, True] }, { 'label': 'UHD 4K', 'qualities': ['720p', '1080p', '2160p'] }] # Create default quality profile order = 0 for profile in profiles: log.info('Creating default profile: %s', profile.get('label')) pro = { '_t': 'profile', 'label': toUnicode(profile.get('label')), 'order': order, 'qualities': profile.get('qualities'), 'minimum_score': 1, 'finish': [], 'wait_for': [], 'stop_after': [], '3d': [] } threed = profile.get('3d', []) for q in profile.get('qualities'): pro['finish'].append(True) pro['wait_for'].append(0) pro['stop_after'].append(0) pro['3d'].append(threed.pop() if threed else False) db.insert(pro) order += 1 return True except: 
log.error('Failed: %s', traceback.format_exc()) return False
7,558
Python
.py
201
24.283582
98
0.464731
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,961
main.py
CouchPotato_CouchPotatoServer/couchpotato/core/plugins/log/main.py
import os import re import traceback from couchpotato.api import addApiView from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.helpers.variable import tryInt, splitString from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from couchpotato.environment import Env log = CPLog(__name__) class Logging(Plugin): def __init__(self): addApiView('logging.get', self.get, docs = { 'desc': 'Get the full log file by number', 'params': { 'nr': {'desc': 'Number of the log to get.'} }, 'return': {'type': 'object', 'example': """{ 'success': True, 'log': [{ 'time': '03-12 09:12:59', 'type': 'INFO', 'message': 'Log message' }, ..], //Log file 'total': int, //Total log files available }"""} }) addApiView('logging.partial', self.partial, docs = { 'desc': 'Get a partial log', 'params': { 'type': {'desc': 'Type of log', 'type': 'string: all(default), error, info, debug'}, 'lines': {'desc': 'Number of lines. Last to first. Default 30'}, }, 'return': {'type': 'object', 'example': """{ 'success': True, 'log': [{ 'time': '03-12 09:12:59', 'type': 'INFO', 'message': 'Log message' }, ..] 
}"""} }) addApiView('logging.clear', self.clear, docs = { 'desc': 'Remove all the log files' }) addApiView('logging.log', self.log, docs = { 'desc': 'Log errors', 'params': { 'type': {'desc': 'Type of logging, default "error"'}, '**kwargs': {'type': 'object', 'desc': 'All other params will be printed in the log string.'}, } }) def get(self, nr = 0, **kwargs): nr = tryInt(nr) current_path = None total = 1 for x in range(0, 50): path = '%s%s' % (Env.get('log_path'), '.%s' % x if x > 0 else '') # Check see if the log exists if not os.path.isfile(path): total = x - 1 break # Set current path if x is nr: current_path = path log_content = '' if current_path: f = open(current_path, 'r') log_content = f.read() logs = self.toList(log_content) return { 'success': True, 'log': logs, 'total': total, } def partial(self, type = 'all', lines = 30, offset = 0, **kwargs): total_lines = tryInt(lines) offset = tryInt(offset) log_lines = [] for x in range(0, 50): path = '%s%s' % (Env.get('log_path'), '.%s' % x if x > 0 else '') # Check see if the log exists if not os.path.isfile(path): break f = open(path, 'r') log_content = toUnicode(f.read()) raw_lines = self.toList(log_content) raw_lines.reverse() brk = False for line in raw_lines: if type == 'all' or line.get('type') == type.upper(): log_lines.append(line) if len(log_lines) >= (total_lines + offset): brk = True break if brk: break log_lines = log_lines[offset:] log_lines.reverse() return { 'success': True, 'log': log_lines, } def toList(self, log_content = ''): logs_raw = re.split(r'\[0m\n', toUnicode(log_content)) logs = [] re_split = r'\x1b' for log_line in logs_raw: split = re.split(re_split, log_line) if split and len(split) == 3: try: date, time, log_type = splitString(split[0], ' ') timestamp = '%s %s' % (date, time) except: timestamp = 'UNKNOWN' log_type = 'UNKNOWN' message = ''.join(split[1]) if len(split) > 1 else split[0] message = re.sub('\[\d+m\[', '[', message) logs.append({ 'time': timestamp, 'type': log_type, 
'message': message }) return logs def clear(self, **kwargs): for x in range(0, 50): path = '%s%s' % (Env.get('log_path'), '.%s' % x if x > 0 else '') if not os.path.isfile(path): continue try: # Create empty file for current logging if x is 0: self.createFile(path, '') else: os.remove(path) except: log.error('Couldn\'t delete file "%s": %s', (path, traceback.format_exc())) return { 'success': True } def log(self, type = 'error', **kwargs): try: log_message = 'API log: %s' % kwargs try: getattr(log, type)(log_message) except: log.error(log_message) except: log.error('Couldn\'t log via API: %s', kwargs) return { 'success': True }
5,403
Python
.py
152
23.815789
110
0.469662
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,962
index.py
CouchPotato_CouchPotatoServer/couchpotato/core/plugins/quality/index.py
from hashlib import md5 from CodernityDB.hash_index import HashIndex class QualityIndex(HashIndex): _version = 1 def __init__(self, *args, **kwargs): kwargs['key_format'] = '32s' super(QualityIndex, self).__init__(*args, **kwargs) def make_key(self, key): return md5(key).hexdigest() def make_key_value(self, data): if data.get('_t') == 'quality' and data.get('identifier'): return md5(data.get('identifier')).hexdigest(), None
494
Python
.py
12
34.666667
66
0.638655
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,963
main.py
CouchPotato_CouchPotatoServer/couchpotato/core/plugins/quality/main.py
from math import fabs, ceil import traceback import re from CodernityDB.database import RecordNotFound from couchpotato import get_db from couchpotato.api import addApiView from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.helpers.encoding import toUnicode, ss from couchpotato.core.helpers.variable import mergeDicts, getExt, tryInt, splitString, tryFloat from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from couchpotato.core.plugins.quality.index import QualityIndex log = CPLog(__name__) class QualityPlugin(Plugin): _database = { 'quality': QualityIndex } qualities = [ {'identifier': '2160p', 'hd': True, 'allow_3d': True, 'size': (10000, 650000), 'median_size': 20000, 'label': '2160p', 'width': 3840, 'height': 2160, 'alternative': [], 'allow': [], 'ext':['mkv'], 'tags': ['x264', 'h264', '2160']}, {'identifier': 'bd50', 'hd': True, 'allow_3d': True, 'size': (20000, 60000), 'median_size': 40000, 'label': 'BR-Disk', 'alternative': ['bd25', ('br', 'disk')], 'allow': ['1080p'], 'ext':['iso', 'img'], 'tags': ['bdmv', 'certificate', ('complete', 'bluray'), 'avc', 'mvc']}, {'identifier': '1080p', 'hd': True, 'allow_3d': True, 'size': (4000, 20000), 'median_size': 10000, 'label': '1080p', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv', 'm2ts', 'ts'], 'tags': ['m2ts', 'x264', 'h264', '1080']}, {'identifier': '720p', 'hd': True, 'allow_3d': True, 'size': (3000, 10000), 'median_size': 5500, 'label': '720p', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv', 'ts'], 'tags': ['x264', 'h264', '720']}, {'identifier': 'brrip', 'hd': True, 'allow_3d': True, 'size': (700, 7000), 'median_size': 2000, 'label': 'BR-Rip', 'alternative': ['bdrip', ('br', 'rip'), 'hdtv', 'hdrip'], 'allow': ['720p', '1080p', '2160p'], 'ext':['mp4', 'avi'], 'tags': ['webdl', ('web', 'dl')]}, {'identifier': 'dvdr', 'size': (3000, 10000), 'median_size': 4500, 'label': 'DVD-R', 'alternative': 
['br2dvd', ('dvd', 'r')], 'allow': [], 'ext':['iso', 'img', 'vob'], 'tags': ['pal', 'ntsc', 'video_ts', 'audio_ts', ('dvd', 'r'), 'dvd9']}, {'identifier': 'dvdrip', 'size': (600, 2400), 'median_size': 1500, 'label': 'DVD-Rip', 'width': 720, 'alternative': [('dvd', 'rip')], 'allow': [], 'ext':['avi'], 'tags': [('dvd', 'rip'), ('dvd', 'xvid'), ('dvd', 'divx')]}, {'identifier': 'scr', 'size': (600, 1600), 'median_size': 700, 'label': 'Screener', 'alternative': ['screener', 'dvdscr', 'ppvrip', 'dvdscreener', 'hdscr', 'webrip', ('web', 'rip')], 'allow': ['dvdr', 'dvdrip', '720p', '1080p'], 'ext':[], 'tags': []}, {'identifier': 'r5', 'size': (600, 1000), 'median_size': 700, 'label': 'R5', 'alternative': ['r6'], 'allow': ['dvdr', '720p', '1080p'], 'ext':[]}, {'identifier': 'tc', 'size': (600, 1000), 'median_size': 700, 'label': 'TeleCine', 'alternative': ['telecine'], 'allow': ['720p', '1080p'], 'ext':[]}, {'identifier': 'ts', 'size': (600, 1000), 'median_size': 700, 'label': 'TeleSync', 'alternative': ['telesync', 'hdts'], 'allow': ['720p', '1080p'], 'ext':[]}, {'identifier': 'cam', 'size': (600, 1000), 'median_size': 700, 'label': 'Cam', 'alternative': ['camrip', 'hdcam'], 'allow': ['720p', '1080p'], 'ext':[]} ] pre_releases = ['cam', 'ts', 'tc', 'r5', 'scr'] threed_tags = { 'sbs': [('half', 'sbs'), 'hsbs', ('full', 'sbs'), 'fsbs'], 'ou': [('half', 'ou'), 'hou', ('full', 'ou'), 'fou'], '3d': ['2d3d', '3d2d', '3d'], } cached_qualities = None cached_order = None def __init__(self): addEvent('quality.all', self.all) addEvent('quality.single', self.single) addEvent('quality.guess', self.guess) addEvent('quality.pre_releases', self.preReleases) addEvent('quality.order', self.getOrder) addEvent('quality.ishigher', self.isHigher) addEvent('quality.isfinish', self.isFinish) addEvent('quality.fill', self.fill) addApiView('quality.size.save', self.saveSize) addApiView('quality.list', self.allView, docs = { 'desc': 'List all available qualities', 'return': {'type': 'object', 
'example': """{ 'success': True, 'list': array, qualities }"""} }) addEvent('app.initialize', self.fill, priority = 10) addEvent('app.load', self.fillBlank, priority = 120) addEvent('app.test', self.doTest) self.order = [] self.addOrder() def addOrder(self): self.order = [] for q in self.qualities: self.order.append(q.get('identifier')) def getOrder(self): return self.order def preReleases(self): return self.pre_releases def allView(self, **kwargs): return { 'success': True, 'list': self.all() } def all(self): if self.cached_qualities: return self.cached_qualities db = get_db() temp = [] for quality in self.qualities: quality_doc = db.get('quality', quality.get('identifier'), with_doc = True)['doc'] q = mergeDicts(quality, quality_doc) temp.append(q) if len(temp) == len(self.qualities): self.cached_qualities = temp return temp def single(self, identifier = ''): db = get_db() quality_dict = {} try: quality = db.get('quality', identifier, with_doc = True)['doc'] except RecordNotFound: log.error("Unable to find '%s' in the quality DB", indentifier) quality = None if quality: quality_dict = mergeDicts(self.getQuality(quality['identifier']), quality) return quality_dict def getQuality(self, identifier): for q in self.qualities: if identifier == q.get('identifier'): return q def saveSize(self, **kwargs): try: db = get_db() quality = db.get('quality', kwargs.get('identifier'), with_doc = True) if quality: quality['doc'][kwargs.get('value_type')] = tryInt(kwargs.get('value')) db.update(quality['doc']) self.cached_qualities = None return { 'success': True } except: log.error('Failed: %s', traceback.format_exc()) return { 'success': False } def fillBlank(self): db = get_db() try: existing = list(db.all('quality')) if len(self.qualities) > len(existing): log.error('Filling in new qualities') self.fill(reorder = True) except: log.error('Failed filling quality database with new qualities: %s', traceback.format_exc()) def fill(self, reorder = False): try: db = get_db() order = 0 
for q in self.qualities: existing = None try: existing = db.get('quality', q.get('identifier'), with_doc = reorder) except RecordNotFound: pass if not existing: db.insert({ '_t': 'quality', 'order': order, 'identifier': q.get('identifier'), 'size_min': tryInt(q.get('size')[0]), 'size_max': tryInt(q.get('size')[1]), }) log.info('Creating profile: %s', q.get('label')) db.insert({ '_t': 'profile', 'order': order + 20, # Make sure it goes behind other profiles 'core': True, 'qualities': [q.get('identifier')], 'label': toUnicode(q.get('label')), 'finish': [True], 'wait_for': [0], }) elif reorder: log.info2('Updating quality order') existing['doc']['order'] = order db.update(existing['doc']) order += 1 return True except: log.error('Failed: %s', traceback.format_exc()) return False def guess(self, files, extra = None, size = None, use_cache = True): if not extra: extra = {} # Create hash for cache cache_key = str([f.replace('.' + getExt(f), '') if len(getExt(f)) < 4 else f for f in files]) if use_cache: cached = self.getCache(cache_key) if cached and len(extra) == 0: return cached qualities = self.all() # Start with 0 score = {} for quality in qualities: score[quality.get('identifier')] = { 'score': 0, '3d': {} } # Use metadata titles as extra check if extra and extra.get('titles'): files.extend(extra.get('titles')) for cur_file in files: words = re.split('\W+', cur_file.lower()) name_year = fireEvent('scanner.name_year', cur_file, file_name = cur_file, single = True) threed_words = words if name_year and name_year.get('name'): split_name = splitString(name_year.get('name'), ' ') threed_words = [x for x in words if x not in split_name] for quality in qualities: contains_score = self.containsTagScore(quality, words, cur_file) threedscore = self.contains3D(quality, threed_words, cur_file) if quality.get('allow_3d') else (0, None) self.calcScore(score, quality, contains_score, threedscore, penalty = contains_score) size_scores = [] for quality in qualities: # Evaluate score 
based on size size_score = self.guessSizeScore(quality, size = size) loose_score = self.guessLooseScore(quality, extra = extra) if size_score > 0: size_scores.append(quality) self.calcScore(score, quality, size_score + loose_score) # Add additional size score if only 1 size validated if len(size_scores) == 1: self.calcScore(score, size_scores[0], 7) del size_scores # Return nothing if all scores are <= 0 has_non_zero = 0 for s in score: if score[s]['score'] > 0: has_non_zero += 1 if not has_non_zero: return None heighest_quality = max(score, key = lambda p: score[p]['score']) if heighest_quality: for quality in qualities: if quality.get('identifier') == heighest_quality: quality['is_3d'] = False if score[heighest_quality].get('3d'): quality['is_3d'] = True return self.setCache(cache_key, quality) return None def containsTagScore(self, quality, words, cur_file = ''): cur_file = ss(cur_file) score = 0.0 extension = words[-1] words = words[:-1] points = { 'identifier': 25, 'label': 25, 'alternative': 20, 'tags': 11, 'ext': 5, } scored_on = [] # Check alt and tags for tag_type in ['identifier', 'alternative', 'tags', 'label']: qualities = quality.get(tag_type, []) qualities = [qualities] if isinstance(qualities, (str, unicode)) else qualities for alt in qualities: if isinstance(alt, tuple): if len(set(words) & set(alt)) == len(alt): log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file)) score += points.get(tag_type) if isinstance(alt, (str, unicode)) and ss(alt.lower()) in words and ss(alt.lower()) not in scored_on: log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file)) score += points.get(tag_type) # Don't score twice on same tag scored_on.append(ss(alt).lower()) # Check extension for ext in quality.get('ext', []): if ext == extension: log.debug('Found %s with .%s extension in %s', (quality['identifier'], ext, cur_file)) score += points['ext'] return score def 
contains3D(self, quality, words, cur_file = ''): cur_file = ss(cur_file) for key in self.threed_tags: tags = self.threed_tags.get(key, []) for tag in tags: if isinstance(tag, tuple): if len(set(words) & set(tag)) == len(tag): log.debug('Found %s in %s', (tag, cur_file)) return 1, key elif tag in words: log.debug('Found %s in %s', (tag, cur_file)) return 1, key return 0, None def guessLooseScore(self, quality, extra = None): score = 0 if extra: # Check width resolution, range 20 if quality.get('width') and (quality.get('width') - 20) <= extra.get('resolution_width', 0) <= (quality.get('width') + 20): log.debug('Found %s via resolution_width: %s == %s', (quality['identifier'], quality.get('width'), extra.get('resolution_width', 0))) score += 10 # Check height resolution, range 20 if quality.get('height') and (quality.get('height') - 20) <= extra.get('resolution_height', 0) <= (quality.get('height') + 20): log.debug('Found %s via resolution_height: %s == %s', (quality['identifier'], quality.get('height'), extra.get('resolution_height', 0))) score += 5 if quality.get('identifier') == 'dvdrip' and 480 <= extra.get('resolution_width', 0) <= 720: log.debug('Add point for correct dvdrip resolutions') score += 1 return score def guessSizeScore(self, quality, size = None): score = 0 if size: size = tryFloat(size) size_min = tryFloat(quality['size_min']) size_max = tryFloat(quality['size_max']) if size_min <= size <= size_max: log.debug('Found %s via release size: %s MB < %s MB < %s MB', (quality['identifier'], size_min, size, size_max)) proc_range = size_max - size_min size_diff = size - size_min size_proc = (size_diff / proc_range) median_diff = quality['median_size'] - size_min median_proc = (median_diff / proc_range) max_points = 8 score += ceil(max_points - (fabs(size_proc - median_proc) * max_points)) else: score -= 5 return score def calcScore(self, score, quality, add_score, threedscore = (0, None), penalty = 0): score[quality['identifier']]['score'] += add_score 
threedscore, threedtag = threedscore if threedscore and threedtag: if threedscore not in score[quality['identifier']]['3d']: score[quality['identifier']]['3d'][threedtag] = 0 score[quality['identifier']]['3d'][threedtag] += threedscore # Set order for allow calculation (and cache) if not self.cached_order: self.cached_order = {} for q in self.qualities: self.cached_order[q.get('identifier')] = self.qualities.index(q) if penalty and add_score != 0: for allow in quality.get('allow', []): score[allow]['score'] -= ((penalty * 2) if self.cached_order[allow] < self.cached_order[quality['identifier']] else penalty) * 2 # Give panelty for all other qualities for q in self.qualities: if quality.get('identifier') != q.get('identifier') and score.get(q.get('identifier')): score[q.get('identifier')]['score'] -= 1 def isFinish(self, quality, profile, release_age = 0): if not isinstance(profile, dict) or not profile.get('qualities'): # No profile so anything (scanned) is good enough return True try: index = [i for i, identifier in enumerate(profile['qualities']) if identifier == quality['identifier'] and bool(profile['3d'][i] if profile.get('3d') else False) == bool(quality.get('is_3d', False))][0] if index == 0 or (profile['finish'][index] and int(release_age) >= int(profile.get('stop_after', [0])[0])): return True return False except: return False def isHigher(self, quality, compare_with, profile = None): if not isinstance(profile, dict) or not profile.get('qualities'): profile = fireEvent('profile.default', single = True) # Try to find quality in profile, if not found: a quality we do not want is lower than anything else try: quality_order = [i for i, identifier in enumerate(profile['qualities']) if identifier == quality['identifier'] and bool(profile['3d'][i] if profile.get('3d') else 0) == bool(quality.get('is_3d', 0))][0] except: log.debug('Quality %s not found in profile identifiers %s', (quality['identifier'] + (' 3D' if quality.get('is_3d', 0) else ''), \ [identifier + 
(' 3D' if (profile['3d'][i] if profile.get('3d') else 0) else '') for i, identifier in enumerate(profile['qualities'])])) return 'lower' # Try to find compare quality in profile, if not found: anything is higher than a not wanted quality try: compare_order = [i for i, identifier in enumerate(profile['qualities']) if identifier == compare_with['identifier'] and bool(profile['3d'][i] if profile.get('3d') else 0) == bool(compare_with.get('is_3d', 0))][0] except: log.debug('Compare quality %s not found in profile identifiers %s', (compare_with['identifier'] + (' 3D' if compare_with.get('is_3d', 0) else ''), \ [identifier + (' 3D' if (profile['3d'][i] if profile.get('3d') else 0) else '') for i, identifier in enumerate(profile['qualities'])])) return 'higher' # Note to self: a lower number means higher quality if quality_order > compare_order: return 'lower' elif quality_order == compare_order: return 'equal' else: return 'higher' def doTest(self): tests = { 'Movie Name (1999)-DVD-Rip.avi': {'size': 700, 'quality': 'dvdrip'}, 'Movie Name 1999 720p Bluray.mkv': {'size': 4200, 'quality': '720p'}, 'Movie Name 1999 BR-Rip 720p.avi': {'size': 1000, 'quality': 'brrip'}, 'Movie Name 1999 720p Web Rip.avi': {'size': 1200, 'quality': 'scr'}, 'Movie Name 1999 Web DL.avi': {'size': 800, 'quality': 'brrip'}, 'Movie.Name.1999.1080p.WEBRip.H264-Group': {'size': 1500, 'quality': 'scr'}, 'Movie.Name.1999.DVDRip-Group': {'size': 750, 'quality': 'dvdrip'}, 'Movie.Name.1999.DVD-Rip-Group': {'size': 700, 'quality': 'dvdrip'}, 'Movie.Name.1999.DVD-R-Group': {'size': 4500, 'quality': 'dvdr'}, 'Movie.Name.Camelie.1999.720p.BluRay.x264-Group': {'size': 5500, 'quality': '720p'}, 'Movie.Name.2008.German.DL.AC3.1080p.BluRay.x264-Group': {'size': 8500, 'extra': {'resolution_width': 1920, 'resolution_height': 1080} , 'quality': '1080p'}, 'Movie.Name.2004.GERMAN.AC3D.DL.1080p.BluRay.x264-Group': {'size': 8000, 'quality': '1080p'}, 'Movie.Name.2013.BR-Disk-Group.iso': {'size': 48000, 'quality': 
'bd50'}, 'Movie.Name.2013.2D+3D.BR-Disk-Group.iso': {'size': 52000, 'quality': 'bd50', 'is_3d': True}, 'Movie.Rising.Name.Girl.2011.NTSC.DVD9-GroupDVD': {'size': 7200, 'quality': 'dvdr'}, 'Movie Name (2013) 2D + 3D': {'size': 49000, 'quality': 'bd50', 'is_3d': True}, 'Movie Monuments 2013 BrRip 1080p': {'size': 1800, 'quality': 'brrip'}, 'Movie Monuments 2013 BrRip 720p': {'size': 1300, 'quality': 'brrip'}, 'The.Movie.2014.3D.1080p.BluRay.AVC.DTS-HD.MA.5.1-GroupName': {'size': 30000, 'quality': 'bd50', 'is_3d': True}, '/home/namehou/Movie Monuments (2012)/Movie Monuments.mkv': {'size': 5500, 'quality': '720p', 'is_3d': False}, '/home/namehou/Movie Monuments (2012)/Movie Monuments Full-OU.mkv': {'size': 5500, 'quality': '720p', 'is_3d': True}, '/home/namehou/Movie Monuments (2013)/Movie Monuments.mkv': {'size': 10000, 'quality': '1080p', 'is_3d': False}, '/home/namehou/Movie Monuments (2013)/Movie Monuments Full-OU.mkv': {'size': 10000, 'quality': '1080p', 'is_3d': True}, '/volume1/Public/3D/Moviename/Moviename (2009).3D.SBS.ts': {'size': 7500, 'quality': '1080p', 'is_3d': True}, '/volume1/Public/Moviename/Moviename (2009).ts': {'size': 7500, 'quality': '1080p'}, '/movies/BluRay HDDVD H.264 MKV 720p EngSub/QuiQui le fou (criterion collection #123, 1915)/QuiQui le fou (1915) 720p x264 BluRay.mkv': {'size': 5500, 'quality': '720p'}, 'C:\\movies\QuiQui le fou (collection #123, 1915)\QuiQui le fou (1915) 720p x264 BluRay.mkv': {'size': 5500, 'quality': '720p'}, 'C:\\movies\QuiQui le fou (collection #123, 1915)\QuiQui le fou (1915) half-sbs 720p x264 BluRay.mkv': {'size': 5500, 'quality': '720p', 'is_3d': True}, 'Moviename 2014 720p HDCAM XviD DualAudio': {'size': 4000, 'quality': 'cam'}, 'Moviename (2014) - 720p CAM x264': {'size': 2250, 'quality': 'cam'}, 'Movie Name (2014).mp4': {'size': 750, 'quality': 'brrip'}, 'Moviename.2014.720p.R6.WEB-DL.x264.AC3-xyz': {'size': 750, 'quality': 'r5'}, 'Movie name 2014 New Source 720p HDCAM x264 AC3 xyz': {'size': 750, 'quality': 
'cam'}, 'Movie.Name.2014.720p.HD.TS.AC3.x264': {'size': 750, 'quality': 'ts'}, 'Movie.Name.2014.1080p.HDrip.x264.aac-ReleaseGroup': {'size': 7000, 'quality': 'brrip'}, 'Movie.Name.2014.HDCam.Chinese.Subs-ReleaseGroup': {'size': 15000, 'quality': 'cam'}, 'Movie Name 2014 HQ DVDRip X264 AC3 (bla)': {'size': 0, 'quality': 'dvdrip'}, 'Movie Name1 (2012).mkv': {'size': 4500, 'quality': '720p'}, 'Movie Name (2013).mkv': {'size': 8500, 'quality': '1080p'}, 'Movie Name (2014).mkv': {'size': 4500, 'quality': '720p', 'extra': {'titles': ['Movie Name 2014 720p Bluray']}}, 'Movie Name (2015).mkv': {'size': 500, 'quality': '1080p', 'extra': {'resolution_width': 1920}}, 'Movie Name (2015).mp4': {'size': 6500, 'quality': 'brrip'}, 'Movie Name.2014.720p Web-Dl Aac2.0 h264-ReleaseGroup': {'size': 3800, 'quality': 'brrip'}, 'Movie Name.2014.720p.WEBRip.x264.AC3-ReleaseGroup': {'size': 3000, 'quality': 'scr'}, 'Movie.Name.2014.1080p.HDCAM.-.ReleaseGroup': {'size': 5300, 'quality': 'cam'}, 'Movie.Name.2014.720p.HDSCR.4PARTS.MP4.AAC.ReleaseGroup': {'size': 2401, 'quality': 'scr'}, 'Movie.Name.2014.720p.BluRay.x264-ReleaseGroup': {'size': 10300, 'quality': '720p'}, 'Movie.Name.2014.720.Bluray.x264.DTS-ReleaseGroup': {'size': 9700, 'quality': '720p'}, 'Movie Name 2015 2160p SourceSite WEBRip DD5 1 x264-ReleaseGroup': {'size': 21800, 'quality': '2160p'}, 'Movie Name 2012 2160p WEB-DL FLAC 5 1 x264-ReleaseGroup': {'size': 59650, 'quality': '2160p'} } correct = 0 for name in tests: test_quality = self.guess(files = [name], extra = tests[name].get('extra', None), size = tests[name].get('size', None), use_cache = False) or {} success = test_quality.get('identifier') == tests[name]['quality'] and test_quality.get('is_3d') == tests[name].get('is_3d', False) if not success: log.error('%s failed check, thinks it\'s "%s" expecting "%s"', (name, test_quality.get('identifier') + (' 3D' if test_quality.get('is_3d') else ''), tests[name]['quality'] + (' 3D' if tests[name].get('is_3d') else '') )) 
correct += success if correct == len(tests): log.info('Quality test successful') return True else: log.error('Quality test failed: %s out of %s succeeded', (correct, len(tests)))
25,108
Python
.py
422
46.744076
281
0.548844
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,964
slack.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/slack.py
import json from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification log = CPLog(__name__) autoload = 'Slack' class Slack(Notification): url = 'https://slack.com/api/chat.postMessage' required_confs = ('token', 'channels',) def notify(self, message='', data=None, listener=None): for key in self.required_confs: if not self.conf(key): log.warning('Slack notifications are enabled, but ' '"{0}" is not specified.'.format(key)) return False data = data or {} message = message.strip() if self.conf('include_imdb') and 'identifier' in data: template = ' http://www.imdb.com/title/{0[identifier]}/' message += template.format(data) payload = { 'token': self.conf('token'), 'text': message, 'username': self.conf('bot_name'), 'unfurl_links': self.conf('include_imdb'), 'as_user': self.conf('as_user'), 'icon_url': self.conf('icon_url'), 'icon_emoji': self.conf('icon_emoji') } channels = self.conf('channels').split(',') for channel in channels: payload['channel'] = channel.strip() response = self.urlopen(self.url, data=payload) response = json.loads(response) if not response['ok']: log.warning('Notification sending to Slack has failed. Error ' 'code: %s.', response['error']) return False return True config = [{ 'name': 'slack', 'groups': [ { 'tab': 'notifications', 'list': 'notification_providers', 'name': 'slack', 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', }, { 'name': 'token', 'description': ( 'Your Slack authentication token.', 'Can be created at https://api.slack.com/web' ) }, { 'name': 'channels', 'description': ( 'Channel to send notifications to.', 'Can be a public channel, private group or IM ' 'channel. Can be an encoded ID or a name ' '(staring with a hashtag, e.g. #general). ' 'Separate with commas in order to notify multiple ' 'channels. It is however recommended to send ' 'notifications to only one channel due to ' 'the Slack API rate limits.' 
) }, { 'name': 'include_imdb', 'default': True, 'type': 'bool', 'descrpition': 'Include a link to the movie page on IMDB.' }, { 'name': 'bot_name', 'description': 'Name of bot.', 'default': 'CouchPotato', 'advanced': True, }, { 'name': 'as_user', 'description': 'Send message as the authentication token ' ' user.', 'default': False, 'type': 'bool', 'advanced': True }, { 'name': 'icon_url', 'description': 'URL to an image to use as the icon for ' 'notifications.', 'advanced': True, }, { 'name': 'icon_emoji', 'description': ( 'Emoji to use as the icon for notifications.', 'Overrides icon_url' ), 'advanced': True, }, { 'name': 'on_snatch', 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'Also send message when movie is snatched.', }, ], } ], }]
4,503
Python
.py
116
22.336207
79
0.419694
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,965
growl.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/growl.py
import traceback from couchpotato.core.event import fireEvent, addEvent from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification from couchpotato.environment import Env from gntp import notifier log = CPLog(__name__) autoload = 'Growl' class Growl(Notification): registered = False def __init__(self): super(Growl, self).__init__() self.growl = None if self.isEnabled(): addEvent('app.load', self.register) def register(self): if self.registered: return try: hostname = self.conf('hostname') password = self.conf('password') port = self.conf('port') self.growl = notifier.GrowlNotifier( applicationName = Env.get('appname'), notifications = ['Updates'], defaultNotifications = ['Updates'], applicationIcon = self.getNotificationImage('medium'), hostname = hostname if hostname else 'localhost', password = password if password else None, port = port if port else 23053 ) self.growl.register() self.registered = True except Exception as e: if 'timed out' in str(e): self.registered = True else: log.error('Failed register of growl: %s', traceback.format_exc()) def notify(self, message = '', data = None, listener = None): if not data: data = {} self.register() try: self.growl.notify( noteType = 'Updates', title = self.default_title, description = message, sticky = False, priority = 1, ) log.info('Growl notifications sent.') return True except: log.error('Failed growl notification.') return False config = [{ 'name': 'growl', 'groups': [ { 'tab': 'notifications', 'list': 'notification_providers', 'name': 'growl', 'description': 'Version 1.4+', 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', }, { 'name': 'on_snatch', 'default': False, 'type': 'bool', 'advanced': True, 'description': 'Also send message when movie is snatched.', }, { 'name': 'hostname', 'description': 'Notify growl over network. 
Needs restart.', 'advanced': True, }, { 'name': 'port', 'type': 'int', 'advanced': True, }, { 'name': 'password', 'type': 'password', 'advanced': True, }, ], } ], }]
3,163
Python
.py
93
20.88172
81
0.471148
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,966
webhook.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/webhook.py
import traceback from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.helpers.variable import getIdentifier from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification log = CPLog(__name__) autoload = 'Webhook' class Webhook(Notification): def notify(self, message = '', data = None, listener = None): if not data: data = {} post_data = { 'message': toUnicode(message) } if getIdentifier(data): post_data.update({ 'imdb_id': getIdentifier(data) }) headers = { 'Content-type': 'application/x-www-form-urlencoded' } try: self.urlopen(self.conf('url'), headers = headers, data = post_data, show_error = False) return True except: log.error('Webhook notification failed: %s', traceback.format_exc()) return False config = [{ 'name': 'webhook', 'groups': [ { 'tab': 'notifications', 'list': 'notification_providers', 'name': 'webhook', 'label': 'Webhook', 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', }, { 'name': 'url', 'description': 'The URL to send notification data to when something happens' }, { 'name': 'on_snatch', 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'Also send message when movie is snatched.', } ] } ] }]
1,813
Python
.py
55
21.327273
99
0.496275
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,967
email_.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/email_.py
from email.mime.text import MIMEText from email.utils import formatdate, make_msgid import smtplib import traceback from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.helpers.variable import splitString from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification from couchpotato.environment import Env log = CPLog(__name__) autoload = 'Email' class Email(Notification): def notify(self, message = '', data = None, listener = None): if not data: data = {} # Extract all the settings from settings from_address = self.conf('from') to_address = self.conf('to') ssl = self.conf('ssl') smtp_server = self.conf('smtp_server') smtp_user = self.conf('smtp_user') smtp_pass = self.conf('smtp_pass') smtp_port = self.conf('smtp_port') starttls = self.conf('starttls') # Make the basic message email = MIMEText(toUnicode(message), _charset = Env.get('encoding')) email['Subject'] = '%s: %s' % (self.default_title, toUnicode(message)) email['From'] = from_address email['To'] = to_address email['Date'] = formatdate(localtime = 1) email['Message-ID'] = make_msgid() try: # Open the SMTP connection, via SSL if requested log.debug("Connecting to host %s on port %s" % (smtp_server, smtp_port)) log.debug("SMTP over SSL %s", ("enabled" if ssl == 1 else "disabled")) mailserver = smtplib.SMTP_SSL(smtp_server, smtp_port) if ssl == 1 else smtplib.SMTP(smtp_server, smtp_port) if starttls: log.debug("Using StartTLS to initiate the connection with the SMTP server") mailserver.starttls() # Say hello to the server mailserver.ehlo() # Check too see if an login attempt should be attempted if len(smtp_user) > 0: log.debug("Logging on to SMTP server using username \'%s\'%s", (smtp_user, " and a password" if len(smtp_pass) > 0 else "")) mailserver.login(smtp_user.encode('utf-8'), smtp_pass.encode('utf-8')) # Send the e-mail log.debug("Sending the email") mailserver.sendmail(from_address, splitString(to_address), email.as_string()) # Close the 
SMTP connection mailserver.quit() log.info('Email notification sent') return True except: log.error('E-mail failed: %s', traceback.format_exc()) return False config = [{ 'name': 'email', 'groups': [ { 'tab': 'notifications', 'list': 'notification_providers', 'name': 'email', 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', }, { 'name': 'from', 'label': 'Send e-mail from', }, { 'name': 'to', 'label': 'Send e-mail to', }, { 'name': 'smtp_server', 'label': 'SMTP server', }, { 'name': 'smtp_port', 'label': 'SMTP server port', 'default': '25', 'type': 'int', }, { 'name': 'ssl', 'label': 'Enable SSL', 'default': 0, 'type': 'bool', }, { 'name': 'starttls', 'label': 'Enable StartTLS', 'default': 0, 'type': 'bool', }, { 'name': 'smtp_user', 'label': 'SMTP user', }, { 'name': 'smtp_pass', 'label': 'SMTP password', 'type': 'password', }, { 'name': 'on_snatch', 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'Also send message when movie is snatched.', }, ], } ], }]
4,482
Python
.py
117
24.65812
140
0.474568
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,968
pushover.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/pushover.py
from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.helpers.variable import getTitle, getIdentifier from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification log = CPLog(__name__) autoload = 'Pushover' class Pushover(Notification): api_url = 'https://api.pushover.net' def notify(self, message = '', data = None, listener = None): if not data: data = {} api_data = { 'user': self.conf('user_key'), 'token': self.conf('api_token'), 'message': toUnicode(message), 'priority': self.conf('priority'), 'sound': self.conf('sound'), } if data and getIdentifier(data): api_data.update({ 'url': toUnicode('http://www.imdb.com/title/%s/' % getIdentifier(data)), 'url_title': toUnicode('%s on IMDb' % getTitle(data)), }) try: data = self.urlopen('%s/%s' % (self.api_url, '1/messages.json'), headers = {'Content-type': 'application/x-www-form-urlencoded'}, data = api_data) log.info2('Pushover responded with: %s', data) return True except: return False config = [{ 'name': 'pushover', 'groups': [ { 'tab': 'notifications', 'list': 'notification_providers', 'name': 'pushover', 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', }, { 'name': 'user_key', 'description': 'Register on pushover.net to get one.' }, { 'name': 'api_token', 'description': '<a href="https://pushover.net/apps/clone/couchpotato" target="_blank">Register on pushover.net</a> to get one.', 'advanced': True, 'default': 'YkxHMYDZp285L265L3IwH3LmzkTaCy', }, { 'name': 'priority', 'default': 0, 'type': 'dropdown', 'values': [('Lowest', -2), ('Low', -1), ('Normal', 0), ('High', 1)], }, { 'name': 'on_snatch', 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'Also send message when movie is snatched.', }, { 'name': 'sound', 'advanced': True, 'description': 'Define <a href="https://pushover.net/api%23sounds" target="_blank">custom sound</a> for Pushover alert.' }, ], } ], }]
2,903
Python
.py
75
24.84
148
0.462877
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,969
nmj.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/nmj.py
import re
import telnetlib

from couchpotato.api import addApiView
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification

try:
    import xml.etree.cElementTree as etree
except ImportError:
    import xml.etree.ElementTree as etree

log = CPLog(__name__)

autoload = 'NMJ'


class NMJ(Notification):
    """Popcorn Hour NMJ integration: auto-detect the database via telnet and trigger library scans."""

    # noinspection PyMissingConstructor
    def __init__(self):
        addApiView(self.testNotifyName(), self.test)
        addApiView('notify.nmj.auto_config', self.autoConfig)

        addEvent('renamer.after', self.addToLibrary)

    def autoConfig(self, host = 'localhost', **kwargs):
        """Telnet into the Popcorn Hour and discover the NMJ database and network-share mount."""
        mount = ''

        try:
            terminal = telnetlib.Telnet(host)
        except Exception:
            log.error('Warning: unable to get a telnet session to %s', host)
            return self.failed()

        log.debug('Connected to %s via telnet', host)

        # Dump /tmp/source and /tmp/netshare, then read everything the shell printed
        terminal.read_until('sh-3.00# ')
        terminal.write('cat /tmp/source\n')
        terminal.write('cat /tmp/netshare\n')
        terminal.write('exit\n')
        tnoutput = terminal.read_all()

        match = re.search(r'(.+\.db)\r\n?(.+)(?=sh-3.00# cat /tmp/netshare)', tnoutput)

        if match:
            database = match.group(1)
            device = match.group(2)
            log.info('Found NMJ database %s on device %s', (database, device))
        else:
            log.error('Could not get current NMJ database on %s, NMJ is probably not running!', host)
            return self.failed()

        # When the media lives on a network share, translate it into a mount URL
        if device.startswith('NETWORK_SHARE/'):
            match = re.search('.*(?=\r\n?%s)' % (re.escape(device[14:])), tnoutput)

            if match:
                mount = match.group().replace('127.0.0.1', host)
                log.info('Found mounting url on the Popcorn Hour in configuration: %s', mount)
            else:
                log.error('Detected a network share on the Popcorn Hour, but could not get the mounting url')
                return self.failed()

        return {
            'success': True,
            'database': database,
            'mount': mount,
        }

    def addToLibrary(self, message = None, group = None):
        """Kick off an NMJ background scan after the renamer finishes."""
        if self.isDisabled(): return
        if not group: group = {}

        host = self.conf('host')
        mount = self.conf('mount')
        database = self.conf('database')

        if mount:
            log.debug('Try to mount network drive via url: %s', mount)
            try:
                self.urlopen(mount)
            except:
                return False

        params = {
            'arg0': 'scanner_start',
            'arg1': database,
            'arg2': 'background',
            'arg3': '',
        }
        params = tryUrlencode(params)
        update_url = 'http://%(host)s:8008/metadata_database?%(params)s' % {'host': host, 'params': params}

        try:
            response = self.urlopen(update_url)
        except:
            return False

        try:
            et = etree.fromstring(response)
            result = et.findtext('returnValue')
        except SyntaxError as e:
            log.error('Unable to parse XML returned from the Popcorn Hour: %s', e)
            return False

        if int(result) > 0:
            log.error('Popcorn Hour returned an errorcode: %s', result)
            return False
        else:
            log.info('NMJ started background scan')
            return True

    def failed(self):
        return {
            'success': False
        }

    def test(self, **kwargs):
        return {
            'success': self.addToLibrary()
        }


config = [{
    'name': 'nmj',
    'groups': [
        {
            'tab': 'notifications',
            'list': 'notification_providers',
            'name': 'nmj',
            'label': 'NMJ',
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                },
                {
                    'name': 'host',
                    'default': 'localhost',
                },
                {
                    'name': 'database',
                },
                {
                    'name': 'mount',
                },
            ],
        }
    ],
}]
4,379
Python
.py
125
24.232
109
0.530651
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,970
telegrambot.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/telegrambot.py
from couchpotato.core.helpers.variable import getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
import requests
import six

log = CPLog(__name__)

autoload = 'TelegramBot'


class TelegramBot(Notification):
    """Notification provider using Telegram's Bot HTTP API (sendMessage)."""

    TELEGRAM_API = "https://api.telegram.org/bot%s/%s"

    def notify(self, message = '', data = None, listener = None):
        if not data: data = {}

        # Get configuration data
        token = self.conf('bot_token')
        usr_id = self.conf('receiver_user_id')

        # Append an IMDb URL when the notification carries a movie identifier
        if data:
            imdb_id = getIdentifier(data)
            if imdb_id:
                url = 'http://www.imdb.com/title/{0}/'.format(imdb_id)
                message = '{0}\n{1}'.format(message, url)

        # Construct the message payload
        payload = {'chat_id': usr_id, 'text': message, 'parse_mode': 'Markdown'}

        # Deliver via Telegram's Bot API
        response = requests.post(self.TELEGRAM_API % (token, "sendMessage"), data=payload)

        # Log failures (anything other than HTTP 200)
        sent_successfuly = True
        if not response.status_code == 200:
            log.error('Could not send notification to TelegramBot (token=%s). Response: [%s]', (token, response.text))
            sent_successfuly = False

        return sent_successfuly


config = [{
    'name': 'telegrambot',
    'groups': [
        {
            'tab': 'notifications',
            'list': 'notification_providers',
            'name': 'telegrambot',
            'label': 'Telegram Bot',
            'description': 'Notification provider which utilizes the bot API of the famous Telegram IM.',
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                },
                {
                    'name': 'bot_token',
                    'description': 'Your bot token. Contact <a href="http://telegram.me/BotFather" target="_blank">@BotFather</a> on Telegram to get one.'
                },
                {
                    'name': 'receiver_user_id',
                    'label': 'Recieving User/Group ID',
                    'description': 'Receiving user/group - notifications will be sent to this user or group. Contact <a href="http://telegram.me/myidbot" target="_blank">@myidbot</a> on Telegram to get an ID.'
                },
                {
                    'name': 'on_snatch',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Also send message when movie is snatched.',
                },
            ],
        }
    ],
}]
2,720
Python
.py
65
29.876923
209
0.535201
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,971
synoindex.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/synoindex.py
import os import subprocess from couchpotato.api import addApiView from couchpotato.core.event import addEvent from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification log = CPLog(__name__) autoload = 'Synoindex' class Synoindex(Notification): index_path = '/usr/syno/bin/synoindex' def __init__(self): addApiView(self.testNotifyName(), self.test) addEvent('renamer.after', self.addToLibrary) def addToLibrary(self, message = None, group = None): if self.isDisabled(): return if not group: group = {} command = [self.index_path, '-A', group.get('destination_dir')] log.info('Executing synoindex command: %s ', command) try: p = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT) out = p.communicate() log.info('Result from synoindex: %s', str(out)) return True except OSError as e: log.error('Unable to run synoindex: %s', e) return False def test(self, **kwargs): return { 'success': os.path.isfile(self.index_path) } config = [{ 'name': 'synoindex', 'groups': [ { 'tab': 'notifications', 'list': 'notification_providers', 'name': 'synoindex', 'description': 'Automaticly adds index to Synology Media Server.', 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', } ], } ], }]
1,650
Python
.py
48
25.395833
95
0.57782
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,972
trakt.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/trakt.py
from couchpotato.core.helpers.variable import getTitle, getIdentifier from couchpotato.core.logger import CPLog from couchpotato.core.media.movie.providers.automation.trakt.main import TraktBase from couchpotato.core.notifications.base import Notification log = CPLog(__name__) autoload = 'Trakt' class Trakt(Notification, TraktBase): urls = { 'library': 'sync/collection', 'unwatchlist': 'sync/watchlist/remove', 'test': 'sync/last_activities', } listen_to = ['renamer.after'] enabled_option = 'notification_enabled' def notify(self, message = '', data = None, listener = None): if not data: data = {} if listener == 'test': result = self.call((self.urls['test'])) return result else: post_data = { 'movies': [{'ids': {'imdb': getIdentifier(data)}}] if data else [] } result = self.call((self.urls['library']), post_data) if self.conf('remove_watchlist_enabled'): result = result and self.call((self.urls['unwatchlist']), post_data) return result config = [{ 'name': 'trakt', 'groups': [ { 'tab': 'notifications', 'list': 'notification_providers', 'name': 'trakt', 'label': 'Trakt', 'description': 'add movies to your collection once downloaded. Connect your account in <a href="../automation/">Automation Trakt settings</a>', 'options': [ { 'name': 'notification_enabled', 'default': False, 'type': 'enabler', }, { 'name': 'remove_watchlist_enabled', 'label': 'Remove from watchlist', 'default': False, 'type': 'bool', }, ], } ], }]
1,950
Python
.py
52
26.596154
155
0.535847
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,973
boxcar2.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/boxcar2.py
from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification log = CPLog(__name__) autoload = 'Boxcar2' class Boxcar2(Notification): url = 'https://new.boxcar.io/api/notifications' LOGO_URL = 'https://raw.githubusercontent.com/CouchPotato/CouchPotatoServer/master/couchpotato/static/images/notify.couch.small.png' def notify(self, message = '', data = None, listener = None): if not data: data = {} try: message = message.strip() long_message = '' if listener == 'test': long_message = 'This is a test message' elif data.get('identifier'): long_message = 'More movie info <a href="http://www.imdb.com/title/%s/">on IMDB</a>' % data['identifier'] data = { 'user_credentials': self.conf('token'), 'notification[title]': toUnicode('%s - %s' % (self.default_title, message)), 'notification[long_message]': toUnicode(long_message), 'notification[icon_url]': self.LOGO_URL, 'notification[source_name]': 'CouchPotato', } self.urlopen(self.url, data = data) except: log.error('Make sure the token provided is for the correct device') return False log.info('Boxcar notification successful.') return True def isEnabled(self): return super(Boxcar2, self).isEnabled() and self.conf('token') config = [{ 'name': 'boxcar2', 'groups': [ { 'tab': 'notifications', 'list': 'notification_providers', 'name': 'boxcar2', 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', }, { 'name': 'token', 'description': ('Your Boxcar access token.', 'Can be found in the app under settings') }, { 'name': 'on_snatch', 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'Also send message when movie is snatched.', }, ], } ], }]
2,405
Python
.py
60
27.716667
136
0.522523
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,974
prowl.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/prowl.py
import traceback from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification log = CPLog(__name__) autoload = 'Prowl' class Prowl(Notification): urls = { 'api': 'https://api.prowlapp.com/publicapi/add' } def notify(self, message = '', data = None, listener = None): if not data: data = {} data = { 'apikey': self.conf('api_key'), 'application': self.default_title, 'description': toUnicode(message), 'priority': self.conf('priority'), } headers = { 'Content-type': 'application/x-www-form-urlencoded' } try: self.urlopen(self.urls['api'], headers = headers, data = data, show_error = False) log.info('Prowl notifications sent.') return True except: log.error('Prowl failed: %s', traceback.format_exc()) return False config = [{ 'name': 'prowl', 'groups': [ { 'tab': 'notifications', 'list': 'notification_providers', 'name': 'prowl', 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', }, { 'name': 'api_key', 'label': 'Api key', }, { 'name': 'priority', 'default': '0', 'type': 'dropdown', 'values': [('Very Low', -2), ('Moderate', -1), ('Normal', 0), ('High', 1), ('Emergency', 2)] }, { 'name': 'on_snatch', 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'Also send message when movie is snatched.', }, ], } ], }]
2,047
Python
.py
62
20.709677
112
0.449797
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,975
androidpn.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/androidpn.py
import traceback from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification log = CPLog(__name__) autoload = 'AndroidPN' class AndroidPN(Notification): def notify(self, message = '', data = None, listener = None): if not data: data = {} data = { 'action': "send", 'uri': "", 'title': self.default_title, 'message': toUnicode(message), 'broadcast': self.conf('broadcast'), 'username': self.conf('username'), } headers = { 'Content-type': 'application/x-www-form-urlencoded' } try: self.urlopen(self.conf('url'), headers = headers, data = data, show_error = False) return True except: log.error('AndroidPN failed: %s', traceback.format_exc()) return False config = [{ 'name': 'androidpn', 'groups': [ { 'tab': 'notifications', 'list': 'notification_providers', 'name': 'androidpn', 'description': 'Self hosted Android push notification server', 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', }, { 'name': 'broadcast', 'label': 'Broadcast', 'default': 1, 'type': 'bool', 'description': 'Send notification to all users', }, { 'name': 'username', 'label': 'Username', 'description': 'Required if broadcast not selected', }, { 'name': 'url', 'label': 'Url', 'description': 'URL of server', }, { 'name': 'on_snatch', 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'Also send message when movie is snatched.', }, ], } ], }]
2,293
Python
.py
68
20.352941
94
0.444846
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,976
__init__.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/__init__.py
config = [{ 'name': 'notification_providers', 'groups': [ { 'label': 'Notifications', 'description': 'Notify when movies are done or snatched', 'type': 'list', 'name': 'notification_providers', 'tab': 'notifications', 'options': [], }, ], }]
342
Python
.py
13
17.615385
69
0.471125
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,977
pushalot.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/pushalot.py
import traceback from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification log = CPLog(__name__) autoload = 'Pushalot' class Pushalot(Notification): urls = { 'api': 'https://pushalot.com/api/sendmessage' } def notify(self, message = '', data = None, listener = None): if not data: data = {} data = { 'AuthorizationToken': self.conf('auth_token'), 'Title': self.default_title, 'Body': toUnicode(message), 'IsImportant': self.conf('important'), 'IsSilent': self.conf('silent'), 'Image': toUnicode(self.getNotificationImage('medium') + '?1'), 'Source': toUnicode(self.default_title) } headers = { 'Content-type': 'application/x-www-form-urlencoded' } try: self.urlopen(self.urls['api'], headers = headers, data = data, show_error = False) return True except: log.error('PushAlot failed: %s', traceback.format_exc()) return False config = [{ 'name': 'pushalot', 'groups': [ { 'tab': 'notifications', 'list': 'notification_providers', 'name': 'pushalot', 'description': 'for Windows Phone and Windows 8', 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', }, { 'name': 'auth_token', 'label': 'Auth Token', }, { 'name': 'silent', 'label': 'Silent', 'default': 0, 'type': 'bool', 'description': 'Don\'t send Toast notifications. Only update Live Tile', }, { 'name': 'important', 'label': 'High Priority', 'default': 0, 'type': 'bool', 'description': 'Send message with High priority.', }, { 'name': 'on_snatch', 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'Also send message when movie is snatched.', }, ], } ], }]
2,534
Python
.py
73
21.520548
94
0.456886
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,978
script.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/script.py
import traceback import subprocess import os from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.helpers.variable import getIdentifier from couchpotato.api import addApiView from couchpotato.core.event import addEvent from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification log = CPLog(__name__) autoload = 'Script' class Script(Notification): def __init__(self): addApiView(self.testNotifyName(), self.test) addEvent('renamer.after', self.runScript) def runScript(self, message = None, group = None): if self.isDisabled(): return if not group: group = {} command = [self.conf('path'), group.get('destination_dir')] log.info('Executing script command: %s ', command) try: p = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT) out = p.communicate() log.info('Result from script: %s', str(out)) return True except OSError as e: log.error('Unable to run script: %s', e) return False def test(self, **kwargs): return { 'success': os.path.isfile(self.conf('path')) } config = [{ 'name': 'script', 'groups': [ { 'tab': 'notifications', 'list': 'notification_providers', 'name': 'script', 'label': 'Script', 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', }, { 'name': 'path', 'description': 'The path to the script to execute.' } ] } ] }]
1,805
Python
.py
54
24.092593
95
0.564767
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,979
homey.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/homey.py
import traceback from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.helpers.variable import getIdentifier, getTitle from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification log = CPLog(__name__) autoload = 'Homey' class Homey(Notification): listen_to = [ 'media.available', 'renamer.after', 'movie.snatched', ] def notify(self, message = '', data = None, listener = None): if not data: data = {} url = self.conf('url') if not url: log.error('Please provide the URL') return False post_data = { 'type': listener, 'movie': getTitle(data) if listener != 'test' else 'Test Movie Title (2016)', 'message': toUnicode(message) } try: self.urlopen(url, data = post_data, show_error = False) return True except: log.error('Webhook notification failed: %s', traceback.format_exc()) return False config = [{ 'name': 'homey', 'groups': [ { 'tab': 'notifications', 'list': 'notification_providers', 'name': 'homey', 'label': 'Homey', 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', }, { 'name': 'url', 'description': 'Create a new one at <a href="https://webhooks.athom.com/" target="_blank">webhooks.athom.com</a> and add to to Homey Settings' } ] } ] }]
1,700
Python
.py
51
23.215686
162
0.530562
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,980
base.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/base.py
from couchpotato.api import addApiView from couchpotato.core.event import addEvent from couchpotato.core.logger import CPLog from couchpotato.core.media._base.providers.base import Provider from couchpotato.environment import Env log = CPLog(__name__) class Notification(Provider): type = 'notification' default_title = Env.get('appname') test_message = 'ZOMG Lazors Pewpewpew!' listen_to = [ 'media.available', 'renamer.after', 'movie.snatched', 'updater.available', 'updater.updated', 'core.message.important', ] dont_listen_to = [] def __init__(self): addEvent('notify.%s' % self.getName().lower(), self._notify) addApiView(self.testNotifyName(), self.test) # Attach listeners for listener in self.listen_to: if not listener in self.dont_listen_to: addEvent(listener, self.createNotifyHandler(listener)) def createNotifyHandler(self, listener): def notify(message = None, group = None, data = None): if not group: group = {} if not self.conf('on_snatch', default = True) and listener == 'movie.snatched': return return self._notify(message = message, data = data if data else group, listener = listener) return notify def getNotificationImage(self, size = 'small'): return 'https://raw.github.com/CouchPotato/CouchPotatoServer/master/couchpotato/static/images/notify.couch.%s.png' % size def _notify(self, *args, **kwargs): if self.isEnabled(): return self.notify(*args, **kwargs) return False def notify(self, message = '', data = None, listener = None): if not data: data = {} def test(self, **kwargs): test_type = self.testNotifyName() log.info('Sending test to %s', test_type) success = self._notify( message = self.test_message, data = {}, listener = 'test' ) return { 'success': success } def testNotifyName(self): return 'notify.%s.test' % self.getName().lower()
2,154
Python
.py
52
33.096154
129
0.631427
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,981
discord.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/discord.py
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
import json
import requests

log = CPLog(__name__)

autoload = 'Discord'


class Discord(Notification):
    """Notification provider that posts messages to a Discord channel webhook."""

    required_confs = ('webhook_url',)

    def notify(self, message='', data=None, listener=None):
        # Refuse to run while required settings are missing
        for key in self.required_confs:
            if not self.conf(key):
                log.warning('Discord notifications are enabled, but '
                            '"{0}" is not specified.'.format(key))
                return False

        data = data or {}
        message = message.strip()

        # Optionally append an IMDb link for the movie
        if self.conf('include_imdb') and 'identifier' in data:
            template = ' http://www.imdb.com/title/{0[identifier]}/'
            message += template.format(data)

        headers = {b"Content-Type": b"application/json"}
        try:
            r = requests.post(self.conf('webhook_url'),
                              data=json.dumps(dict(content=message,
                                                   username=self.conf('bot_name'),
                                                   avatar_url=self.conf('avatar_url'),
                                                   tts=self.conf('discord_tts'))),
                              headers=headers)
            r.status_code
        except Exception as e:
            # BUGFIX: the old handler logged `r.status_code`, but `r` is unbound
            # when requests.post itself raises, turning any network error into
            # a NameError. Log the caught exception instead.
            log.warning('Error Sending Discord notification: {0}'.format(e))
            return False

        return True


config = [{
    'name': 'discord',
    'groups': [
        {
            'tab': 'notifications',
            'list': 'notification_providers',
            'name': 'discord',
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                },
                {
                    'name': 'webhook_url',
                    'description': (
                        'Your Discord authentication webhook URL.',
                        'Created under channel settings.'
                    )
                },
                {
                    'name': 'include_imdb',
                    'default': True,
                    'type': 'bool',
                    # BUGFIX: key was misspelled 'descrpition', so the settings
                    # UI never showed this text.
                    'description': 'Include a link to the movie page on IMDB.'
                },
                {
                    'name': 'bot_name',
                    'description': 'Name of bot.',
                    'default': 'CouchPotato',
                    'advanced': True,
                },
                {
                    'name': 'avatar_url',
                    'description': 'URL to an image to use as the avatar for '
                                   'notifications.',
                    'default': 'https://couchpota.to/media/images/couch.png',
                    'advanced': True,
                },
                {
                    'name': 'discord_tts',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Send notification using text-to-speech.',
                },
                {
                    'name': 'on_snatch',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Also send message when movie is snatched.',
                },
            ],
        }
    ],
}]
3,219
Python
.py
84
23.27381
210
0.450096
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,982
pushbullet.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/pushbullet.py
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification

log = CPLog(__name__)

autoload = 'Pushbullet'


class Pushbullet(Notification):
    """Notification provider for Pushbullet devices and channels."""

    url = 'https://api.pushbullet.com/v2/%s'

    def notify(self, message = '', data = None, listener = None):
        if not data: data = {}

        # Get all the device IDs linked to this user
        devices = self.getDevices() or [None]
        successful = 0
        for device in devices:
            response = self.request(
                'pushes',
                device_iden = device,
                type = 'note',
                title = self.default_title,
                body = toUnicode(message)
            )

            if response:
                successful += 1
            else:
                log.error('Unable to push notification to Pushbullet device with ID %s' % device)

        # Channels are best-effort and don't affect the success result
        for channel in self.getChannels():
            self.request(
                'pushes',
                channel_tag = channel,
                type = 'note',
                title = self.default_title,
                body = toUnicode(message)
            )

        return successful == len(devices)

    def getDevices(self):
        return splitString(self.conf('devices'))

    def getChannels(self):
        return splitString(self.conf('channels'))

    def request(self, method, **kwargs):
        try:
            headers = {
                'Access-Token': self.conf('api_key')
            }

            # A None device_iden means "all devices": drop the key entirely
            if kwargs.get('device_iden') is None:
                try: del kwargs['device_iden']
                except: pass

            return self.getJsonData(self.url % method, cache_timeout = -1, headers = headers, data = kwargs)
        except Exception as ex:
            log.error('Pushbullet request failed')
            log.debug(ex)

        return None


config = [{
    'name': 'pushbullet',
    'groups': [
        {
            'tab': 'notifications',
            'list': 'notification_providers',
            'name': 'pushbullet',
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                },
                {
                    'name': 'api_key',
                    'label': 'Access Token',
                    'description': 'Can be found on <a href="https://www.pushbullet.com/account" target="_blank">Account Settings</a>',
                },
                {
                    'name': 'devices',
                    'default': '',
                    'advanced': True,
                    'description': 'IDs of devices to send notifications to, empty = all devices'
                },
                {
                    'name': 'channels',
                    'default': '',
                    'advanced': True,
                    'description': 'IDs of channels to send notifications to, empty = no channels'
                },
                {
                    'name': 'on_snatch',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Also send message when movie is snatched.',
                },
            ],
        }
    ],
}]
3,391
Python
.py
92
23.380435
135
0.483074
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,983
join.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/join.py
from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.helpers.variable import splitString from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification log = CPLog(__name__) autoload = 'Join' class Join(Notification): # URL for request url = 'https://joinjoaomgcd.appspot.com/_ah/api/messaging/v1/sendPush?title=%s&text=%s&deviceId=%s&icon=%s' # URL for notification icon icon = tryUrlencode('https://raw.githubusercontent.com/CouchPotato/CouchPotatoServer/master/couchpotato/static/images/icons/android.png') def notify(self, message = '', data = None, listener = None): if not data: data = {} # default for devices device_default = [None] apikey = self.conf('apikey') if apikey is not None: # Add apikey to request url self.url = self.url + '&apikey=' + apikey # If api key is present, default to sending to all devices device_default = ['group.all'] devices = self.getDevices() or device_default successful = 0 for device in devices: response = self.urlopen(self.url % (self.default_title, tryUrlencode(toUnicode(message)), device, self.icon)) if response: successful += 1 else: log.error('Unable to push notification to Join device with ID %s' % device) return successful == len(devices) def getDevices(self): return splitString(self.conf('devices')) config = [{ 'name': 'join', 'groups': [ { 'tab': 'notifications', 'list': 'notification_providers', 'name': 'join', 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', }, { 'name': 'devices', 'default': '', 'description': 'IDs of devices to notify, or group to send to if API key is specified (ex: group.all)' }, { 'name': 'apikey', 'default': '', 'advanced': True, 'description': 'API Key for sending to all devices, or group' }, { 'name': 'on_snatch', 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'Also send message when movie is snatched.', }, ], } ], }]
2,722
Python
.py
68
27.970588
141
0.537908
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,984
notifymyandroid.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/notifymyandroid.py
from couchpotato.core.helpers.variable import splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
import pynma
import six

log = CPLog(__name__)

autoload = 'NotifyMyAndroid'


class NotifyMyAndroid(Notification):
    """Notifier that pushes messages through the NotifyMyAndroid service via pynma."""

    def notify(self, message = '', data = None, listener = None):
        """Send `message` to every configured NMA API key.

        Returns True only if the push was accepted (HTTP 200) for all keys.
        """
        if not data: data = {}

        nma = pynma.PyNMA()
        # Up to 5 keys, comma separated (see config description below).
        keys = splitString(self.conf('api_key'))
        nma.addkey(keys)
        nma.developerkey(self.conf('dev_key'))

        # Use the first word of the message as the NMA "event" field.
        response = nma.push(
            application = self.default_title,
            event = message.split(' ')[0],
            description = message,
            priority = self.conf('priority'),
            batch_mode = len(keys) > 1
        )

        # pynma returns a per-key result dict; count the keys that got a 200.
        successful = 0
        for key in keys:
            if not response[str(key)]['code'] == six.u('200'):
                log.error('Could not send notification to NotifyMyAndroid (%s). %s', (key, response[key]['message']))
            else:
                successful += 1

        return successful == len(keys)


config = [{
    'name': 'notifymyandroid',
    'groups': [
        {
            'tab': 'notifications',
            'list': 'notification_providers',
            'name': 'notifymyandroid',
            'label': 'Notify My Android',
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                },
                {
                    'name': 'api_key',
                    'description': 'Multiple keys seperated by a comma. Maximum of 5.'
                },
                {
                    'name': 'dev_key',
                    'advanced': True,
                },
                {
                    'name': 'priority',
                    'default': 0,
                    'type': 'dropdown',
                    'values': [('Very Low', -2), ('Moderate', -1), ('Normal', 0), ('High', 1), ('Emergency', 2)],
                },
                {
                    'name': 'on_snatch',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Also send message when movie is snatched.',
                },
            ],
        }
    ],
}]
2,345
Python
.py
67
22.19403
117
0.458315
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,985
toasty.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/toasty.py
import traceback

from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification

log = CPLog(__name__)

autoload = 'Toasty'


class Toasty(Notification):
    """Push notifications through the Supertoasty service."""

    urls = {
        'api': 'http://api.supertoasty.com/notify/%s?%s'
    }

    def notify(self, message = '', data = None, listener = None):
        """Send `message` to the configured Toasty device ID; True on success."""
        if not data:
            data = {}

        # Query-string payload understood by the Supertoasty API.
        payload = {
            'title': self.default_title,
            'text': toUnicode(message),
            'sender': toUnicode("CouchPotato"),
            'image': 'https://raw.github.com/CouchPotato/CouchPotatoServer/master/couchpotato/static/images/homescreen.png',
        }

        request_url = self.urls['api'] % (self.conf('api_key'), tryUrlencode(payload))

        try:
            self.urlopen(request_url, show_error = False)
            return True
        except:
            log.error('Toasty failed: %s', traceback.format_exc())
            return False


config = [{
    'name': 'toasty',
    'groups': [
        {
            'tab': 'notifications',
            'list': 'notification_providers',
            'name': 'toasty',
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                },
                {
                    'name': 'api_key',
                    'label': 'Device ID',
                },
                {
                    'name': 'on_snatch',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Also send message when movie is snatched.',
                },
            ],
        }
    ],
}]
1,741
Python
.py
52
21.923077
124
0.48926
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,986
xbmc.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/xbmc.py
import base64
import json
import socket
import traceback
import urllib

from couchpotato.core.helpers.variable import splitString, getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
from requests.exceptions import ConnectionError, Timeout
from requests.packages.urllib3.exceptions import MaxRetryError

log = CPLog(__name__)

autoload = 'XBMC'


class XBMC(Notification):
    """Kodi/XBMC notifier.

    Sends on-screen notifications and triggers library scans via JSON-RPC
    when the host supports it (v6+, Frodo and later), falling back to the
    legacy xbmcCmds HTTP API otherwise.
    """

    listen_to = ['renamer.after', 'movie.snatched']
    # Per-host cache: True/False once detected, absent means "not probed yet".
    use_json_notifications = {}
    http_time_between_calls = 0

    def notify(self, message = '', data = None, listener = None):
        """Notify every configured host; True only if ALL calls succeeded."""
        if not data: data = {}

        hosts = splitString(self.conf('host'))

        successful = 0
        max_successful = 0
        for host in hosts:

            # First contact with this host: probe the JSON-RPC version
            # (this also delivers the message as a side effect of the probe).
            if self.use_json_notifications.get(host) is None:
                self.getXBMCJSONversion(host, message = message)

            if self.use_json_notifications.get(host):
                calls = [
                    ('GUI.ShowNotification', None, {'title': self.default_title, 'message': message, 'image': self.getNotificationImage('small')}),
                ]

                # Optionally trigger a library scan after a rename.
                if data and data.get('destination_dir') and (not self.conf('only_first') or hosts.index(host) == 0):
                    param = {}
                    # Directory-limited scan only when allowed and the host is
                    # local (or remote scans are explicitly enabled).
                    if not self.conf('force_full_scan') and (self.conf('remote_dir_scan') or socket.getfqdn('localhost') == socket.getfqdn(host.split(':')[0])):
                        param = {'directory': data['destination_dir']}
                    calls.append(('VideoLibrary.Scan', None, param))

                max_successful += len(calls)
                response = self.request(host, calls)
            else:
                # Legacy HTTP API fallback.
                response = self.notifyXBMCnoJSON(host, {'title': self.default_title, 'message': message})

                if data and data.get('destination_dir') and (not self.conf('only_first') or hosts.index(host) == 0):
                    response += self.request(host, [('VideoLibrary.Scan', None, {})])
                    max_successful += 1

                max_successful += 1

            try:
                for result in response:
                    if result.get('result') and result['result'] == 'OK':
                        successful += 1
                    elif result.get('error'):
                        log.error('Kodi error; %s: %s (%s)', (result['id'], result['error']['message'], result['error']['code']))

            except:
                log.error('Failed parsing results: %s', traceback.format_exc())

        return successful == max_successful

    def getXBMCJSONversion(self, host, message = ''):
        """Probe `host` for its JSON-RPC version and cache the capability.

        Also sends `message` through whichever transport the probe selects.
        Returns True if the message was delivered during the probe.
        """
        success = False

        # Kodi JSON-RPC version request
        response = self.request(host, [
            ('JSONRPC.Version', None, {})
        ])

        for result in response:
            if result.get('result') and type(result['result']['version']).__name__ == 'int':
                # only v2 and v4 return an int object
                # v6 (as of XBMC v12(Frodo)) is required to send notifications
                xbmc_rpc_version = str(result['result']['version'])

                log.debug('Kodi JSON-RPC Version: %s ; Notifications by JSON-RPC only supported for v6 [as of XBMC v12(Frodo)]', xbmc_rpc_version)

                # disable JSON use
                self.use_json_notifications[host] = False

                # send the text message
                resp = self.notifyXBMCnoJSON(host, {'title': self.default_title, 'message': message})
                for r in resp:
                    if r.get('result') and r['result'] == 'OK':
                        log.debug('Message delivered successfully!')
                        success = True
                        break
                    elif r.get('error'):
                        log.error('Kodi error; %s: %s (%s)', (r['id'], r['error']['message'], r['error']['code']))
                        break

            elif result.get('result') and type(result['result']['version']).__name__ == 'dict':
                # Kodi JSON-RPC v6 returns an array object containing
                # major, minor and patch number
                xbmc_rpc_version = str(result['result']['version']['major'])
                xbmc_rpc_version += '.' + str(result['result']['version']['minor'])
                xbmc_rpc_version += '.' + str(result['result']['version']['patch'])

                log.debug('Kodi JSON-RPC Version: %s', xbmc_rpc_version)

                # ok, Kodi version is supported
                self.use_json_notifications[host] = True

                # send the text message
                resp = self.request(host, [('GUI.ShowNotification', None, {'title':self.default_title, 'message':message, 'image': self.getNotificationImage('small')})])
                for r in resp:
                    if r.get('result') and r['result'] == 'OK':
                        log.debug('Message delivered successfully!')
                        success = True
                        break
                    elif r.get('error'):
                        log.error('Kodi error; %s: %s (%s)', (r['id'], r['error']['message'], r['error']['code']))
                        break

            # error getting version info (we do have contact with Kodi though)
            elif result.get('error'):
                log.error('Kodi error; %s: %s (%s)', (result['id'], result['error']['message'], result['error']['code']))

        log.debug('Use JSON notifications: %s ', self.use_json_notifications)

        return success

    def notifyXBMCnoJSON(self, host, data):
        """Send a notification through the legacy xbmcCmds HTTP API.

        Returns a faked JSON-RPC-style response list so callers can treat
        both transports uniformly.
        """
        server = 'http://%s/xbmcCmds/' % host

        # Notification(title, message [, timeout , image])
        cmd = "xbmcHttp?command=ExecBuiltIn(Notification(%s,%s,'',%s))" % (urllib.quote(getTitle(data)), urllib.quote(data['message']), urllib.quote(self.getNotificationImage('medium')))
        server += cmd

        # I have no idea what to set to, just tried text/plain and seems to be working :)
        headers = {
            'Content-Type': 'text/plain',
        }

        # authentication support
        if self.conf('password'):
            base64string = base64.encodestring('%s:%s' % (self.conf('username'), self.conf('password'))).replace('\n', '')
            headers['Authorization'] = 'Basic %s' % base64string

        try:
            log.debug('Sending non-JSON-type request to %s: %s', (host, data))

            # response wil either be 'OK':
            # <html>
            # <li>OK
            # </html>
            #
            # or 'Error':
            # <html>
            # <li>Error:<message>
            # </html>
            #
            response = self.urlopen(server, headers = headers, timeout = 3, show_error = False)

            if 'OK' in response:
                log.debug('Returned from non-JSON-type request %s: %s', (host, response))
                # manually fake expected response array
                return [{'result': 'OK'}]
            else:
                log.error('Returned from non-JSON-type request %s: %s', (host, response))
                # manually fake expected response array
                return [{'result': 'Error'}]
        except (MaxRetryError, Timeout, ConnectionError):
            log.info2('Couldn\'t send request to Kodi, assuming it\'s turned off')
            return [{'result': 'Error'}]
        except:
            log.error('Failed sending non-JSON-type request to Kodi: %s', traceback.format_exc())
            return [{'result': 'Error'}]

    def request(self, host, do_requests):
        """Batch one or more JSON-RPC calls to `host`.

        `do_requests` is a list of (method, id, params) tuples; returns the
        decoded response list, or [] on transport failure.
        """
        server = 'http://%s/jsonrpc' % host

        data = []
        for req in do_requests:
            method, id, kwargs = req

            data.append({
                'method': method,
                'params': kwargs,
                'jsonrpc': '2.0',
                'id': id if id else method,
            })
        data = json.dumps(data)

        headers = {
            'Content-Type': 'application/json',
        }

        # authentication support
        if self.conf('password'):
            base64string = base64.encodestring('%s:%s' % (self.conf('username'), self.conf('password'))).replace('\n', '')
            headers['Authorization'] = 'Basic %s' % base64string

        try:
            log.debug('Sending request to %s: %s', (host, data))
            response = self.getJsonData(server, headers = headers, data = data, timeout = 3, show_error = False)
            log.debug('Returned from request %s: %s', (host, response))

            return response
        except (MaxRetryError, Timeout, ConnectionError):
            log.info2('Couldn\'t send request to Kodi, assuming it\'s turned off')
            return []
        except:
            log.error('Failed sending request to Kodi: %s', traceback.format_exc())
            return []


config = [{
    'name': 'xbmc',
    'groups': [
        {
            'tab': 'notifications',
            'list': 'notification_providers',
            'name': 'xbmc',
            'label': 'Kodi',
            'description': 'v14 (Helix), v15 (Isengard)',
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                },
                {
                    'name': 'host',
                    'default': 'localhost:8080',
                },
                {
                    'name': 'username',
                    'default': 'xbmc',
                },
                {
                    'name': 'password',
                    'default': '',
                    'type': 'password',
                },
                {
                    'name': 'only_first',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Only update the first host when movie snatched, useful for synced Kodi',
                },
                {
                    'name': 'remote_dir_scan',
                    'label': 'Remote Folder Scan',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': ('Only scan new movie folder at remote Kodi servers.', 'Useful if the Kodi path is different from the path CPS uses.'),
                },
                {
                    'name': 'force_full_scan',
                    'label': 'Always do a full scan',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': ('Do a full scan instead of only the new movie.', 'Useful if the Kodi path is different from the path CPS uses.'),
                },
                {
                    'name': 'on_snatch',
                    'default': False,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Also send message when movie is snatched.',
                },
            ],
        }
    ],
}]
11,024
Python
.py
229
33.467249
186
0.506236
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,987
xmpp_.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/xmpp_.py
from time import sleep
import traceback

from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
import xmpp

log = CPLog(__name__)

autoload = 'Xmpp'


class Xmpp(Notification):
    """Send notifications over XMPP (Jabber, Hangouts/Google Talk, AIM, ...)."""

    def notify(self, message = '', data = None, listener = None):
        """Connect, authenticate and deliver `message` to the configured JID.

        Returns True on delivery, False on any connection/auth/send failure.
        """
        if not data: data = {}

        try:
            jid = xmpp.protocol.JID(self.conf('username'))
            client = xmpp.Client(jid.getDomain(), debug = [])

            # Connect
            if not client.connect(server = (self.conf('hostname'), self.conf('port'))):
                log.error('XMPP failed: Connection to server failed.')
                return False

            # Authenticate
            if not client.auth(jid.getNode(), self.conf('password'), resource = jid.getResource()):
                log.error('XMPP failed: Failed to authenticate.')
                return False

            # Send message
            client.send(xmpp.protocol.Message(to = self.conf('to'), body = message, typ = 'chat'))

            # Disconnect
            # some older servers will not send the message if you disconnect immediately after sending
            sleep(1)
            client.disconnect()

            log.info('XMPP notifications sent.')
            return True

        except:
            log.error('XMPP failed: %s', traceback.format_exc())

        return False


config = [{
    'name': 'xmpp',
    'groups': [
        {
            'tab': 'notifications',
            'list': 'notification_providers',
            'name': 'xmpp',
            'label': 'XMPP',
            # Fixed: the key previously contained a stray backtick
            # ('description`'), so the settings UI never saw the description.
            'description': 'for Jabber, Hangouts (Google Talk), AIM...',
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                },
                {
                    'name': 'username',
                    'description': 'User sending the message. For Hangouts, e-mail of a single-step authentication Google account.',
                },
                {
                    'name': 'password',
                    # Fixed: was 'Password'; every other notifier uses the
                    # lowercase 'password' input type.
                    'type': 'password',
                },
                {
                    'name': 'hostname',
                    'default': 'talk.google.com',
                },
                {
                    'name': 'to',
                    'description': 'Username (or e-mail for Hangouts) of the person to send the messages to.',
                },
                {
                    'name': 'port',
                    'type': 'int',
                    'default': 5222,
                },
                {
                    'name': 'on_snatch',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Also send message when movie is snatched.',
                },
            ],
        }
    ],
}]
2,910
Python
.py
79
23.012658
132
0.463042
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,988
emby.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/emby.py
import json
import urllib, urllib2

from couchpotato.core.helpers.variable import cleanHost
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification

log = CPLog(__name__)

autoload = 'Emby'


class Emby(Notification):
    """Emby media-server integration: triggers library updates and test messages."""

    def notify(self, message = '', data = None, listener = None):
        """Tell Emby the movie library changed (does not send `message` itself).

        Returns True when the request succeeded, False on URL/IO errors.
        """
        host = self.conf('host')
        apikey = self.conf('apikey')

        host = cleanHost(host)
        url = '%semby/Library/Movies/Updated' % (host)
        values = {}
        # POST with an empty body; authentication goes in the header below.
        data = urllib.urlencode(values)

        try:
            req = urllib2.Request(url, data)
            req.add_header('X-MediaBrowser-Token', apikey)
            response = urllib2.urlopen(req)
            result = response.read()
            response.close()
            return True
        except (urllib2.URLError, IOError), e:
            return False

    def test(self, **kwargs):
        """Send a test notification to the Emby admin notification endpoint.

        NOTE(review): on failure this returns bare False rather than a
        {'success': False} dict like the success path — confirm the API
        handler tolerates both shapes.
        """
        host = self.conf('host')
        apikey = self.conf('apikey')
        message = self.test_message

        host = cleanHost(host)
        url = '%semby/Notifications/Admin' % (host)
        values = {'Name': 'CouchPotato', 'Description': message, 'ImageUrl': 'https://raw.githubusercontent.com/CouchPotato/CouchPotatoServer/master/couchpotato/static/images/notify.couch.small.png'}
        data = json.dumps(values)

        try:
            req = urllib2.Request(url, data)
            req.add_header('X-MediaBrowser-Token', apikey)
            req.add_header('Content-Type', 'application/json')
            response = urllib2.urlopen(req)
            result = response.read()
            response.close()
            return {
                'success': True
            }
        except (urllib2.URLError, IOError), e:
            return False


config = [{
    'name': 'emby',
    'groups': [
        {
            'tab': 'notifications',
            'list': 'notification_providers',
            'name': 'emby',
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                },
                {
                    'name': 'host',
                    'default': 'localhost:8096',
                    'description': 'IP:Port, default localhost:8096'
                },
                {
                    'name': 'apikey',
                    'label': 'API Key',
                    'default': '',
                },
            ],
        }
    ],
}]
2,483
Python
.py
71
23.633803
199
0.519215
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,989
client.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/plex/client.py
import json

from couchpotato import CPLog
from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import tryUrlencode
import requests

log = CPLog(__name__)


class PlexClientProtocol(object):
    """Base class for Plex client transports; subclasses implement notify()."""

    def __init__(self, plex):
        self.plex = plex

        # Each protocol instance listens for client notifications; a subclass
        # handles the event only when the client's protocol matches.
        addEvent('notify.plex.notifyClient', self.notify)

    def notify(self, client, message):
        raise NotImplementedError()


class PlexClientHTTP(PlexClientProtocol):
    """Legacy xbmcHttp transport for older Plex/XBMC clients."""

    def request(self, command, client):
        """Fire `command` at the client's xbmcCmds endpoint; True on success."""
        endpoint = 'http://%s:%s/xbmcCmds/xbmcHttp/?%s' % (
            client['address'], client['port'], tryUrlencode(command))

        try:
            self.plex.urlopen(endpoint, headers = {}, timeout = 3, show_error = False)
        except Exception as err:
            log.error("Couldn't sent command to Plex: %s", err)
            return False

        return True

    def notify(self, client, message):
        # Only handle clients speaking the legacy HTTP protocol.
        if client.get('protocol') != 'xbmchttp':
            return None

        command = {
            'command': 'ExecBuiltIn',
            'parameter': 'Notification(CouchPotato, %s)' % message,
        }
        return self.request(command, client)


class PlexClientJSON(PlexClientProtocol):
    """JSON-RPC transport for newer Plex/XBMC clients."""

    def request(self, method, params, client):
        """POST a JSON-RPC call to the client; True on success."""
        log.debug('sendJSON("%s", %s, %s)', (method, params, client))

        endpoint = 'http://%s:%s/jsonrpc' % (client['address'], client['port'])
        rpc_call = {
            'id': 1,
            'jsonrpc': '2.0',
            'method': method,
            'params': params,
        }

        try:
            requests.post(
                endpoint,
                headers = {'Content-Type': 'application/json'},
                timeout = 3,
                data = json.dumps(rpc_call)
            )
        except Exception as err:
            log.error("Couldn't sent command to Plex: %s", err)
            return False

        return True

    def notify(self, client, message):
        # Only handle JSON-capable clients.
        if client.get('protocol') not in ['xbmcjson', 'plex']:
            return None

        return self.request('GUI.ShowNotification', {
            'title': 'CouchPotato',
            'message': message,
        }, client)
2,238
Python
.py
64
25.734375
90
0.575081
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,990
__init__.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/plex/__init__.py
from .main import Plex


def autoload():
    # Plugin entry point: the loader calls this to instantiate the notifier.
    return Plex()


config = [{
    'name': 'plex',
    'groups': [
        {
            'tab': 'notifications',
            'list': 'notification_providers',
            'name': 'plex',
            'options': [
                {
                    'name': 'enabled',
                    'default': 0,
                    'type': 'enabler',
                },
                {
                    'name': 'media_server',
                    'label': 'Media Server',
                    'default': 'localhost',
                    'description': 'Hostname/IP, default localhost'
                },
                {
                    'name': 'media_server_port',
                    'label': 'Port',
                    'default': '32400',
                    'description': 'Connection to the Media Server should use this port'
                },
                {
                    'name': 'use_https',
                    'label': 'Use HTTPS',
                    'default': '0',
                    'type': 'bool',
                    'description': 'Connection to the Media Server should use HTTPS instead of HTTP'
                },
                {
                    'name': 'username',
                    'label': 'Username',
                    'default': '',
                    'description': 'Required for myPlex'
                },
                {
                    'name': 'password',
                    'label': 'Password',
                    'default': '',
                    'type': 'password',
                    'description': 'Required for myPlex'
                },
                {
                    'name': 'auth_token',
                    'label': 'Auth Token',
                    'default': '',
                    'advanced': True,
                    'description': 'Required for myPlex'
                },
                {
                    'name': 'clients',
                    'default': '',
                    'description': 'Comma separated list of client names\'s (computer names). Top right when you start Plex'
                },
                {
                    'name': 'on_snatch',
                    'default': 0,
                    'type': 'bool',
                    'advanced': True,
                    'description': 'Also send message when movie is snatched.',
                },
            ],
        }
    ],
}]
2,434
Python
.py
71
17.183099
124
0.346186
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,991
main.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/plex/main.py
from couchpotato.core.event import addEvent, fireEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification

from .client import PlexClientHTTP, PlexClientJSON
from .server import PlexServer

log = CPLog(__name__)


class Plex(Notification):
    """Plex notifier: pushes on-screen messages to clients and refreshes the library."""

    http_time_between_calls = 0

    def __init__(self):
        super(Plex, self).__init__()

        # The server wrapper handles client discovery and library refreshes.
        self.server = PlexServer(self)

        self.client_protocols = {
            'http': PlexClientHTTP(self),
            'json': PlexClientJSON(self),
        }

        addEvent('renamer.after', self.addToLibrary)

    def addToLibrary(self, message = None, group = None):
        """Trigger a Plex library refresh after a rename; no-op when disabled."""
        if self.isDisabled():
            return
        if not group:
            group = {}

        return self.server.refresh()

    def getClientNames(self):
        """Configured client names, stripped and lowercased."""
        raw_names = self.conf('clients').split(',')
        return [name.strip().lower() for name in raw_names]

    def notifyClients(self, message, client_names):
        """Send `message` to each named client.

        Returns False when any known, non-stale client failed.
        NOTE: despite the "retrying" log text, no second delivery attempt is
        made in the same call — the client list is merely refreshed.
        """
        all_ok = True

        for name in client_names:
            client = self.server.clients.get(name)

            delivered = False
            if client and client['found']:
                delivered = fireEvent('notify.plex.notifyClient', client, message, single = True)

            if delivered:
                continue

            if self.server.staleClients() or not client:
                log.info('Failed to send notification to client "%s". Client list is stale, updating the client list and retrying.', name)
                self.server.updateClients(self.getClientNames())
            else:
                log.warning('Failed to send notification to client %s, skipping this time', name)
                all_ok = False

        return all_ok

    def notify(self, message = '', data = None, listener = None):
        """Notify all configured clients."""
        if not data:
            data = {}

        return self.notifyClients(message, self.getClientNames())

    def test(self, **kwargs):
        """Send a test notification and a test refresh; succeed if either works."""
        test_type = self.testNotifyName()
        log.info('Sending test to %s', test_type)

        sent = self.notify(
            message = self.test_message,
            data = {},
            listener = 'test'
        )
        refreshed = self.addToLibrary()

        return {'success': sent or refreshed}
2,356
Python
.py
54
33.203704
108
0.610184
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,992
server.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/plex/server.py
from datetime import timedelta, datetime
from urlparse import urlparse
import traceback

from couchpotato.core.helpers.variable import cleanHost
from couchpotato import CPLog

try:
    import xml.etree.cElementTree as etree
except ImportError:
    import xml.etree.ElementTree as etree

log = CPLog(__name__)


class PlexServer(object):
    """Wraps the Plex Media Server HTTP API: client discovery and library refresh."""

    def __init__(self, plex):
        self.plex = plex

        # name -> client-info dict, filled by updateClients().
        self.clients = {}
        self.last_clients_update = None

    def staleClients(self):
        """True when the cached client list is older than 15 minutes (or missing)."""
        if not self.last_clients_update:
            return True

        return self.last_clients_update + timedelta(minutes=15) < datetime.now()

    def request(self, path, data_type='xml'):
        """GET `path` from the media server; parsed XML by default, raw text otherwise.

        Handles myPlex auth: fetches an X-Plex-Token from plex.tv when only a
        username/password is configured.
        """
        if not self.plex.conf('media_server'):
            log.warning("Plex media server hostname is required")
            return None

        if path.startswith('/'):
            path = path[1:]

        #Maintain support for older Plex installations without myPlex
        if not self.plex.conf('auth_token') and not self.plex.conf('username') and not self.plex.conf('password'):
            data = self.plex.urlopen('%s/%s' % (
                self.createHost(self.plex.conf('media_server'), port = self.plex.conf('media_server_port'), use_https = self.plex.conf('use_https')),
                path
            ))
        else:
            #Fetch X-Plex-Token if it doesn't exist but a username/password do
            if not self.plex.conf('auth_token') and (self.plex.conf('username') and self.plex.conf('password')):
                import urllib2, base64

                log.info("Fetching a new X-Plex-Token from plex.tv")

                username = self.plex.conf('username')
                password = self.plex.conf('password')

                req = urllib2.Request("https://plex.tv/users/sign_in.xml", data="")
                authheader = "Basic %s" % base64.encodestring('%s:%s' % (username, password))[:-1]
                req.add_header("Authorization", authheader)
                req.add_header("X-Plex-Device-Name", "CouchPotato")
                req.add_header("X-Plex-Product", "CouchPotato Notifier")
                req.add_header("X-Plex-Client-Identifier", "b3a6b24dcab2224bdb101fc6aa08ea5e2f3147d6")
                req.add_header("X-Plex-Version", "1.0")

                # Fixed: `response` was unbound when urlopen raised, and the
                # old error handler called an undefined name `ex(e)`.
                response = None
                try:
                    response = urllib2.urlopen(req)
                except urllib2.URLError:
                    log.info('Error fetching token from plex.tv: %s', traceback.format_exc())

                if response is not None:
                    try:
                        auth_tree = etree.parse(response)
                        token = auth_tree.findall(".//authentication-token")[0].text
                        self.plex.conf('auth_token', token)
                    except (ValueError, IndexError) as e:
                        log.info("Error parsing plex.tv response: " + str(e))

            #Add X-Plex-Token header for myPlex support workaround
            data = self.plex.urlopen('%s/%s?X-Plex-Token=%s' % (
                self.createHost(self.plex.conf('media_server'), port = self.plex.conf('media_server_port'), use_https = self.plex.conf('use_https')),
                path,
                self.plex.conf('auth_token')
            ))

        if data_type == 'xml':
            return etree.fromstring(data)
        else:
            return data

    def updateClients(self, client_names):
        """Re-query the server for connected clients and cache the named ones."""
        log.info('Searching for clients on Plex Media Server')

        self.clients = {}

        result = self.request('clients')
        if not result:
            return

        found_clients = [
            c for c in result.findall('Server')
            if c.get('name') and c.get('name').lower() in client_names
        ]

        # Store client details in cache
        for client in found_clients:
            name = client.get('name').lower()

            self.clients[name] = {
                'name': client.get('name'),
                'found': True,
                'address': client.get('address'),
                'port': client.get('port'),
                'protocol': client.get('protocol', 'xbmchttp')
            }

            client_names.remove(name)

        # Store dummy info for missing clients
        for client_name in client_names:
            self.clients[client_name] = {
                'found': False
            }

        if len(client_names) > 0:
            log.debug('Unable to find clients: %s', ', '.join(client_names))

        self.last_clients_update = datetime.now()

    def refresh(self, section_types=None):
        """Ask the server to rescan every library section of the given types."""
        if not section_types:
            section_types = ['movie']

        sections = self.request('library/sections')

        try:
            for section in sections.findall('Directory'):
                if section.get('type') not in section_types:
                    continue
                self.request('library/sections/%s/refresh' % section.get('key'), 'text')
        except:
            log.error('Plex library update failed for %s, Media Server not running: %s',
                      (self.plex.conf('media_server'), traceback.format_exc(1)))
            return False

        return True

    def createHost(self, host, port = None, use_https = False):
        """Normalise `host` into a scheme-prefixed URL, appending `port` if absent."""
        h = cleanHost(host, True, use_https)
        p = urlparse(h)
        h = h.rstrip('/')

        if port and not p.port:
            h += ':%s' % port

        return h
5,347
Python
.py
115
34.313043
149
0.570246
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,993
index.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/core/index.py
from CodernityDB.tree_index import TreeBasedIndex


class NotificationIndex(TreeBasedIndex):
    """Tree index over all notification documents, keyed by their timestamp."""

    _version = 1

    custom_header = """from CodernityDB.tree_index import TreeBasedIndex
import time"""

    def __init__(self, *args, **kwargs):
        # Keys are unsigned ints (epoch seconds).
        kwargs['key_format'] = 'I'
        super(NotificationIndex, self).__init__(*args, **kwargs)

    def make_key(self, key):
        return key

    def make_key_value(self, data):
        # Only notification documents belong in this index.
        if data.get('_t') != 'notification':
            return None
        return data.get('time'), None


class NotificationUnreadIndex(TreeBasedIndex):
    """Tree index over unread notification documents, keyed by their timestamp."""

    _version = 1

    custom_header = """from CodernityDB.tree_index import TreeBasedIndex
import time"""

    def __init__(self, *args, **kwargs):
        # Keys are unsigned ints (epoch seconds).
        kwargs['key_format'] = 'I'
        super(NotificationUnreadIndex, self).__init__(*args, **kwargs)

    def make_key(self, key):
        return key

    def make_key_value(self, data):
        # Index notifications that have not been marked read.
        if data.get('_t') != 'notification':
            return None
        if data.get('read'):
            return None
        return data.get('time'), None
1,015
Python
.py
25
34
72
0.645194
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,994
main.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/core/main.py
from operator import itemgetter import threading import time import traceback import uuid from CodernityDB.database import RecordDeleted from couchpotato import get_db from couchpotato.api import addApiView, addNonBlockApiView from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.helpers.variable import tryInt, splitString from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification from .index import NotificationIndex, NotificationUnreadIndex from couchpotato.environment import Env from tornado.ioloop import IOLoop log = CPLog(__name__) class CoreNotifier(Notification): _database = { 'notification': NotificationIndex, 'notification_unread': NotificationUnreadIndex } m_lock = None listen_to = [ 'media.available', 'renamer.after', 'movie.snatched', 'updater.available', 'updater.updated', 'core.message', 'core.message.important', ] def __init__(self): super(CoreNotifier, self).__init__() addEvent('notify', self.notify) addEvent('notify.frontend', self.frontend) addApiView('notification.markread', self.markAsRead, docs = { 'desc': 'Mark notifications as read', 'params': { 'ids': {'desc': 'Notification id you want to mark as read. All if ids is empty.', 'type': 'int (comma separated)'}, }, }) addApiView('notification.list', self.listView, docs = { 'desc': 'Get list of notifications', 'params': { 'limit_offset': {'desc': 'Limit and offset the notification list. 
Examples: "50" or "50,30"'}, }, 'return': {'type': 'object', 'example': """{ 'success': True, 'empty': bool, any notification returned or not, 'notifications': array, notifications found, }"""} }) addNonBlockApiView('notification.listener', (self.addListener, self.removeListener)) addApiView('notification.listener', self.listener) fireEvent('schedule.interval', 'core.check_messages', self.checkMessages, hours = 12, single = True) fireEvent('schedule.interval', 'core.clean_messages', self.cleanMessages, seconds = 15, single = True) addEvent('app.load', self.clean) if not Env.get('dev'): addEvent('app.load', self.checkMessages) self.messages = [] self.listeners = [] self.m_lock = threading.Lock() def clean(self): try: db = get_db() for n in db.all('notification', with_doc = True): if n['doc'].get('time', 0) <= (int(time.time()) - 2419200): db.delete(n['doc']) except: log.error('Failed cleaning notification: %s', traceback.format_exc()) def markAsRead(self, ids = None, **kwargs): ids = splitString(ids) if ids else None try: db = get_db() for x in db.all('notification_unread', with_doc = True): if not ids or x['_id'] in ids: x['doc']['read'] = True db.update(x['doc']) return { 'success': True } except: log.error('Failed mark as read: %s', traceback.format_exc()) return { 'success': False } def listView(self, limit_offset = None, **kwargs): db = get_db() if limit_offset: splt = splitString(limit_offset) limit = tryInt(splt[0]) offset = tryInt(0 if len(splt) is 1 else splt[1]) results = db.all('notification', limit = limit, offset = offset, with_doc = True) else: results = db.all('notification', limit = 200, with_doc = True) notifications = [] for n in results: notifications.append(n['doc']) return { 'success': True, 'empty': len(notifications) == 0, 'notifications': notifications } def checkMessages(self): prop_name = 'messages.last_check' last_check = tryInt(Env.prop(prop_name, default = 0)) messages = fireEvent('cp.messages', last_check = last_check, single = True) or 
[] for message in messages: if message.get('time') > last_check: message['sticky'] = True # Always sticky core messages message_type = 'core.message.important' if message.get('important') else 'core.message' fireEvent(message_type, message = message.get('message'), data = message) if last_check < message.get('time'): last_check = message.get('time') Env.prop(prop_name, value = last_check) def notify(self, message = '', data = None, listener = None): if not data: data = {} n = { '_t': 'notification', 'time': int(time.time()), } try: db = get_db() n['message'] = toUnicode(message) if data.get('sticky'): n['sticky'] = True if data.get('important'): n['important'] = True db.insert(n) self.frontend(type = listener, data = n) return True except: log.error('Failed notify "%s": %s', (n, traceback.format_exc())) def frontend(self, type = 'notification', data = None, message = None): if not data: data = {} log.debug('Notifying frontend') self.m_lock.acquire() notification = { 'message_id': str(uuid.uuid4()), 'time': time.time(), 'type': type, 'data': data, 'message': message, } self.messages.append(notification) while len(self.listeners) > 0 and not self.shuttingDown(): try: listener, last_id = self.listeners.pop() IOLoop.current().add_callback(listener, { 'success': True, 'result': [notification], }) except: log.debug('Failed sending to listener: %s', traceback.format_exc()) self.listeners = [] self.m_lock.release() log.debug('Done notifying frontend') def addListener(self, callback, last_id = None): if last_id: messages = self.getMessages(last_id) if len(messages) > 0: return callback({ 'success': True, 'result': messages, }) self.m_lock.acquire() self.listeners.append((callback, last_id)) self.m_lock.release() def removeListener(self, callback): self.m_lock.acquire() new_listeners = [] for list_tuple in self.listeners: try: listener, last_id = list_tuple if listener != callback: new_listeners.append(list_tuple) except: log.debug('Failed removing listener: %s', 
traceback.format_exc()) self.listeners = new_listeners self.m_lock.release() def cleanMessages(self): if len(self.messages) == 0: return log.debug('Cleaning messages') self.m_lock.acquire() time_ago = (time.time() - 15) self.messages[:] = [m for m in self.messages if (m['time'] > time_ago)] self.m_lock.release() log.debug('Done cleaning messages') def getMessages(self, last_id): log.debug('Getting messages with id: %s', last_id) self.m_lock.acquire() recent = [] try: index = map(itemgetter('message_id'), self.messages).index(last_id) recent = self.messages[index + 1:] except: pass self.m_lock.release() log.debug('Returning for %s %s messages', (last_id, len(recent))) return recent def listener(self, init = False, **kwargs): messages = [] # Get last message if init: db = get_db() notifications = db.all('notification') for n in notifications: try: doc = db.get('id', n.get('_id')) if doc.get('time') > (time.time() - 604800): messages.append(doc) except RecordDeleted: pass return { 'success': True, 'result': messages, }
8,645
Python
.py
216
29.189815
131
0.56001
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,995
__init__.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/twitter/__init__.py
from .main import Twitter def autoload(): return Twitter() config = [{ 'name': 'twitter', 'groups': [ { 'tab': 'notifications', 'list': 'notification_providers', 'name': 'twitter', 'options': [ { 'name': 'enabled', 'default': 0, 'type': 'enabler', }, { 'name': 'access_token_key', 'advanced': True, }, { 'name': 'screen_name', 'advanced': True, }, { 'name': 'access_token_secret', 'advanced': True, }, { 'name': 'mention', 'description': 'Add a mention to this user to the tweet.', }, { 'name': 'on_snatch', 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'Also send message when movie is snatched.', }, { 'name': 'direct_message', 'default': 0, 'type': 'bool', 'advanced': True, 'description': 'Use direct messages for the notifications (Also applies to the mentioned users).', }, ], } ], }]
1,547
Python
.py
50
15.16
118
0.338688
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,996
main.py
CouchPotato_CouchPotatoServer/couchpotato/core/notifications/twitter/main.py
from urlparse import parse_qsl from couchpotato.api import addApiView from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.helpers.variable import cleanHost from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification from couchpotato.environment import Env from pytwitter import Api import oauth2 log = CPLog(__name__) class Twitter(Notification): consumer_key = 'xcVNnQ7VjAB7DyuKXREkyLHy5' consumer_secret = 'iYeY4i5haITAsHToDJgv1VlLE2H1xnYuXKzZof7OKCOIIPGCLT' request_token = None urls = { 'request': 'https://api.twitter.com/oauth/request_token', 'access': 'https://api.twitter.com/oauth/access_token', 'authorize': 'https://api.twitter.com/oauth/authorize', } def __init__(self): super(Twitter, self).__init__() addApiView('notify.%s.auth_url' % self.getName().lower(), self.getAuthorizationUrl) addApiView('notify.%s.credentials' % self.getName().lower(), self.getCredentials) def notify(self, message = '', data = None, listener = None): if not data: data = {} api = Api(self.consumer_key, self.consumer_secret, self.conf('access_token_key'), self.conf('access_token_secret')) direct_message = self.conf('direct_message') direct_message_users = self.conf('screen_name') mention = self.conf('mention') mention_tag = None if mention: if direct_message: direct_message_users = '%s %s' % (direct_message_users, mention) direct_message_users = direct_message_users.replace('@', ' ') direct_message_users = direct_message_users.replace(',', ' ') else: mention_tag = '@%s' % mention.lstrip('@') message = '%s %s' % (message, mention_tag) try: if direct_message: for user in direct_message_users.split(): api.PostDirectMessage('[%s] %s' % (self.default_title, message), screen_name = user) else: update_message = '[%s] %s' % (self.default_title, message) if len(update_message) > 140: if mention_tag: api.PostUpdate(update_message[:135 - len(mention_tag)] + ('%s 1/2 ' % mention_tag)) api.PostUpdate(update_message[135 - 
len(mention_tag):] + ('%s 2/2 ' % mention_tag)) else: api.PostUpdate(update_message[:135] + ' 1/2') api.PostUpdate(update_message[135:] + ' 2/2') else: api.PostUpdate(update_message) except Exception as e: log.error('Error sending tweet: %s', e) return False return True def getAuthorizationUrl(self, host = None, **kwargs): callback_url = cleanHost(host) + '%snotify.%s.credentials/' % (Env.get('api_base').lstrip('/'), self.getName().lower()) oauth_consumer = oauth2.Consumer(self.consumer_key, self.consumer_secret) oauth_client = oauth2.Client(oauth_consumer) resp, content = oauth_client.request(self.urls['request'], 'POST', body = tryUrlencode({'oauth_callback': callback_url})) if resp['status'] != '200': log.error('Invalid response from Twitter requesting temp token: %s', resp['status']) return { 'success': False, } else: self.request_token = dict(parse_qsl(content)) auth_url = self.urls['authorize'] + ("?oauth_token=%s" % self.request_token['oauth_token']) log.info('Redirecting to "%s"', auth_url) return { 'success': True, 'url': auth_url, } def getCredentials(self, oauth_verifier, **kwargs): token = oauth2.Token(self.request_token['oauth_token'], self.request_token['oauth_token_secret']) token.set_verifier(oauth_verifier) oauth_consumer = oauth2.Consumer(key = self.consumer_key, secret = self.consumer_secret) oauth_client = oauth2.Client(oauth_consumer, token) resp, content = oauth_client.request(self.urls['access'], method = 'POST', body = 'oauth_verifier=%s' % oauth_verifier) access_token = dict(parse_qsl(content)) if resp['status'] != '200': log.error('The request for an access token did not succeed: %s', resp['status']) return 'Twitter auth failed' else: log.debug('Tokens: %s, %s', (access_token['oauth_token'], access_token['oauth_token_secret'])) self.conf('access_token_key', value = access_token['oauth_token']) self.conf('access_token_secret', value = access_token['oauth_token_secret']) self.conf('screen_name', value = access_token['screen_name']) 
self.request_token = None return 'redirect', Env.get('web_base') + 'settings/notifications/'
5,012
Python
.py
92
43.271739
129
0.606873
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,997
scrollspy.js
CouchPotato_CouchPotatoServer/couchpotato/static/scripts/library/scrollspy.js
/* --- description: ScrollSpy authors: - David Walsh (http://davidwalsh.name) license: - MIT-style license requires: core/1.2.1: '*' provides: - ScrollSpy ... */ var ScrollSpy = new Class({ /* implements */ Implements: [Options,Events], /* options */ options: { container: window, max: 0, min: 0, mode: 'vertical'/*, onEnter: $empty, onLeave: $empty, onScroll: $empty, onTick: $empty */ }, /* initialization */ initialize: function(options) { /* set options */ this.setOptions(options); this.container = document.id(this.options.container); this.enters = this.leaves = 0; this.inside = false; /* listener */ var self = this; this.listener = function(e) { /* if it has reached the level */ var position = self.container.getScroll(), xy = position[self.options.mode == 'vertical' ? 'y' : 'x'], min = typeOf(self.options.min) == 'function' ? self.options.min() : self.options.min, max = typeOf(self.options.max) == 'function' ? self.options.max() : self.options.max; if(xy >= min && (max === 0 || xy <= max)) { /* trigger enter event if necessary */ if(!self.inside) { /* record as inside */ self.inside = true; self.enters++; /* fire enter event */ self.fireEvent('enter',[position,self.enters,e]); } /* trigger the "tick", always */ self.fireEvent('tick',[position,self.inside,self.enters,self.leaves,e]); } /* trigger leave */ else if(self.inside){ self.inside = false; self.leaves++; self.fireEvent('leave',[position,self.leaves,e]); } /* fire scroll event */ self.fireEvent('scroll',[position,self.inside,self.enters,self.leaves,e]); }; /* make it happen */ this.addListener(); }, /* starts the listener */ start: function() { this.container.addEvent('scroll',this.listener); }, /* stops the listener */ stop: function() { this.container.removeEvent('scroll',this.listener); }, /* legacy */ addListener: function() { this.start(); } });
2,035
Python
.py
80
22.0625
89
0.638517
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,998
scgi.py
CouchPotato_CouchPotatoServer/libs/rtorrent/lib/xmlrpc/clients/scgi.py
#!/usr/bin/python # rtorrent_xmlrpc # (c) 2011 Roger Que <alerante@bellsouth.net> # # Modified portions: # (c) 2013 Dean Gardiner <gardiner91@gmail.com> # # Python module for interacting with rtorrent's XML-RPC interface # directly over SCGI, instead of through an HTTP server intermediary. # Inspired by Glenn Washburn's xmlrpc2scgi.py [1], but subclasses the # built-in xmlrpclib classes so that it is compatible with features # such as MultiCall objects. # # [1] <http://libtorrent.rakshasa.no/wiki/UtilsXmlrpc2scgi> # # Usage: server = SCGIServerProxy('scgi://localhost:7000/') # server = SCGIServerProxy('scgi:///path/to/scgi.sock') # print server.system.listMethods() # mc = xmlrpclib.MultiCall(server) # mc.get_up_rate() # mc.get_down_rate() # print mc() # # # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # In addition, as a special exception, the copyright holders give # permission to link the code of portions of this program with the # OpenSSL library under certain conditions as described in each # individual source file, and distribute linked combinations # including the two. # # You must obey the GNU General Public License in all respects for # all of the code used other than OpenSSL. 
If you modify file(s) # with this exception, you may extend this exception to your version # of the file(s), but you are not obligated to do so. If you do not # wish to do so, delete this exception statement from your version. # If you delete this exception statement from all source files in the # program, then also delete it here. # # # # Portions based on Python's xmlrpclib: # # Copyright (c) 1999-2002 by Secret Labs AB # Copyright (c) 1999-2002 by Fredrik Lundh # # By obtaining, using, and/or copying this software and/or its # associated documentation, you agree that you have read, understood, # and will comply with the following terms and conditions: # # Permission to use, copy, modify, and distribute this software and # its associated documentation for any purpose and without fee is # hereby granted, provided that the above copyright notice appears in # all copies, and that both that copyright notice and this permission # notice appear in supporting documentation, and that the name of # Secret Labs AB or the author not be used in advertising or publicity # pertaining to distribution of the software without specific, written # prior permission. # # SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD # TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- # ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR # BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE # OF THIS SOFTWARE. 
import urllib import xmlrpclib from rtorrent.lib.xmlrpc.transports.scgi import SCGITransport class SCGIServerProxy(xmlrpclib.ServerProxy): def __init__(self, uri, transport=None, encoding=None, verbose=False, allow_none=False, use_datetime=False): type, uri = urllib.splittype(uri) if type not in ('scgi'): raise IOError('unsupported XML-RPC protocol') self.__host, self.__handler = urllib.splithost(uri) if not self.__handler: self.__handler = '/' if transport is None: transport = SCGITransport(use_datetime=use_datetime) self.__transport = transport self.__encoding = encoding self.__verbose = verbose self.__allow_none = allow_none def __close(self): self.__transport.close() def __request(self, methodname, params): # call a method on the remote server request = xmlrpclib.dumps(params, methodname, encoding=self.__encoding, allow_none=self.__allow_none) response = self.__transport.request( self.__host, self.__handler, request, verbose=self.__verbose ) if len(response) == 1: response = response[0] return response def __repr__(self): return ( "<SCGIServerProxy for %s%s>" % (self.__host, self.__handler) ) __str__ = __repr__ def __getattr__(self, name): # magic method dispatcher return xmlrpclib._Method(self.__request, name) # note: to call a remote object with an non-standard name, use # result getattr(server, "strange-python-name")(args) def __call__(self, attr): """A workaround to get special attributes on the ServerProxy without interfering with the magic __getattr__ """ if attr == "close": return self.__close elif attr == "transport": return self.__transport raise AttributeError("Attribute %r not found" % (attr,))
5,670
Python
.cgi
134
37.865672
79
0.707503
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
7,999
scgi.py
CouchPotato_CouchPotatoServer/libs/rtorrent/lib/xmlrpc/transports/scgi.py
#!/usr/bin/python # rtorrent_xmlrpc # (c) 2011 Roger Que <alerante@bellsouth.net> # # Modified portions: # (c) 2013 Dean Gardiner <gardiner91@gmail.com> # # Python module for interacting with rtorrent's XML-RPC interface # directly over SCGI, instead of through an HTTP server intermediary. # Inspired by Glenn Washburn's xmlrpc2scgi.py [1], but subclasses the # built-in xmlrpclib classes so that it is compatible with features # such as MultiCall objects. # # [1] <http://libtorrent.rakshasa.no/wiki/UtilsXmlrpc2scgi> # # Usage: server = SCGIServerProxy('scgi://localhost:7000/') # server = SCGIServerProxy('scgi:///path/to/scgi.sock') # print server.system.listMethods() # mc = xmlrpclib.MultiCall(server) # mc.get_up_rate() # mc.get_down_rate() # print mc() # # # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # In addition, as a special exception, the copyright holders give # permission to link the code of portions of this program with the # OpenSSL library under certain conditions as described in each # individual source file, and distribute linked combinations # including the two. # # You must obey the GNU General Public License in all respects for # all of the code used other than OpenSSL. 
If you modify file(s) # with this exception, you may extend this exception to your version # of the file(s), but you are not obligated to do so. If you do not # wish to do so, delete this exception statement from your version. # If you delete this exception statement from all source files in the # program, then also delete it here. # # # # Portions based on Python's xmlrpclib: # # Copyright (c) 1999-2002 by Secret Labs AB # Copyright (c) 1999-2002 by Fredrik Lundh # # By obtaining, using, and/or copying this software and/or its # associated documentation, you agree that you have read, understood, # and will comply with the following terms and conditions: # # Permission to use, copy, modify, and distribute this software and # its associated documentation for any purpose and without fee is # hereby granted, provided that the above copyright notice appears in # all copies, and that both that copyright notice and this permission # notice appear in supporting documentation, and that the name of # Secret Labs AB or the author not be used in advertising or publicity # pertaining to distribution of the software without specific, written # prior permission. # # SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD # TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- # ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR # BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE # OF THIS SOFTWARE. 
import errno import httplib import re import socket import urllib import xmlrpclib class SCGITransport(xmlrpclib.Transport): # Added request() from Python 2.7 xmlrpclib here to backport to Python 2.6 def request(self, host, handler, request_body, verbose=0): #retry request once if cached connection has gone cold for i in (0, 1): try: return self.single_request(host, handler, request_body, verbose) except socket.error, e: if i or e.errno not in (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE): raise except httplib.BadStatusLine: #close after we sent request if i: raise def single_request(self, host, handler, request_body, verbose=0): # Add SCGI headers to the request. headers = {'CONTENT_LENGTH': str(len(request_body)), 'SCGI': '1'} header = '\x00'.join(('%s\x00%s' % item for item in headers.iteritems())) + '\x00' header = '%d:%s' % (len(header), header) request_body = '%s,%s' % (header, request_body) sock = None try: if host: host, port = urllib.splitport(host) addrinfo = socket.getaddrinfo(host, int(port), socket.AF_INET, socket.SOCK_STREAM) sock = socket.socket(*addrinfo[0][:3]) sock.connect(addrinfo[0][4]) else: sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.connect(handler) self.verbose = verbose sock.send(request_body) return self.parse_response(sock.makefile()) finally: if sock: sock.close() def parse_response(self, response): p, u = self.getparser() response_body = '' while True: data = response.read(1024) if not data: break response_body += data # Remove SCGI headers from the response. response_header, response_body = re.split(r'\n\s*?\n', response_body, maxsplit=1) if self.verbose: print 'body:', repr(response_body) p.feed(response_body) p.close() return u.close()
5,943
Python
.cgi
139
37.064748
91
0.686247
CouchPotato/CouchPotatoServer
3,869
1,214
1,266
GPL-3.0
9/5/2024, 5:10:17 PM (Europe/Amsterdam)