idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
51,400
def parse(self):
    """Parse metafile and check pre-conditions.

    Returns True when the metafile is loadable, None otherwise
    (read errors and duplicates are logged, not raised).
    """
    try:
        if not os.path.getsize(self.ns.pathname):
            # Ignore 0-byte dummy files
            self.job.LOG.warn("Ignoring 0-byte metafile '%s'" % (self.ns.pathname,))
            return
        self.metadata = metafile.checked_open(self.ns.pathname)
    except EnvironmentError as exc:
        self.job.LOG.error("Can't read metafile '%s' (%s)" % (
            self.ns.pathname,
            str(exc).replace(": '%s'" % self.ns.pathname, ""),
        ))
        return
    except ValueError as exc:
        self.job.LOG.error("Invalid metafile '%s': %s" % (self.ns.pathname, exc))
        return

    self.ns.info_hash = metafile.info_hash(self.metadata)
    self.ns.info_name = self.metadata["info"]["name"]
    self.job.LOG.info("Loaded '%s' from metafile '%s'" % (self.ns.info_name, self.ns.pathname))

    # Check whether the item is already loaded in the client
    try:
        name = self.job.proxy.d.name(self.ns.info_hash, fail_silently=True)
    except xmlrpc.HashNotFound:
        pass
    except xmlrpc.ERRORS as exc:
        if exc.faultString != "Could not find info-hash.":
            self.job.LOG.error("While checking for #%s: %s" % (self.ns.info_hash, exc))
            return
    else:
        self.job.LOG.warn("Item #%s '%s' already added to client" % (self.ns.info_hash, name))
        return

    return True
Parse metafile and check pre-conditions.
51,401
def addinfo(self):
    """Add known facts to the templating namespace."""
    # Path-related values
    self.ns.watch_path = self.job.config.path
    self.ns.relpath = None
    for watch in self.job.config.path:
        if self.ns.pathname.startswith(watch.rstrip('/') + '/'):
            self.ns.relpath = os.path.dirname(self.ns.pathname)[len(watch.rstrip('/')) + 1:]
            break

    # Build flag set from path components and the filename's dotted parts
    flags = self.ns.pathname.split(os.sep)
    flags.extend(flags[-1].split('.'))
    self.ns.flags = set(i for i in flags if i)

    # Metafile-derived values
    announce = self.metadata.get("announce", None)
    if announce:
        self.ns.tracker_alias = configuration.map_announce2alias(announce)
    main_file = self.ns.info_name
    if "files" in self.metadata["info"]:
        # Use the largest file to determine the file type
        main_file = list(sorted(
            (i["length"], i["path"][-1])
            for i in self.metadata["info"]["files"]))[-1][1]
    self.ns.filetype = os.path.splitext(main_file)[1]

    # Name traits and label
    kind, info = traits.name_trait(self.ns.info_name, add_info=True)
    self.ns.traits = Bunch(info)
    self.ns.traits.kind = kind
    self.ns.label = '/'.join(traits.detect_traits(
        name=self.ns.info_name,
        alias=self.ns.tracker_alias,
        filetype=self.ns.filetype)).strip('/')

    # Expand custom commands from their templates
    self.ns.commands = []
    for key, cmd in sorted(self.job.custom_cmds.items()):
        try:
            self.ns.commands.append(formatting.expand_template(cmd, self.ns))
        except error.LoggableError as exc:
            self.job.LOG.error("While expanding '%s' custom command: %s" % (key, exc))
Add known facts to templating namespace .
51,402
def load(self):
    """Load metafile into the client."""
    if not self.ns.info_hash and not self.parse():
        return

    self.addinfo()

    try:
        # Evaluate flags from config and path components
        start_it = self.job.config.load_mode.lower() in ("start", "started")
        queue_it = self.job.config.queued
        if "start" in self.ns.flags:
            start_it = True
        elif "load" in self.ns.flags:
            start_it = False
        if "queue" in self.ns.flags:
            queue_it = True

        # Pick the load command; queued items stay stopped at lowest priority
        load_cmd = self.job.proxy.load.verbose
        if queue_it:
            if not start_it:
                self.ns.commands.append("d.priority.set=0")
        elif start_it:
            load_cmd = self.job.proxy.load.start_verbose

        self.job.LOG.debug("Templating values are:\n %s" % "\n ".join(
            "%s=%s" % (key, repr(val))
            for key, val in sorted(self.ns.items())))

        # Load the metafile, then give the client a moment to register it
        load_cmd(xmlrpc.NOHASH, self.ns.pathname, *tuple(self.ns.commands))
        time.sleep(.05)

        # Announce the new item on the client's log
        if not self.job.config.quiet:
            msg = "%s: Loaded '%s' from '%s/'%s%s" % (
                self.job.__class__.__name__,
                fmt.to_utf8(self.job.proxy.d.name(self.ns.info_hash, fail_silently=True)),
                os.path.dirname(self.ns.pathname).rstrip(os.sep),
                " [queued]" if queue_it else "",
                (" [startable]" if queue_it else " [started]") if start_it else " [normal]",
            )
            self.job.proxy.log(xmlrpc.NOHASH, msg)
    except xmlrpc.ERRORS as exc:
        self.job.LOG.error("While loading #%s: %s" % (self.ns.info_hash, exc))
Load metafile into client .
51,403
def handle_path(self, event):
    """Handle a path-related inotify event."""
    self.job.LOG.debug("Notification %r" % event)
    if event.dir:
        # Directories are of no interest here
        return

    if any(event.pathname.endswith(i) for i in self.METAFILE_EXT):
        MetafileHandler(self.job, event.pathname).handle()
    elif os.path.basename(event.pathname) == "watch.ini":
        # Config reload is not implemented yet
        self.job.LOG.info("NOT YET Reloading watch config for '%s'" % event.path)
Handle a path - related event .
51,404
def setup(self):
    """Set up the inotify manager and its event mask."""
    if not pyinotify.WatchManager:
        raise error.UserError("You need to install 'pyinotify' to use %s (%s)!" % (
            self.__class__.__name__, pyinotify._import_error))

    self.manager = pyinotify.WatchManager()
    self.handler = TreeWatchHandler(job=self)
    self.notifier = pyinotify.AsyncNotifier(self.manager, self.handler)

    # Watch everything when debugging, otherwise only completed writes and moves
    if self.LOG.isEnabledFor(logging.DEBUG):
        mask = pyinotify.ALL_EVENTS
    else:
        mask = pyinotify.IN_CLOSE_WRITE | pyinotify.IN_MOVED_TO

    for path in self.config.path:
        self.manager.add_watch(path.strip(), mask, rec=True, auto_add=True)
Set up inotify manager .
51,405
def get_filetypes(filelist, path=None, size=os.path.getsize):
    """Get a sorted list of file types and their weight in percent
    from an iterable of file names.

    Returns a list of (percentage, extension) tuples, heaviest first.
    """
    keyfunc = path or (lambda _: _)
    weights = defaultdict(int)

    # Accumulate bytes per (normalized) extension
    for item in filelist:
        extension = os.path.splitext(keyfunc(item))[1].lstrip('.').lower()
        if extension and extension[0] == 'r' and extension[1:].isdigit():
            # Multi-part archives like ".r00" count as "rar"
            extension = "rar"
        elif extension == "jpeg":
            extension = "jpg"
        elif extension == "mpeg":
            extension = "mpg"
        weights[extension] += size(item)

    # Convert byte counts to (rounded) percentages
    grand_total = sum(weights.values())
    if grand_total:
        for extension, byte_count in weights.items():
            weights[extension] = int(byte_count * 100.0 / grand_total + .499)

    return sorted(zip(weights.values(), weights.keys()), reverse=True)
Get a sorted list of file types and their weight in percent from an iterable of file names .
51,406
def name_trait(name, add_info=False):
    """Determine content type (kind) from a release name.

    Returns the kind, or a (kind, info) tuple when add_info is True.
    """
    kind, info = None, {}

    # Skip empty names and DVD structure directories
    if name and not name.startswith("VTS_"):
        lower_name = name.lower()
        trait_patterns = (("tv", TV_PATTERNS, "show"), ("movie", MOVIE_PATTERNS, "title"))

        if any(i in lower_name for i in _DEFINITELY_TV):
            kind = "tv"
            trait_patterns = trait_patterns[:1]

        # Normalize delimiters before pattern matching
        re_name = '.'.join([i.lstrip('[(').rstrip(')]') for i in name.split(' .')])

        for trait, patterns, title_group in trait_patterns:
            matched, patname = None, None
            for patname, pattern in patterns:
                matched = pattern.match(re_name)
                if matched and not any(
                        i in matched.groupdict()[title_group].lower()
                        for i in BAD_TITLE_WORDS):
                    kind, info = trait, matched.groupdict()
                    break

            if matched:
                info["pattern"] = patname
                # Fold numbered groups ("title1", "title2", ...) into one key
                for key, val in list(info.items()):
                    if key[-1].isdigit():
                        del info[key]
                        if val:
                            key = re.sub("[0-9]+$", "", key)
                            info[key] = ("%s %s" % (info.get(key) or "", val)).strip()
                break

    return (kind, info) if add_info else kind
Determine content type from name .
51,407
def detect_traits(name=None, alias=None, filetype=None):
    """Build a traits list from the passed attributes.

    Returns a possibly empty list like ["video", "mkv"].
    """
    result = []
    if filetype:
        filetype = filetype.lstrip('.')

    # "Themed" trackers take precedence over file extension guessing
    theme = config.traits_by_alias.get(alias)
    if alias and theme:
        result = [theme, filetype or "other"]
    elif filetype in KIND_AUDIO:
        result = ["audio", filetype]
    elif filetype in KIND_VIDEO:
        result = ["video", filetype]
        # Refine by name, if it reveals the content kind
        contents = name_trait(name)
        if contents:
            result = [contents, filetype]
    elif filetype in KIND_IMAGE:
        result = ["img", filetype]
    elif filetype in KIND_DOCS:
        result = ["docs", filetype]
    elif filetype in KIND_ARCHIVE:
        result = ["misc", filetype]
        # Archives may still carry a recognizable content name
        contents = name_trait(name)
        if contents:
            result = [contents, filetype]

    return result
Build traits list from passed attributes .
51,408
def console_progress():
    """Return a progress indicator callback for consoles, or None
    when stdout is not a tty (or has no isatty at all).
    """
    def progress(totalhashed, totalsize):
        "Helper"
        # Blank out the line once done, else show percentage
        msg = " " * 30
        if totalhashed < totalsize:
            msg = "%5.1f%% complete" % (totalhashed * 100.0 / totalsize)
        sys.stdout.write(msg + " \r")
        sys.stdout.flush()

    try:
        return progress if sys.stdout.isatty() else None
    except AttributeError:
        # Replaced stdout objects may lack isatty()
        return None
Return a progress indicator for consoles if stdout is a tty .
51,409
def check_info(info):
    """Validate an info dict and return it.

    Raises ValueError when any check fails.
    """
    if not isinstance(info, dict):
        raise ValueError("bad metainfo - not a dictionary")

    pieces = info.get("pieces")
    if not isinstance(pieces, basestring) or len(pieces) % 20 != 0:
        raise ValueError("bad metainfo - bad pieces key")

    piece_size = info.get("piece length")
    if not isinstance(piece_size, (int, long)) or piece_size <= 0:
        raise ValueError("bad metainfo - illegal piece length")

    name = info.get("name")
    if not isinstance(name, basestring):
        raise ValueError("bad metainfo - bad name (type is %r)" % type(name).__name__)
    if not ALLOWED_ROOT_NAME.match(name):
        raise ValueError("name %s disallowed for security reasons" % name)

    # Exactly one of "files" (multi) and "length" (single) must be present
    if ("files" in info) == ("length" in info):
        raise ValueError("single/multiple file mix")

    if "length" in info:
        length = info.get("length")
        if not isinstance(length, (int, long)) or length < 0:
            raise ValueError("bad metainfo - bad length")
    else:
        files = info.get("files")
        if not isinstance(files, (list, tuple)):
            raise ValueError("bad metainfo - bad file list")

        for item in files:
            if not isinstance(item, dict):
                raise ValueError("bad metainfo - bad file value")

            length = item.get("length")
            if not isinstance(length, (int, long)) or length < 0:
                raise ValueError("bad metainfo - bad length")

            path = item.get("path")
            if not isinstance(path, (list, tuple)) or not path:
                raise ValueError("bad metainfo - bad path")

            for part in path:
                if not isinstance(part, basestring):
                    raise ValueError("bad metainfo - bad path dir")
                part = fmt.to_unicode(part)
                if part == '..':
                    raise ValueError("relative path in %s disallowed for security reasons"
                                     % '/'.join(path))
                if part and not ALLOWED_PATH_NAME.match(part):
                    raise ValueError("path %s disallowed for security reasons" % part)

        # Reject duplicate file paths
        file_paths = [os.sep.join(item["path"]) for item in files]
        if len(set(file_paths)) != len(file_paths):
            raise ValueError("bad metainfo - duplicate path")

    return info
Validate info dict .
51,410
def check_meta(meta):
    """Validate a meta dict and return it.

    Raises ValueError when any check fails.
    """
    if not isinstance(meta, dict):
        raise ValueError("bad metadata - not a dictionary")
    if not isinstance(meta.get("announce"), basestring):
        raise ValueError("bad announce URL - not a string")
    check_info(meta.get("info"))
    return meta
Validate meta dict .
51,411
def clean_meta(meta, including_info=False, logger=None):
    """Clean a meta dict of non-standard keys.

    Optionally log removals using the given logger callable.
    Returns the set of removed key names (dotted for nested keys).
    """
    modified = set()

    # Wrap key iterations in list() because keys are deleted during the
    # loop — mandatory on Python 3, harmless on Python 2.
    for key in list(meta.keys()):
        if [key] not in METAFILE_STD_KEYS:
            if logger:
                logger("Removing key %r..." % (key,))
            del meta[key]
            modified.add(key)

    if including_info:
        for key in list(meta["info"].keys()):
            if ["info", key] not in METAFILE_STD_KEYS:
                if logger:
                    logger("Removing key %r..." % ("info." + key,))
                del meta["info"][key]
                modified.add("info." + key)

        for idx, entry in enumerate(meta["info"].get("files", [])):
            for key in list(entry.keys()):
                if ["info", "files", key] not in METAFILE_STD_KEYS:
                    if logger:
                        logger("Removing key %r from file #%d..." % (key, idx + 1))
                    del entry[key]
                    modified.add("info.files." + key)

            # Drop empty path components
            entry["path"] = [i for i in entry["path"] if i]

    return modified
Clean meta dict . Optionally log changes using the given logger .
51,412
def sanitize(meta, diagnostics=False):
    """Try to fix common problems, especially transcoding non-standard
    string encodings to UTF-8.

    Returns the modified meta dict, or a (meta, bad_encodings, bad_fields)
    tuple when diagnostics is True.
    """
    bad_encodings, bad_fields = set(), set()

    def sane_encoding(field, text):
        "Transcoding helper."
        for encoding in ('utf-8', meta.get('encoding', None), 'cp1252'):
            if encoding:
                try:
                    u8_text = text.decode(encoding).encode("utf-8")
                    if encoding != 'utf-8':
                        bad_encodings.add(encoding)
                        bad_fields.add(field)
                    return u8_text
                except UnicodeError:
                    continue
        else:
            # Broken beyond repair: replace undecodable bytes.
            # BUGFIX: the old code used str(text, 'utf-8', 'replace'), which
            # is a TypeError on Python 2 (3-arg str() is Python 3 only) —
            # bytes.decode() with 'replace' works on both.
            bad_encodings.add('UNKNOWN/EXOTIC')
            bad_fields.add(field)
            return text.decode('utf-8', 'replace').replace(u'\ufffd', u'_').encode("utf-8")

    # Transcode the simple text fields
    for field in ("comment", "created by"):
        if field in meta:
            meta[field] = sane_encoding(field, meta[field])

    # Transcode the item name and all file paths
    meta["info"]["name"] = sane_encoding('info name', meta["info"]["name"])
    for entry in meta["info"].get("files", []):
        entry["path"] = [sane_encoding('file path', i) for i in entry["path"]]

    return (meta, bad_encodings, bad_fields) if diagnostics else meta
Try to fix common problems, especially transcoding non-standard string encodings.
51,413
def add_fast_resume(meta, datapath):
    """Add libtorrent fast-resume data to a metafile dict.

    Raises OSError when a file's on-disk size doesn't match the metafile.
    Returns the modified meta dict.
    """
    # Get list of files, faking one for single-file torrents
    files = meta["info"].get("files", None)
    single = files is None
    if single:
        if os.path.isdir(datapath):
            datapath = os.path.join(datapath, meta["info"]["name"])
        files = [Bunch(
            path=[os.path.abspath(datapath)],
            length=meta["info"]["length"],
        )]

    # Prepare the resume data skeleton
    resume = meta.setdefault("libtorrent_resume", {})
    resume["bitfield"] = len(meta["info"]["pieces"]) // 20
    resume["files"] = []
    piece_length = meta["info"]["piece length"]
    offset = 0

    for fileinfo in files:
        # Resolve the file's real path
        filepath = os.sep.join(fileinfo["path"])
        if not single:
            filepath = os.path.join(datapath, filepath.strip(os.sep))

        # Check file size against metadata
        if os.path.getsize(filepath) != fileinfo["length"]:
            raise OSError(errno.EINVAL, "File size mismatch for %r [is %d, expected %d]" % (
                filepath, os.path.getsize(filepath), fileinfo["length"],
            ))

        # Add resume entry with the number of pieces this file spans
        resume["files"].append(dict(
            priority=1,
            mtime=int(os.path.getmtime(filepath)),
            completed=(offset + fileinfo["length"] + piece_length - 1) // piece_length
                      - offset // piece_length,
        ))
        offset += fileinfo["length"]

    return meta
Add fast resume data to a metafile dict .
51,414
def data_size(metadata):
    """Calculate the total payload size of a torrent from parsed metadata."""
    info = metadata['info']
    if 'length' in info:
        # Single-file torrent
        return info['length']
    # Multi-file torrent: sum the individual file lengths
    return sum(entry['length'] for entry in info['files'])
Calculate the size of a torrent based on parsed metadata .
51,415
def checked_open(filename, log=None, quiet=False):
    """Open and validate the given metafile.

    When a logger is passed, invalid metafiles only cause a warning
    (suppressed by quiet=True) instead of an exception.
    """
    with open(filename, "rb") as handle:
        raw_data = handle.read()
    data = bencode.bdecode(raw_data)

    try:
        check_meta(data)
        # Round-trip check: canonical re-encoding must reproduce the input
        if raw_data != bencode.bencode(data):
            raise ValueError("Bad bencoded data - dict keys out of order?")
    except ValueError as exc:
        if log:
            if not quiet:
                log.warn("%s: %s" % (filename, exc))
        else:
            raise

    return data
Open and validate the given metafile. Optionally provide diagnostics on the passed logger for invalid metafiles, which then just cause a warning but no exception; `quiet` can suppress that warning.
51,416
def format(self, obj, context, maxlevels, level):
    """Mask obj if it looks like an URL, then pass it to the superclass."""
    looks_like_url = isinstance(obj, basestring) and "://" in fmt.to_unicode(obj)
    if looks_like_url:
        obj = mask_keys(obj)
    return pprint.PrettyPrinter.format(self, obj, context, maxlevels, level)
Mask obj if it looks like an URL then pass it to the super class .
51,417
def _get_datapath(self):
    """Get a valid datapath, else raise an OSError (ENOENT)."""
    if self._datapath is None:
        raise OSError(errno.ENOENT,
                      "You didn't provide any datapath for %r" % self.filename)
    return self._datapath
Get a valid datapath else raise an exception .
51,418
def _set_datapath(self, datapath):
    """Set a datapath, tracking whether it is a FIFO."""
    if datapath:
        self._datapath = datapath.rstrip(os.sep)
        self._fifo = int(stat.S_ISFIFO(os.stat(self.datapath).st_mode))
    else:
        self._datapath = None
        self._fifo = False
Set a datapath .
51,419
def walk(self):
    """Generate paths in self.datapath.

    FIFO datapaths are read line by line (once only); directories are
    walked recursively honoring self.ignore patterns; plain files are
    yielded as-is.
    """
    if self._fifo:
        if self._fifo > 1:
            raise RuntimeError("INTERNAL ERROR: FIFO read twice!")
        self._fifo += 1

        with open(self.datapath, "r") as fifo:
            while True:
                relpath = fifo.readline().rstrip('\n')
                if not relpath:
                    # EOF indicator
                    break
                self.LOG.debug("Read relative path %r from FIFO..." % (relpath,))
                yield os.path.join(os.path.dirname(self.datapath), relpath)
        self.LOG.debug("FIFO %r closed!" % (self.datapath,))
    elif os.path.isdir(self.datapath):
        for dirpath, dirnames, filenames in os.walk(self.datapath):
            # Prune ignored directories in-place
            for bad in dirnames[:]:
                if any(fnmatch.fnmatch(bad, pattern) for pattern in self.ignore):
                    dirnames.remove(bad)

            for filename in filenames:
                if not any(fnmatch.fnmatch(filename, pattern) for pattern in self.ignore):
                    yield os.path.join(dirpath, filename)
    else:
        # Single file
        yield self.datapath
Generate paths in self . datapath .
51,420
def _calc_size(self):
    """Get the total on-disk size of self.datapath."""
    return sum(os.path.getsize(name) for name in self.walk())
Get total size of self . datapath .
51,421
def _make_info(self, piece_size, progress, walker, piece_callback=None):
    """Create an info dict by hashing the paths produced by walker.

    Returns a (validated info dict, total bytes hashed) tuple.
    """
    # Collectors for file metadata and piece hashes
    file_list = []
    pieces = []

    # Progress state (total size is unknown when reading from a FIFO)
    hashing_secs = time.time()
    totalsize = -1 if self._fifo else self._calc_size()
    totalhashed = 0

    # State of the current (possibly file-spanning) piece
    sha1sum = hashlib.sha1()
    done = 0
    filename = None

    for filename in walker:
        # Record this file's metadata
        filesize = os.path.getsize(filename)
        filepath = filename[len(
            os.path.dirname(self.datapath) if self._fifo else self.datapath
        ):].lstrip(os.sep)
        file_list.append({
            "length": filesize,
            "path": [fmt.to_utf8(x)
                     for x in fmt.to_unicode(filepath).replace(os.sep, '/').split('/')],
        })
        self.LOG.debug("Hashing %r, size %d..." % (filename, filesize))

        # Hash the file piece by piece
        fileoffset = 0
        handle = open(filename, "rb")
        try:
            while fileoffset < filesize:
                # Read up to the end of the file or the current piece
                chunk = handle.read(min(filesize - fileoffset, piece_size - done))
                sha1sum.update(chunk)
                done += len(chunk)
                fileoffset += len(chunk)
                totalhashed += len(chunk)

                if done == piece_size:
                    # Piece is complete, emit its hash and start a new one
                    pieces.append(sha1sum.digest())
                    if piece_callback:
                        piece_callback(filename, pieces[-1])
                    sha1sum = hashlib.sha1()
                    done = 0

                if progress:
                    progress(totalhashed, totalsize)
        finally:
            handle.close()

    # Emit the trailing partial piece, if any
    if done > 0:
        pieces.append(sha1sum.digest())
        if piece_callback:
            piece_callback(filename, pieces[-1])

    # Assemble the info dict
    metainfo = {
        "pieces": b"".join(pieces),
        "piece length": piece_size,
        "name": os.path.basename(self.datapath),
    }
    if self._fifo or os.path.isdir(self.datapath):
        metainfo["files"] = file_list
    else:
        metainfo["length"] = totalhashed

    # Log timing statistics
    hashing_secs = time.time() - hashing_secs
    self.LOG.info("Hashing of %s took %.1f secs (%s/s)" % (
        fmt.human_size(totalhashed).strip(), hashing_secs,
        fmt.human_size(totalhashed / hashing_secs).strip(),
    ))

    return check_info(metainfo), totalhashed
Create info dict .
51,422
def _make_meta(self, tracker_url, root_name, private, progress):
    """Create the torrent dict.

    Returns a (validated meta dict, total bytes hashed) tuple.
    """
    # Choose a piece size exponent; FIFO input has no known size up
    # front, so a fixed 2**20 (1 MiB) is used — TODO confirm rationale.
    if self._fifo:
        piece_size_exp = 20
    else:
        total_size = self._calc_size()
        if total_size:
            piece_size_exp = int(math.log(total_size) / math.log(2)) - 9
        else:
            piece_size_exp = 0
    # Clamp to the 32 KiB .. 16 MiB range
    piece_size_exp = min(max(15, piece_size_exp), 24)
    piece_size = 2 ** piece_size_exp

    # Hash the data (sorted for stable piece order, except FIFO streams)
    info, totalhashed = self._make_info(
        piece_size, progress,
        self.walk() if self._fifo else sorted(self.walk()))

    # Make the info hash unique per tracker
    info["x_cross_seed"] = hashlib.md5(tracker_url).hexdigest()
    if private:
        info["private"] = 1
    if root_name:
        info["name"] = root_name

    meta = {
        "info": info,
        "announce": tracker_url.strip(),
    }
    return check_meta(meta), totalhashed
Create torrent dict .
51,423
def check(self, metainfo, datapath, progress=None):
    """Check piece hashes of a metafile against the given datapath.

    Returns True when all piece hashes match.
    """
    if datapath:
        self.datapath = datapath

    def check_piece(filename, piece):
        "Callback for new piece"
        expected = metainfo["info"]["pieces"][
            check_piece.piece_index:check_piece.piece_index + 20]
        if piece != expected:
            self.LOG.warn("Piece #%d: Hashes differ in file %r" % (
                check_piece.piece_index // 20, filename))
        check_piece.piece_index += 20
    check_piece.piece_index = 0

    # Re-hash the data, comparing piece by piece via the callback
    datameta, _ = self._make_info(
        int(metainfo["info"]["piece length"]), progress,
        [datapath] if "length" in metainfo["info"] else
        (os.path.join(*([datapath] + i["path"])) for i in metainfo["info"]["files"]),
        piece_callback=check_piece)

    return datameta["pieces"] == metainfo["info"]["pieces"]
Check piece hashes of a metafile against the given datapath .
51,424
def _start(self, items):
    """Start some items, if the queue conditions are met."""
    # Check if anything is startable at all
    startable = [i for i in items if self.config.startable.match(i)]
    if not startable:
        self.LOG.debug("Checked %d item(s), none startable according to [ %s ]",
                       len(items), self.config.startable)
        return

    # Check intermission delay (compensating for backward clock jumps)
    now = time.time()
    if now < self.last_start:
        self.last_start = now
    delayed = int(self.last_start + self.config.intermission - now)
    if delayed > 0:
        self.LOG.debug("Delaying start of {:d} item(s),"
                       " due to {:d}s intermission with {:d}s left"
                       .format(len(startable), self.config.intermission, delayed))
        return

    # Determine how many items to start in this run
    downloading = [i for i in items if self.config.downloading.match(i)]
    start_now = max(self.config.start_at_once,
                    self.config.downloading_min - len(downloading))
    start_now = min(start_now, len(startable))

    for idx, item in enumerate(startable):
        if idx >= start_now:
            self.LOG.debug("Only starting %d item(s) in this run, %d more could be downloading" % (
                start_now, len(startable) - idx,))
            break

        if len(downloading) < self.config.downloading_min:
            self.LOG.debug("Catching up from %d to a minimum of %d downloading item(s)" % (
                len(downloading), self.config.downloading_min))
        elif len(downloading) >= self.config.downloading_max:
            self.LOG.debug("Already downloading %d item(s) out of %d max, %d more could be downloading" % (
                len(downloading), self.config.downloading_max, len(startable) - idx,))
            break

        # Start this item (and count it as downloading from now on)
        self.last_start = now
        downloading.append(item)
        self.LOG.info("%s '%s' [%s, #%s]" % (
            "WOULD start" if self.config.dry_run else "Starting",
            fmt.to_utf8(item.name), item.alias, item.hash))
        if not self.config.dry_run:
            item.start()
            if not self.config.quiet:
                self.proxy.log(xmlrpc.NOHASH, "%s: Started '%s' {%s}" % (
                    self.__class__.__name__, fmt.to_utf8(item.name), item.alias,
                ))
Start some items if conditions are met .
51,425
def run(self):
    """Queue manager job callback."""
    try:
        self.proxy = config_ini.engine.open()

        # Fetch a fresh item list, optionally sorted
        items = list(config_ini.engine.items(self.VIEWNAME, cache=False))
        if self.sort_key:
            items.sort(key=self.sort_key)

        self._start(items)
        self.LOG.debug("%s - %s" % (config_ini.engine.engine_id, self.proxy))
    except (error.LoggableError, xmlrpc.ERRORS) as exc:
        # Errors here are likely transient, so only debug-log them
        self.LOG.debug(str(exc))
Queue manager job callback .
51,426
def print_help_fields():
    """Print help about fields and field formatters."""
    # Mock entries for the two "manifold" pseudo-field families
    def custom_manifold():
        "named rTorrent custom attribute, e.g. 'custom_completion_target'"
        return ("custom_KEY", custom_manifold)

    def kind_manifold():
        "file types that contribute at least N% to the item's total size"
        return ("kind_N", kind_manifold)

    print('')
    print("Fields are:")
    # BUGFIX: wrap dict.items() in list() — concatenating a dict view
    # with a list raises TypeError on Python 3 (harmless on Python 2).
    print("\n".join([" %-21s %s" % (name, field.__doc__)
        for name, field in sorted(list(engine.FieldDefinition.FIELDS.items()) + [
            custom_manifold(),
            kind_manifold(),
        ])]))
    print('')
    print("Format specifiers are:")
    print("\n".join([" %-21s %s" % (name, doc)
        for name, doc in sorted(formatting.OutputMapping.formatter_help())]))
    print('')
    print("Append format specifiers using a '.' to field names in '-o' lists,\n"
          "e.g. 'size.sz' or 'completed.raw.delta'.")
Print help about fields and field formatters .
51,427
def add(self, field, val):
    """Add a sample to the statistics for the given field."""
    # Time-based fields are tracked as age relative to the base time
    if engine.FieldDefinition.FIELDS[field]._matcher is matching.TimeFilter:
        val = self._basetime - val

    try:
        self.total[field] += val
        if field in self.min:
            self.min[field] = min(self.min[field], val)
        else:
            self.min[field] = val
        self.max[field] = max(self.max[field], val)
    except (ValueError, TypeError):
        # Non-numeric samples are counted as errors
        self.errors[field] += 1
Add a sample
51,428
def help_completion_fields(self):
    """Yield valid field names for shell completion."""
    for name, field in sorted(engine.FieldDefinition.FIELDS.items()):
        if issubclass(field._matcher, matching.BoolFilter):
            yield "%s=no" % (name,)
            yield "%s=yes" % (name,)
            continue
        elif issubclass(field._matcher, matching.PatternFilter):
            yield "%s=" % (name,)
            yield "%s=/" % (name,)
            yield "%s=?" % (name,)
            yield "%s=\"'*'\"" % (name,)
            continue
        elif issubclass(field._matcher, matching.NumericFilterBase):
            for i in range(10):
                yield "%s=%d" % (name, i)
        else:
            yield "%s=" % (name,)

        # Numeric and plain fields also accept +/- comparison prefixes
        yield r"%s=+" % (name,)
        yield r"%s=-" % (name,)

    yield "custom_"
    yield "kind_"
Return valid field names .
51,429
def format_item(self, item, defaults=None, stencil=None):
    """Format an item for output.

    Raises the original formatting exception after reporting it via fatal().
    """
    from pyrobase.osutil import shell_escape

    try:
        item_text = fmt.to_console(
            formatting.format_item(self.options.output_format, item, defaults))
    # BUGFIX: 'except (...), exc' is Python-2-only syntax (a SyntaxError on
    # Python 3); 'as exc' works on Python 2.6+ and 3.
    except (NameError, ValueError, TypeError) as exc:
        self.fatal("Trouble with formatting item %r\n\n FORMAT = %r\n\n REASON =" % (
            item, self.options.output_format), exc)
        raise  # in case fatal() doesn't exit

    if self.options.shell:
        item_text = '\t'.join(shell_escape(i) for i in item_text.split('\t'))

    # Justify each column according to the stencil widths
    if stencil:
        item_text = '\t'.join(
            i.ljust(len(s)) for i, s in zip(item_text.split('\t'), stencil))

    return item_text
Format an item .
51,430
def emit(self, item, defaults=None, stencil=None, to_log=False, item_formatter=None):
    """Print an item to stdout, or to the log on INFO level.

    Returns the number of lines written.
    """
    item_text = self.format_item(item, defaults, stencil)

    # Apply any post-processing
    if item_formatter:
        item_text = item_formatter(item_text)

    # item=None means a header line; colorize it on terminals
    if item is None and os.isatty(sys.stdout.fileno()):
        item_text = ''.join((config.output_header_ecma48, item_text, "\x1B[0m"))

    if to_log:
        if callable(to_log):
            to_log(item_text)
        else:
            self.LOG.info(item_text)
    elif self.options.nul:
        # NUL-separated output for xargs -0 and the like
        sys.stdout.write(item_text + '\0')
        sys.stdout.flush()
    else:
        print(item_text)

    return item_text.count('\n') + 1
Print an item to stdout or the log on INFO level .
51,431
def validate_output_format(self, default_format):
    """Prepare the output format for later use."""
    output_format = self.options.output_format

    # Fall back to the given default
    if output_format is None:
        output_format = default_format

    # Resolve custom format names from the configuration
    output_format = config.formats.get(output_format, output_format)

    # Expand a plain field list into a tab-separated template
    if re.match(r"^[,._0-9a-zA-Z]+$", output_format):
        self.plain_output_format = True
        output_format = "%%(%s)s" % ")s\t%(".join(
            formatting.validate_field_list(output_format, allow_fmt_specs=True))

    # Resolve escape sequences; "\$" is round-tripped through NUL so that
    # "$(" can be rewritten to "%(" without touching escaped dollars
    output_format = (output_format
                     .replace(r"\\", "\\")
                     .replace(r"\n", "\n")
                     .replace(r"\t", "\t")
                     .replace(r"\$", "\0")
                     .replace("$(", "%(")
                     .replace("\0", "$")
                     .replace(r"\ ", " "))

    self.options.output_format = formatting.preparse(output_format)
Prepare output format for later use .
51,432
def get_output_fields(self):
    """Get the list of valid field names used in the output template."""
    emit_fields = list(
        i.lower()
        for i in re.sub(r"[^_A-Z]+", ' ', self.format_item(None)).split())

    # Keep only known field names, warning about the rest
    result = []
    for name in emit_fields[:]:
        if name not in engine.FieldDefinition.FIELDS:
            self.LOG.warn("Omitted unknown name '%s' from statistics and output format sorting" % name)
        else:
            result.append(name)

    return result
Get field names from output template .
51,433
def validate_sort_fields(self):
    """Take care of sorting: resolve '*' and fall back to configured fields."""
    sort_fields = ','.join(self.options.sort_fields)
    if sort_fields == '*':
        # Sort by all fields that appear in the output format
        sort_fields = self.get_output_fields()
    return formatting.validate_sort_fields(sort_fields or config.sort_fields)
Take care of sorting .
51,434
def show_in_view(self, sourceview, matches, targetname=None):
    """Show the search result in an rTorrent view."""
    append = self.options.append_view or self.options.alter_view == 'append'
    remove = self.options.alter_view == 'remove'
    action_name = ', appending to' if append else ', removing from' if remove else ' into'

    targetname = config.engine.show(
        matches,
        targetname or self.options.to_view or "rtcontrol",
        append=append,
        disjoin=remove)

    msg = "Filtered %d out of %d torrents using [ %s ]" % (
        len(matches), sourceview.size(), sourceview.matcher)
    self.LOG.info("%s%s rTorrent view %r." % (msg, action_name, targetname))
    config.engine.log(msg)
Show search result in ncurses view .
51,435
def heatmap(self, df, imagefile):
    """Create the heat map image and save it to imagefile."""
    # Imported lazily, so the plotting stack is only needed when used
    import seaborn as sns
    import matplotlib.ticker as tkr
    import matplotlib.pyplot as plt
    from matplotlib.colors import LinearSegmentedColormap

    sns.set()
    with sns.axes_style('whitegrid'):
        fig, ax = plt.subplots(figsize=(5, 11))

        # Color map scaled to the observed value range
        cmax = max(df[self.args[2]].max(), self.CMAP_MIN_MAX)
        csteps = {
            0.0: 'darkred',
            0.3 / cmax: 'red',
            0.6 / cmax: 'orangered',
            0.9 / cmax: 'coral',
            1.0 / cmax: 'skyblue',
            1.5 / cmax: 'blue',
            1.9 / cmax: 'darkblue',
            2.0 / cmax: 'darkgreen',
            3.0 / cmax: 'green',
            (self.CMAP_MIN_MAX - .1) / cmax: 'palegreen',
            1.0: 'yellow',
        }
        cmap = LinearSegmentedColormap.from_list('RdGrYl', sorted(csteps.items()), N=256)

        dataset = df.pivot(*self.args)
        sns.heatmap(dataset, mask=dataset.isnull(), annot=False, linewidths=.5,
                    square=True, ax=ax, cmap=cmap,
                    annot_kws=dict(stretch='condensed'))
        ax.tick_params(axis='y', labelrotation=30, labelsize=8)
        plt.savefig(imagefile)
Create the heat map .
51,436
def mainloop(self):
    """Manage category views (list, rotate, or update them)."""
    # Get client state
    proxy = config.engine.open()
    views = [x for x in sorted(proxy.view.list()) if x.startswith(self.PREFIX)]

    current_view = real_current_view = proxy.ui.current_view()
    if current_view not in views:
        if views:
            current_view = views[0]
        else:
            raise error.UserError("There are no '{}*' views defined at all!"
                                  .format(self.PREFIX))

    if self.options.list:
        # Print all category views, marking the active one
        for name in sorted(views):
            print("{} {:5d} {}".format(
                '*' if name == real_current_view else ' ',
                proxy.view.size(xmlrpc.NOHASH, name),
                name[self.PREFIX_LEN:]))
    elif self.options.next or self.options.prev or self.options.update:
        if self.options.update:
            new_view = current_view
        else:
            # Rotate through the category views in either direction
            new_view = (views * 2)[
                views.index(current_view) + (1 if self.options.next else -1)]
        self.LOG.info("{} category view '{}'.".format(
            "Updating" if self.options.update else "Switching to", new_view))
        proxy.pyro.category.update(xmlrpc.NOHASH, new_view[self.PREFIX_LEN:])
        proxy.ui.current_view.set(new_view)
    else:
        self.LOG.info("Current category view is '{}'."
                      .format(current_view[self.PREFIX_LEN:]))
        self.LOG.info("Use '--help' to get usage information.")
Manage category views .
51,437
def _custom_fields():
    """Yield custom field definitions."""
    # Local imports keep module load time low and avoid import cycles
    import os
    from pyrocore.torrent import engine, matching
    from pyrocore.util import fmt

    def has_room(obj):
        "Check disk space."
        pathname = obj.path
        if pathname and not os.path.exists(pathname):
            pathname = os.path.dirname(pathname)
        if pathname and os.path.exists(pathname):
            stats = os.statvfs(pathname)
            return (stats.f_bavail * stats.f_frsize
                    - int(diskspace_threshold_mb) * 1024 ** 2
                    > obj.size * (1.0 - obj.done / 100.0))
        else:
            # Target device unknown
            return None

    yield engine.DynamicField(
        engine.untyped, "has_room",
        "check whether the download will fit on its target device",
        matcher=matching.BoolFilter, accessor=has_room,
        formatter=lambda val: "OK" if val else "??" if val is None else "NO")

    # Register our custom setting with its default
    globals().setdefault("diskspace_threshold_mb", "500")
Yield custom field definitions .
51,438
def engine_data(engine):
    """Get important performance data and metadata from rTorrent.

    @param engine: Connected engine (proxy factory).
    @return: Dict with timestamp, engine metadata, up/down rates+limits,
        and per-view item counts.
    """
    views = ("default", "main", "started", "stopped", "complete",
             "incomplete", "seeding", "leeching", "active", "messages")
    methods = [
        "throttle.global_up.rate", "throttle.global_up.max_rate",
        "throttle.global_down.rate", "throttle.global_down.max_rate",
    ]

    # Get the data in one XMLRPC multicall round-trip; result order matches
    # the order of `methods` followed by `views`
    proxy = engine.open()
    calls = [dict(methodName=method, params=[]) for method in methods] \
          + [dict(methodName="view.size", params=['', view]) for view in views]
    result = proxy.system.multicall(calls, flatten=True)

    data = dict(
        now=time.time(),
        engine_id=engine.engine_id,
        versions=engine.versions,
        uptime=engine.uptime,
        upload=[result[0], result[1]],      # [current rate, max rate]
        download=[result[2], result[3]],    # [current rate, max rate]
        views=dict([(name, result[4 + i])
                    for i, name in enumerate(views)]),
    )
    return data
Get important performance data and metadata from rTorrent .
51,439
def _write_pidfile ( pidfile ) : pid = str ( os . getpid ( ) ) handle = open ( pidfile , 'w' ) try : handle . write ( "%s\n" % pid ) finally : handle . close ( )
Write file with current process ID .
51,440
def guard(pidfile, guardfile=None):
    """Raise an EnvironmentError when the guardfile doesn't exist, or when
    the process with the ID found in pidfile is still active.

    Otherwise, write the current PID to pidfile and return.
    """
    # Check guard (an external "enable" flag file)
    if guardfile and not os.path.exists(guardfile):
        raise EnvironmentError("Guard file '%s' not found, won't start!" % guardfile)

    if os.path.exists(pidfile):
        running, pid = check_process(pidfile)
        if running:
            raise EnvironmentError("Daemon process #%d still running, won't start!" % pid)
        else:
            # Stale PID file: previous process is gone, safe to take over
            logging.getLogger("daemonize").info("Process #%d disappeared, continuing..." % pid)

    # Keep race condition window small, by writing PID file early
    _write_pidfile(pidfile)
Raise an EnvironmentError when the guardfile doesn't exist, or the process with the ID found in pidfile is still active.
51,441
def daemonize(pidfile=None, logfile=None, sync=True):
    """Fork the process into the background (classic UNIX double-fork).

    @param pidfile: Optional path to write the daemon's PID to.
    @param logfile: Optional path or file object; stdout/stderr are
        redirected to it (stdin is always redirected to /dev/null).
    @param sync: If True, wait (poll) until the original parent exits
        before returning in the daemon.
    """
    log = logging.getLogger("daemonize")
    ppid = os.getpid()

    # First fork: detach from the calling process
    try:
        pid = os.fork()
        if pid > 0:
            log.debug("Parent exiting (PID %d, CHILD %d)" % (ppid, pid))
            sys.exit(0)
    except OSError as exc:
        log.critical("fork #1 failed (PID %d): (%d) %s\n" % (os.getpid(), exc.errno, exc.strerror))
        sys.exit(1)

    # Become session leader, so we have no controlling terminal
    os.setsid()

    # Second fork: ensure we can never re-acquire a controlling terminal
    try:
        pid = os.fork()
        if pid > 0:
            log.debug("Session leader exiting (PID %d, PPID %d, DEMON %d)" % (os.getpid(), ppid, pid))
            sys.exit(0)
    except OSError as exc:
        log.critical("fork #2 failed (PID %d): (%d) %s\n" % (os.getpid(), exc.errno, exc.strerror))
        sys.exit(1)

    # We're now in the daemon process
    if pidfile:
        _write_pidfile(pidfile)

    def sig_term(*dummy):
        "Handler for SIGTERM."
        sys.exit(0)

    # Detach stdin and install a clean-exit SIGTERM handler
    stdin = open("/dev/null", "r")
    os.dup2(stdin.fileno(), sys.stdin.fileno())
    signal.signal(signal.SIGTERM, sig_term)

    if logfile:
        try:
            logfile + ""  # duck-typing check: is it a string (path) or a file object?
        except TypeError:
            # File object: redirect stdout/stderr to it (unless already there)
            if logfile.fileno() != sys.stdout.fileno():
                os.dup2(logfile.fileno(), sys.stdout.fileno())
            if logfile.fileno() != sys.stderr.fileno():
                os.dup2(logfile.fileno(), sys.stderr.fileno())
        else:
            # Path: open for append and redirect stdout/stderr
            log.debug("Redirecting stdout / stderr to %r" % logfile)
            loghandle = open(logfile, "a+")
            os.dup2(loghandle.fileno(), sys.stdout.fileno())
            os.dup2(loghandle.fileno(), sys.stderr.fileno())
            loghandle.close()  # dup'ed descriptors stay open

    if sync:
        # Wait for the original parent process to die (poll up to ~5 seconds)
        polling = 5, .01
        for _ in range(int(polling[0] * 1 / polling[1])):
            try:
                os.kill(ppid, 0)  # signal 0 = existence check only
            except OSError:
                break
            else:
                time.sleep(polling[1])

    log.debug("Process detached (PID %d)" % os.getpid())
Fork the process into the background .
51,442
def flatten(nested, containers=(list, tuple)):
    """Flatten a nested sequence into a new flat list and return it.

    Nested lists/tuples (per `containers`) are expanded recursively;
    empty containers are dropped entirely.
    """
    flat = list(nested)
    pos = 0
    while pos < len(flat):
        item = flat[pos]
        if isinstance(item, containers):
            if item:
                # Splice the container's elements in place of the container
                flat[pos:pos + 1] = item
            else:
                # Drop empty containers
                del flat[pos]
            # Re-examine the same slot, it now holds new content
            continue
        pos += 1
    return flat
Flatten a nested list in - place and return it .
51,443
def gendocs():
    "create some doc pages automatically"
    helppage = path("docs/references-cli-usage.rst")

    # Static page header, including the tool suite version
    content = [
        ".. automatically generated using 'paver gendocs'.",
        "",
        ".. contents::",
        "    :local:",
        "",
        ".. note::",
        "",
        "    The help output presented here applies to version ``%s`` of the tools."
        % sh("pyroadmin --version", capture=True).split()[1],
        "",
    ]

    # One section per console script, containing its captured --help output
    for tool in sorted(project.entry_points["console_scripts"]):
        tool, _ = tool.split(None, 1)  # strip the "= module:func" part
        content.extend([
            ".. _cli-usage-%s:" % tool,
            "",
            tool,
            '^' * len(tool),
            "",
            "::",
            "",
        ])
        # rtcontrol gets the extended field help instead of plain --help
        help_opt = "--help-fields --config-dir /tmp" if tool == "rtcontrol" else "--help"
        help_txt = sh("%s -q %s" % (tool, help_opt), capture=True, ignore_error=True).splitlines()
        # Indent for the literal block, dropping boilerplate lines
        content.extend(' ' + i for i in help_txt
                       if ' on Python ' not in i
                       and 'Copyright (c) 200' not in i
                       and 'see the full documentation' not in i
                       and ' https://pyrocore.readthedocs.io/' not in i)
        content.extend([
            "",
        ])

    # Strip run-specific noise, then collapse runs of blank lines
    content = [line.rstrip() for line in content
               if all(i not in line for i in (", Copyright (c) ", "Total time: ", "Configuration file '/tmp/"))]
    content = [line for line, succ in zip(content, content[1:] + ['']) if line or succ]

    helppage.write_lines(content)
create some doc pages automatically
51,444
def watchdog_pid():
    """Get watchdog PID via netstat (None when no listener is found)."""
    listing = sh('netstat -tulpn 2>/dev/null | grep 127.0.0.1:{:d}'.format(SPHINX_AUTOBUILD_PORT),
                 capture=True, ignore_error=True).strip()
    if not listing:
        return None
    # Last column of netstat -p output is "PID/program" (or '-' if unknown)
    token = listing.split()[-1]
    if token == '-':
        return None
    return token.split('/', 1)[0]
Get watchdog PID via netstat .
51,445
def autodocs():
    "create Sphinx docs locally, and start a watchdog"
    # NOTE: Python 2 code (bare `print` statements)
    build_dir = path('docs/_build')
    index_html = build_dir / 'html/index.html'  # NOTE(review): computed but unused below
    if build_dir.exists():
        build_dir.rmtree()  # force a clean rebuild

    with pushd("docs"):
        print "\n*** Generating API doc ***\n"
        # API doc for the project itself, and for the bundled tempita copy
        sh("sphinx-apidoc -o apidoc -f -T -M ../src/pyrocore")
        sh("sphinx-apidoc -o apidoc -f -T -M $(dirname $(python -c 'import tempita; print(tempita.__file__)'))")
        print "\n*** Generating HTML doc ***\n"
        # Launch sphinx-autobuild in the background, detached via nohup
        sh('nohup %s/Makefile SPHINXBUILD="sphinx-autobuild -p %d'
           ' -i \'.*\' -i \'*.log\' -i \'*.png\' -i \'*.txt\'" html >autobuild.log 2>&1 &'
           % (os.getcwd(), SPHINX_AUTOBUILD_PORT))

    # Poll (up to ~60s) until the autobuild watchdog is listening
    for i in range(25):
        time.sleep(2.5)
        pid = watchdog_pid()
        if pid:
            sh("touch docs/index.rst")  # trigger an initial build
            sh('ps {}'.format(pid))
            url = 'http://localhost:{port:d}/'.format(port=SPHINX_AUTOBUILD_PORT)
            print("\n*** Open '{}' in your browser...".format(url))
            break
create Sphinx docs locally and start a watchdog
51,446
def stopdocs():
    "stop Sphinx watchdog"
    # Try a few times; the watchdog may take a moment to die
    for attempt in range(4):
        pid = watchdog_pid()
        if not pid:
            break
        if attempt == 0:
            sh('ps {}'.format(pid))  # show what we're killing, once
        sh('kill {}'.format(pid))
        time.sleep(.5)
stop Sphinx watchdog
51,447
def coverage():
    "generate coverage report and show in browser"
    coverage_index = path("build/coverage/index.html")
    # Only remove a stale report if one exists — the unconditional
    # remove() raised OSError on a fresh checkout with no report yet
    if coverage_index.exists():
        coverage_index.remove()
    sh("paver test")
    # Open the fresh report, if the test run produced one
    if coverage_index.exists():
        webbrowser.open(coverage_index)
generate coverage report and show in browser
51,448
def lookup_announce_alias(name):
    """Get canonical alias name and announce URL list for the given alias.

    Lookup is case-insensitive; raises KeyError for unknown aliases.
    """
    wanted = name.lower()
    for alias, urls in announce.items():
        if alias.lower() == wanted:
            return alias, urls
    raise KeyError("Unknown alias %s" % (name,))
Get canonical alias name and announce URL list for the given alias .
51,449
def map_announce2alias(url):
    """Get tracker alias for announce URL, and if none is defined, the 2nd level domain."""
    import urlparse  # Python 2 module (urllib.parse in Python 3)

    # Try to find an exact URL match first
    for alias, urls in announce.items():
        if any(i == url for i in urls):
            return alias

    # Then try to match the server name (scheme + host[:port])
    parts = urlparse.urlparse(url)
    server = urlparse.urlunparse((parts.scheme, parts.netloc, "/", None, None, None))
    for alias, urls in announce.items():
        if any(i.startswith(server) for i in urls):
            return alias

    # Fall back to the 2nd level domain name (host without port)
    try:
        return '.'.join(parts.netloc.split(':')[0].split('.')[-2:])
    except IndexError:
        return parts.netloc
Get tracker alias for announce URL and if none is defined the 2nd level domain .
51,450
def validate(key, val):
    """Validate and coerce a single configuration value, based on its key."""
    if val and val.startswith("~/"):
        # Expand leading "~/" to the user's home directory
        return os.path.expanduser(val)
    if key == "output_header_frequency":
        return int(val, 10)
    if key.endswith("_ecma48"):
        # Decode escape sequences (e.g. "\x1b[1m") in terminal control strings.
        # NOTE(review): uses eval on config-file text — config files are
        # trusted local input here, but keep it that way.
        return eval("'%s'" % val.replace("'", r"\'"))

    return val
Validate a configuration value .
51,451
def _update_config(self, namespace):
    """Inject the items from the given dict into the configuration module."""
    for name in namespace:
        setattr(config, name, namespace[name])
Inject the items from the given dict into the configuration .
51,452
def _interpolation_escape(self, namespace):
    """Re-escape interpolation strings in-place ('%' doubled to '%%').

    Keeps ConfigParser interpolation from choking on literal percent signs.
    """
    for key, val in namespace.items():
        if '%' in val:
            # INTERPOLATION_ESCAPE is a class-level compiled regex (defined
            # outside this view); presumably it matches '%' chars that are
            # not already valid interpolations — confirm against the class
            namespace[key] = self.INTERPOLATION_ESCAPE.sub(lambda match: '%' + match.group(0), val)
Re - escape interpolation strings .
51,453
def _validate_namespace(self, namespace):
    """Validate the given namespace. This method is idempotent!"""
    # Update config values (so other code can access them in the bootstrap phase)
    self._update_config(namespace)

    # Split announce URL strings into lists
    for key, val in namespace["announce"].items():
        if isinstance(val, basestring):
            namespace["announce"][key] = val.split()

    # Re-escape output format strings
    self._interpolation_escape(namespace["formats"])

    # Instantiate factories given as dotted module paths
    for factory in ("engine",):
        if isinstance(namespace[factory], basestring):
            namespace[factory] = pymagic.import_name(namespace[factory])() if namespace[factory] else None

    # Coerce "*_list" strings to lists, and "*_factories"/"*_callbacks"
    # strings to lists of imported callables
    for key in namespace:
        if key.endswith("_list") and isinstance(namespace[key], basestring):
            namespace[key] = [i.strip() for i in namespace[key].replace(',', ' ').split()]
        elif any(key.endswith(i) for i in ("_factories", "_callbacks")) and isinstance(namespace[key], basestring):
            namespace[key] = [pymagic.import_name(i.strip()) for i in namespace[key].replace(',', ' ').split()]

    # Update config values again, now with the coerced values
    self._update_config(namespace)
Validate the given namespace . This method is idempotent!
51,454
def _set_from_ini(self, namespace, ini_file):
    """Copy values from a loaded INI file into the namespace.

    [GLOBAL] keys land at the namespace's top level; other sections
    become nested dicts keyed by the lower-cased section name.
    """
    # Isolate global values (simple strings already in the namespace)
    global_vars = dict((key, val)
        for key, val in namespace.items()
        if isinstance(val, basestring))

    for section in ini_file.sections():
        # Pick the target dict for this section
        if section == "GLOBAL":
            raw_vars = global_vars
        else:
            raw_vars = namespace.setdefault(section.lower(), {})

        # First pass: raw values (no interpolation), so they can serve as
        # interpolation variables for the second pass
        raw_vars.update(dict(ini_file.items(section, raw=True)))
        if section == "FORMATS":
            self._interpolation_escape(raw_vars)

        # Second pass: interpolated and validated values
        raw_vars.update(dict(
            (key, validate(key, val))
            for key, val in ini_file.items(section, vars=raw_vars)
        ))

    # Write the (possibly updated) global values back
    namespace.update(global_vars)
Copy values from loaded INI file to namespace .
51,455
def _set_defaults(self, namespace, optional_cfg_files):
    """Set default values in the given dict, from the package's bundled config files."""
    namespace["config_dir"] = self.config_dir
    for idx, cfg_file in enumerate([self.CONFIG_INI] + optional_cfg_files):
        # Skip anything that looks like a path (only plain bundled names apply)
        if any(i in cfg_file for i in set('/' + os.sep)):
            continue
        try:
            defaults = pymagic.resource_string("pyrocore", "data/config/" + cfg_file)
        except IOError as exc:
            # Optional files (idx > 0) may be missing from the package data
            if idx and exc.errno == errno.ENOENT:
                continue
            raise

        # NOTE: Python 2 stdlib modules (ConfigParser / StringIO)
        ini_file = ConfigParser.SafeConfigParser()
        ini_file.optionxform = str  # case-sensitive option names
        ini_file.readfp(StringIO.StringIO(defaults), "<defaults>")
        self._set_from_ini(namespace, ini_file)
Set default values in the given dict .
51,456
def _load_ini(self, namespace, config_file):
    """Load INI style configuration from `config_file` into `namespace`."""
    self.LOG.debug("Loading %r..." % (config_file,))
    ini_file = ConfigParser.SafeConfigParser()
    ini_file.optionxform = str  # case-sensitive option names
    if ini_file.read(config_file):
        self._set_from_ini(namespace, ini_file)
    else:
        # read() returns the list of successfully parsed files; empty = missing
        self.LOG.warning("Configuration file %r not found,"
            " use the command 'pyroadmin --create-config' to create it!" % (config_file,))
Load INI style configuration .
51,457
def _load_py(self, namespace, config_file):
    """Load scripted configuration (a Python file executed into the namespace)."""
    if config_file and os.path.isfile(config_file):
        self.LOG.debug("Loading %r..." % (config_file,))
        # NOTE(review): executes arbitrary Python from the user's config
        # directory — trusted local input by design, keep it that way.
        # Globals come from the config module, locals land in `namespace`.
        exec(compile(open(config_file).read(), config_file, 'exec'), vars(config), namespace)
    else:
        self.LOG.warning("Configuration file %r not found!" % (config_file,))
Load scripted configuration .
51,458
def load(self, optional_cfg_files=None):
    """Actually load the configuration, from either the default location
    or the given directory.

    Order: bundled defaults -> main INI -> optional INIs -> validation ->
    config script -> validation again -> validator callbacks.
    """
    optional_cfg_files = optional_cfg_files or []

    # Guard against coding errors (double loading clobbers state)
    if self._loaded:
        raise RuntimeError("INTERNAL ERROR: Attempt to load configuration twice!")

    try:
        # Load configuration
        namespace = {}
        self._set_defaults(namespace, optional_cfg_files)

        self._load_ini(namespace, os.path.join(self.config_dir, self.CONFIG_INI))

        for cfg_file in optional_cfg_files:
            if not os.path.isabs(cfg_file):
                cfg_file = os.path.join(self.config_dir, cfg_file)
            if os.path.exists(cfg_file):
                self._load_ini(namespace, cfg_file)

        # Validate, run the config script (which may refer to validated
        # values), then validate again since the script can change anything
        self._validate_namespace(namespace)
        self._load_py(namespace, namespace["config_script"])
        self._validate_namespace(namespace)

        for callback in namespace["config_validator_callbacks"]:
            callback()
    except ConfigParser.ParsingError as exc:
        raise error.UserError(exc)

    # Ready to go...
    self._loaded = True
Actually load the configuration, from either the default location or the given directory.
51,459
def create(self, remove_all_rc_files=False):
    """Create default configuration files at either the default location
    or the given directory.

    @param remove_all_rc_files: If True, wipe existing *.rc and
        *.rc.default files first (so they get regenerated).
    """
    # Create the configuration directory if needed
    if os.path.exists(self.config_dir):
        self.LOG.debug("Configuration directory %r already exists!" % (self.config_dir,))
    else:
        os.mkdir(self.config_dir)

    if remove_all_rc_files:
        for subdir in ('.', 'rtorrent.d'):
            config_files = list(glob.glob(os.path.join(os.path.abspath(self.config_dir), subdir, '*.rc')))
            config_files += list(glob.glob(os.path.join(os.path.abspath(self.config_dir), subdir, '*.rc.default')))
            for config_file in config_files:
                self.LOG.info("Removing %r!" % (config_file,))
                os.remove(config_file)

    # Copy all bundled config resources into place
    for filepath in sorted(walk_resources("pyrocore", "data/config")):
        # Load bundled text for this resource
        text = pymagic.resource_string("pyrocore", "data/config" + filepath)
        config_file = self.config_dir + filepath

        if not os.path.exists(os.path.dirname(config_file)):
            os.makedirs(os.path.dirname(config_file))

        # Always (re)write the ".default" copy; only write the real file
        # when it doesn't exist yet (never clobber user edits)
        config_trail = [".default"]
        if os.path.exists(config_file):
            self.LOG.debug("Configuration file %r already exists!" % (config_file,))
        else:
            config_trail.append('')
        for i in config_trail:
            with open(config_file + i, "w") as handle:
                handle.write(text)
            self.LOG.info("Configuration file %r written!" % (config_file + i,))
Create default configuration files at either the default location or the given directory .
51,460
def make_magnet_meta(self, magnet_uri):
    """Create a magnet-uri torrent (a stub metafile in the magnet watch dir)."""
    import cgi  # Python 2; cgi.parse_qs is urllib.parse.parse_qs in Python 3
    import hashlib

    if magnet_uri.startswith("magnet:"):
        magnet_uri = magnet_uri[7:]
    meta = {"magnet-uri": "magnet:" + magnet_uri}

    # Derive a metafile name: the "xt" (exact topic) param if present,
    # else a digest of the whole URI; prefix with "dn" (display name)
    magnet_params = cgi.parse_qs(magnet_uri.lstrip('?'))
    meta_name = magnet_params.get("xt", [hashlib.sha1(magnet_uri).hexdigest()])[0]
    if "dn" in magnet_params:
        meta_name = "%s-%s" % (magnet_params["dn"][0], meta_name)
    # Sanitize into a safe filename
    meta_name = re.sub(r"[^-_,a-zA-Z0-9]+", '.', meta_name).strip('.').replace("urn.btih.", "")

    if not config.magnet_watch:
        self.fatal("You MUST set the 'magnet_watch' config option!")
    meta_path = os.path.join(config.magnet_watch, "magnet-%s.torrent" % meta_name)
    self.LOG.debug("Writing magnet-uri metafile %r..." % (meta_path,))

    try:
        bencode.bwrite(meta_path, meta)
    except EnvironmentError as exc:
        self.fatal("Error writing magnet-uri metafile %r (%s)" % (meta_path, exc,))
        raise
Create a magnet - uri torrent .
51,461
def get_class_logger(obj):
    """Get a logger specific for the given object's class."""
    klass = obj.__class__
    return logging.getLogger('%s.%s' % (klass.__module__, klass.__name__))
Get a logger specific for the given object s class .
51,462
def default(self, o):
    "Support more object types."
    if isinstance(o, set):
        # Sets become sorted lists (deterministic output)
        return sorted(o)
    if hasattr(o, 'as_dict'):
        return o.as_dict()
    return super(JSONEncoder, self).default(o)
Support more object types .
51,463
def fmt_sz(intval):
    """Format a byte sized value."""
    try:
        return fmt.human_size(intval)
    except (TypeError, ValueError):
        # Keep column width identical to a formatted value
        return "N/A".rjust(len(fmt.human_size(0)))
Format a byte sized value .
51,464
def fmt_iso(timestamp):
    """Format a UNIX timestamp to an ISO datetime string."""
    try:
        return fmt.iso_datetime(timestamp)
    except (TypeError, ValueError):
        # Keep column width identical to a formatted value
        return "N/A".rjust(len(fmt.iso_datetime(0)))
Format a UNIX timestamp to an ISO datetime string .
51,465
def fmt_duration(duration):
    """Format a duration value in seconds to a readable form."""
    try:
        return fmt.human_duration(float(duration), 0, 2, True)
    except (TypeError, ValueError):
        # Keep column width identical to a formatted value
        return "N/A".rjust(len(fmt.human_duration(0, 0, 2, True)))
Format a duration value in seconds to a readable form .
51,466
def fmt_subst(regex, subst):
    """Return a formatter that replaces `regex` matches with `subst`.

    Falsy input (None, empty string) is passed through unchanged.
    """
    def _substitute(text):
        if not text:
            return text
        return re.sub(regex, subst, text)
    return _substitute
Replace regex with string .
51,467
def preparse(output_format):
    """Do any special processing of a template, and return the result.

    Template includes are resolved relative to the config dir's
    "templates" subdirectory.
    """
    try:
        return templating.preparse(output_format,
            lambda path: os.path.join(config.config_dir, "templates", path))
    except ImportError as exc:
        # Give a helpful hint when the optional tempita dependency is missing
        if "tempita" in str(exc):
            raise error.UserError(
                "To be able to use Tempita templates, install the 'tempita' package (%s)\n"
                " Possibly USING THE FOLLOWING COMMAND:\n"
                " %s/easy_install tempita" % (exc, os.path.dirname(sys.executable)))
        raise
    except IOError as exc:
        raise error.LoggableError("Cannot read template: {}".format(exc))
Do any special processing of a template and return the result .
51,468
def validate_field_list(fields, allow_fmt_specs=False, name_filter=None):
    """Make sure the fields in the given list exist.

    @param fields: Comma/space separated string, or an iterable of names.
    @param allow_fmt_specs: Permit "name.formatter" suffixes (e.g. "size.sz").
    @param name_filter: Optional callable applied to each name first.
    @return: The validated list of field names.
    @raise error.UserError: On unknown field names or format specifiers.
    """
    # Available formatter names = fmt_* functions in this module
    formats = [i[4:] for i in globals() if i.startswith("fmt_")]

    # Split a separated string into a list; iterables pass through unchanged
    try:
        fields = [i.strip() for i in fields.replace(',', ' ').split()]
    except AttributeError:
        pass

    if name_filter:
        fields = [name_filter(name) for name in fields]

    for name in fields:
        if allow_fmt_specs and '.' in name:
            fullname = name
            name, fmtspecs = name.split('.', 1)
            for fmtspec in fmtspecs.split('.'):
                if fmtspec not in formats and fmtspec != "raw":
                    raise error.UserError("Unknown format specification %r in %r" % (fmtspec, fullname))
        # Unknown names may still be dynamically resolvable attributes
        if name not in engine.FieldDefinition.FIELDS and not engine.TorrentProxy.add_manifold_attribute(name):
            raise error.UserError("Unknown field name %r" % (name,))

    return fields
Make sure the fields in the given list exist .
51,469
def validate_sort_fields(sort_fields):
    """Make sure the fields in the given list exist, and return a sorting key.

    A leading '-' on a field name means descending order for that field.
    """
    # Collect fields marked for descending sort, stripping the '-' flag
    descending = set()

    def sort_order_filter(name):
        "Helper to remove flag and memoize sort order"
        if name.startswith('-'):
            name = name[1:]
            descending.add(name)
        return name

    sort_fields = validate_field_list(sort_fields, name_filter=sort_order_filter)
    log.debug("Sorting order is: %s" % ", ".join([('-' if i in descending else '') + i
        for i in sort_fields]))

    # Fast path: all ascending, a plain attrgetter works as the key
    if not descending:
        return operator.attrgetter(*tuple(sort_fields))

    # Mixed order: compare field by field, inverting where flagged
    class Key(object):
        "Complex sort order key"
        def __init__(self, obj, *args):
            "Remember object to be compared"
            self.obj = obj
        def __lt__(self, other):
            "Compare to other key"
            for field in sort_fields:
                lhs, rhs = getattr(self.obj, field), getattr(other.obj, field)
                if lhs == rhs:
                    continue
                # Invert the comparison for descending fields
                return rhs < lhs if field in descending else lhs < rhs
            return False  # all fields equal

    return Key
Make sure the fields in the given list exist and return sorting key .
51,470
def formatter_help(cls):
    """Return a list of format specifiers and their documentation."""
    specs = [("raw", "Switch off the default field formatter.")]
    # Every module-level fmt_* function is a formatter; its docstring documents it
    for name, func in globals().items():
        if name.startswith("fmt_"):
            specs.append((name[4:], func.__doc__.strip()))
    return specs
Return a list of format specifiers and their documentation .
51,471
def timeparse(sval, granularity='seconds'):
    """Parse a time expression, returning it as a number of seconds. If
    possible, the return value will be an int; if this is not possible, the
    return will be a float. Returns None if a time expression cannot be
    parsed from the given string.
    """
    # Split off an optional leading sign (COMPILED_SIGN/COMPILED_TIMEFORMATS/
    # MULTIPLIERS are module-level constants defined outside this view)
    match = COMPILED_SIGN.match(sval)
    sign = -1 if match.groupdict()['sign'] == '-' else 1
    sval = match.groupdict()['unsigned']

    for timefmt in COMPILED_TIMEFORMATS:
        match = timefmt.match(sval)
        if match and match.group(0).strip():
            mdict = match.groupdict()
            if granularity == 'minutes':
                # Bare numbers are minutes, not seconds, in this mode
                mdict = _interpret_as_minutes(sval, mdict)
            if all(v.isdigit() for v in list(mdict.values()) if v):
                # All components integral: exact int result
                return sign * sum([MULTIPLIERS[k] * int(v, 10) for (k, v) in
                                   list(mdict.items()) if v is not None])
            elif ('secs' not in mdict or mdict['secs'] is None or mdict['secs'].isdigit()):
                # Fractional larger units with integral seconds: truncate the
                # larger-unit sum to int, then add the seconds.
                # NOTE(review): the sign multiplies only the first term here —
                # looks intentional only for non-negative input; confirm
                # against the upstream timeparse module.
                return (sign * int(sum([MULTIPLIERS[k] * float(v) for (k, v) in
                                        list(mdict.items()) if k != 'secs' and v is not None])) +
                        (int(mdict['secs'], 10) if mdict['secs'] else 0))
            else:
                # Fractional seconds: float result
                return sign * sum([MULTIPLIERS[k] * float(v) for (k, v) in
                                   list(mdict.items()) if v is not None])
Parse a time expression returning it as a number of seconds . If possible the return value will be an int ; if this is not possible the return will be a float . Returns None if a time expression cannot be parsed from the given string .
51,472
def get_client(project_id=None, credentials=None, service_url=None,
               service_account=None, private_key=None, private_key_file=None,
               json_key=None, json_key_file=None, readonly=True,
               swallow_results=True, num_retries=0):
    """Return an instance of BigQueryClient.

    Either ready-made `credentials`, or a service account with a P12 key
    (string or file), or a JSON key (dict or file) must be provided.
    `project_id` may be omitted only when a JSON key supplies it.

    @param readonly: Request the read-only OAuth scope when True.
    @param swallow_results: Client returns booleans instead of raw responses.
    @param num_retries: Retry count passed through to API calls.
    """
    if not credentials:
        assert (service_account and (private_key or private_key_file)) or (json_key or json_key_file), \
            'Must provide AssertionCredentials or service account and P12 key\
 or JSON key'

    if not project_id:
        assert json_key or json_key_file, \
            'Must provide project_id unless json_key or json_key_file is\
 provided'

    if service_url is None:
        service_url = DISCOVERY_URI

    scope = BIGQUERY_SCOPE_READ_ONLY if readonly else BIGQUERY_SCOPE

    if private_key_file:
        credentials = _credentials().from_p12_keyfile(service_account, private_key_file, scopes=scope)

    if private_key:
        try:
            # Python 2: decode byte strings to unicode for the oauth lib
            if isinstance(private_key, basestring):
                private_key = private_key.decode('utf-8')
        except NameError:
            # Python 3 has no basestring; private_key is already text
            pass
        credentials = _credentials().from_p12_keyfile_buffer(
            service_account, StringIO(private_key), scopes=scope)

    if json_key_file:
        with open(json_key_file, 'r') as key_file:
            json_key = json.load(key_file)

    if json_key:
        credentials = _credentials().from_json_keyfile_dict(json_key, scopes=scope)
        if not project_id:
            # JSON service-account keys carry the project id
            project_id = json_key['project_id']

    bq_service = _get_bq_service(credentials=credentials, service_url=service_url)

    return BigQueryClient(bq_service, project_id, swallow_results, num_retries)
Return a singleton instance of BigQueryClient . Either AssertionCredentials or a service account and private key combination need to be provided in order to authenticate requests to BigQuery .
51,473
def get_projects(bq_service):
    """Given the BigQuery service, return data about all projects."""
    response = bq_service.projects().list().execute()
    # Reduce each project resource to just id and display name
    return [
        {'id': entry['id'], 'name': entry['friendlyName']}
        for entry in response.get('projects', [])
    ]
Given the BigQuery service return data about all projects .
51,474
def _get_bq_service(credentials=None, service_url=None):
    """Construct an authorized BigQuery service object."""
    assert credentials, 'Must provide ServiceAccountCredentials'
    authorized_http = credentials.authorize(Http())
    return build('bigquery', 'v2', http=authorized_http,
                 discoveryServiceUrl=service_url, cache_discovery=False)
Construct an authorized BigQuery service object .
51,475
def _submit_query_job(self, query_data):
    """Submit a query job to BigQuery.

    @param query_data: Body for the jobs.query API call.
    @return: Tuple of (job id, transformed result rows). For a dry run
        that fails validation, returns (None, parsed error body).
    @raise BigQueryTimeoutException: When a timeout was requested and the
        job did not complete within it.
    """
    logger.debug('Submitting query job: %s' % query_data)

    job_collection = self.bigquery.jobs()

    try:
        query_reply = job_collection.query(
            projectId=self.project_id, body=query_data).execute(num_retries=self.num_retries)
    except HttpError as e:
        # Dry runs surface validation errors as HTTP errors; hand the
        # parsed error body back instead of raising
        if query_data.get("dryRun", False):
            return None, json.loads(e.content.decode('utf8'))
        raise

    job_id = query_reply['jobReference'].get('jobId')
    schema = query_reply.get('schema', {'fields': None})['fields']
    rows = query_reply.get('rows', [])
    job_complete = query_reply.get('jobComplete', False)

    # Only raise on timeout when the caller actually asked for one
    if not job_complete and query_data.get("timeoutMs", False):
        logger.error('BigQuery job %s timeout' % job_id)
        raise BigQueryTimeoutException()

    return job_id, [self._transform_row(row, schema) for row in rows]
Submit a query job to BigQuery .
51,476
def _insert_job(self, body_object):
    """Submit a job to BigQuery and return the API response."""
    logger.debug('Submitting job: %s' % body_object)
    jobs = self.bigquery.jobs()
    request = jobs.insert(projectId=self.project_id, body=body_object)
    return request.execute(num_retries=self.num_retries)
Submit a job to BigQuery
51,477
def query(self, query, max_results=None, timeout=0, dry_run=False, use_legacy_sql=None, external_udf_uris=None):
    """Submit a query to BigQuery.

    @param query: SQL text to run.
    @param max_results: Optional cap on returned rows.
    @param timeout: Seconds to wait for completion (0 = don't wait).
    @param dry_run: Validate only, without running the query.
    @param use_legacy_sql: Tri-state; only sent when not None.
    @param external_udf_uris: Optional list of UDF resource URIs.
    """
    logger.debug('Executing query: %s' % query)

    request_body = {
        'query': query,
        'timeoutMs': timeout * 1000,  # API expects milliseconds
        'dryRun': dry_run,
        'maxResults': max_results,
    }
    if use_legacy_sql is not None:
        request_body['useLegacySql'] = use_legacy_sql
    if external_udf_uris:
        request_body['userDefinedFunctionResources'] = [
            {'resourceUri': uri} for uri in external_udf_uris
        ]

    return self._submit_query_job(request_body)
Submit a query to BigQuery .
51,478
def get_query_schema(self, job_id):
    """Retrieve the schema of a query by job id.

    @raise UnfinishedQueryException: If the job has not completed yet.
    """
    reply = self.get_query_results(job_id, offset=0, limit=0)
    if not reply['jobComplete']:
        logger.warning('BigQuery job %s not complete' % job_id)
        raise UnfinishedQueryException()
    return reply['schema']['fields']
Retrieve the schema of a query by job id .
51,479
def get_table_schema(self, dataset, table, project_id=None):
    """Return the table schema, or None when the table does not exist."""
    project_id = self._get_project_id(project_id)

    try:
        response = self.bigquery.tables().get(
            projectId=project_id,
            tableId=table,
            datasetId=dataset
        ).execute(num_retries=self.num_retries)
    except HttpError as e:
        # A 404 just means "no such table"; anything else is a real error
        if int(e.resp['status']) == 404:
            logger.warn('Table %s.%s does not exist', dataset, table)
            return None
        raise

    return response['schema']['fields']
Return the table schema .
51,480
def check_job(self, job_id):
    """Return the state and number of results of a query by job id."""
    reply = self.get_query_results(job_id, offset=0, limit=0)
    complete = reply.get('jobComplete', False)
    total_rows = int(reply.get('totalRows', 0))
    return complete, total_rows
Return the state and number of results of a query by job id .
51,481
def get_query_rows(self, job_id, offset=None, limit=None, timeout=0):
    """Retrieve a list of rows from a query table by job id.

    This method will append results from multiple pages together. If you
    want to manually page through results, you can use `get_query_results`
    method directly.

    @raise UnfinishedQueryException: If the job has not completed yet.
    """
    # Get query results
    query_reply = self.get_query_results(job_id, offset=offset, limit=limit, timeout=timeout)
    if not query_reply['jobComplete']:
        logger.warning('BigQuery job %s not complete' % job_id)
        raise UnfinishedQueryException()

    schema = query_reply["schema"]["fields"]
    rows = query_reply.get('rows', [])
    page_token = query_reply.get("pageToken")
    records = [self._transform_row(row, schema) for row in rows]

    # Append to records if there are multiple pages for query results,
    # until the limit (if any) is satisfied
    while page_token and (not limit or len(records) < limit):
        query_reply = self.get_query_results(
            job_id, offset=offset, limit=limit, page_token=page_token, timeout=timeout)
        page_token = query_reply.get("pageToken")
        rows = query_reply.get('rows', [])
        records += [self._transform_row(row, schema) for row in rows]
    # Paging may overshoot the limit; trim the tail
    return records[:limit] if limit else records
Retrieve a list of rows from a query table by job id . This method will append results from multiple pages together . If you want to manually page through results you can use get_query_results method directly .
51,482
def check_dataset(self, dataset_id, project_id=None):
    """Check to see if a dataset exists."""
    # get_dataset returns {} for a missing dataset, so truthiness suffices
    return bool(self.get_dataset(dataset_id, project_id))
Check to see if a dataset exists .
51,483
def get_dataset(self, dataset_id, project_id=None):
    """Retrieve a dataset if it exists, otherwise return an empty dict."""
    project_id = self._get_project_id(project_id)
    try:
        return self.bigquery.datasets().get(
            projectId=project_id, datasetId=dataset_id
        ).execute(num_retries=self.num_retries)
    except HttpError:
        # Treat any API error as "not found"
        return {}
Retrieve a dataset if it exists otherwise return an empty dict .
51,484
def get_table(self, dataset, table, project_id=None):
    """Retrieve a table if it exists, otherwise return an empty dict."""
    project_id = self._get_project_id(project_id)
    try:
        return self.bigquery.tables().get(
            projectId=project_id, datasetId=dataset, tableId=table
        ).execute(num_retries=self.num_retries)
    except HttpError:
        # Treat any API error as "not found"
        return {}
Retrieve a table if it exists otherwise return an empty dict .
51,485
def create_table(self, dataset, table, schema, expiration_time=None, time_partitioning=False, project_id=None):
    """Create a new table in the dataset.

    @param schema: List of field dicts for the table schema.
    @param expiration_time: Optional epoch-millis expiration for the table.
    @param time_partitioning: Enable daily time partitioning when True.
    @return: With swallow_results: True/False; otherwise the table
        resource on success, or {} on failure.
    """
    project_id = self._get_project_id(project_id)

    body = {
        'schema': {'fields': schema},
        'tableReference': {
            'tableId': table,
            'projectId': project_id,
            'datasetId': dataset
        }
    }

    if expiration_time is not None:
        body['expirationTime'] = expiration_time

    if time_partitioning:
        body['timePartitioning'] = {'type': 'DAY'}

    try:
        table = self.bigquery.tables().insert(
            projectId=project_id, datasetId=dataset, body=body
        ).execute(num_retries=self.num_retries)
        if self.swallow_results:
            return True
        else:
            return table

    except HttpError as e:
        logger.error(('Cannot create table {0}.{1}.{2}\n'
                      'Http Error: {3}').format(project_id, dataset, table, e.content))
        if self.swallow_results:
            return False
        else:
            return {}
Create a new table in the dataset .
51,486
def patch_table(self, dataset, table, schema, project_id=None):
    """Patch an existing table in the dataset (partial schema update).

    @return: With swallow_results: True/False; otherwise the patched
        table resource on success, or {} on failure.
    """
    project_id = self._get_project_id(project_id)

    body = {
        'schema': {'fields': schema},
        'tableReference': {
            'tableId': table,
            'projectId': project_id,
            'datasetId': dataset
        }
    }

    try:
        result = self.bigquery.tables().patch(
            projectId=project_id, datasetId=dataset, body=body
        ).execute(num_retries=self.num_retries)
        if self.swallow_results:
            return True
        else:
            return result

    except HttpError as e:
        logger.error(('Cannot patch table {0}.{1}.{2}\n'
                      'Http Error: {3}').format(project_id, dataset, table, e.content))
        if self.swallow_results:
            return False
        else:
            return {}
Patch an existing table in the dataset .
51,487
def create_view(self, dataset, view, query, use_legacy_sql=None, project_id=None):
    """Create a new view in the dataset.

    @param query: SQL text defining the view.
    @param use_legacy_sql: Tri-state; only sent when not None.
    @return: With swallow_results: True/False; otherwise the view
        resource on success, or {} on failure.
    """
    project_id = self._get_project_id(project_id)

    body = {
        'tableReference': {
            'tableId': view,
            'projectId': project_id,
            'datasetId': dataset
        },
        'view': {
            'query': query
        }
    }

    if use_legacy_sql is not None:
        body['view']['useLegacySql'] = use_legacy_sql

    try:
        view = self.bigquery.tables().insert(
            projectId=project_id, datasetId=dataset, body=body
        ).execute(num_retries=self.num_retries)
        if self.swallow_results:
            return True
        else:
            return view

    except HttpError as e:
        logger.error(('Cannot create view {0}.{1}\n'
                      'Http Error: {2}').format(dataset, view, e.content))
        if self.swallow_results:
            return False
        else:
            return {}
Create a new view in the dataset .
51,488
def delete_table(self, dataset, table, project_id=None):
    """Delete a table from the dataset.

    @return: With swallow_results: True/False; otherwise the (empty)
        API response on success, or {} on failure.
    """
    project_id = self._get_project_id(project_id)

    try:
        response = self.bigquery.tables().delete(
            projectId=project_id, datasetId=dataset, tableId=table
        ).execute(num_retries=self.num_retries)
        if self.swallow_results:
            return True
        else:
            return response

    except HttpError as e:
        logger.error(('Cannot delete table {0}.{1}\n'
                      'Http Error: {2}').format(dataset, table, e.content))
        if self.swallow_results:
            return False
        else:
            return {}
Delete a table from the dataset .
51,489
def get_tables(self, dataset_id, app_id, start_time, end_time, project_id=None):
    """Retrieve a list of tables that are related to the given app id
    and are inside the range of start and end times.

    @param start_time: datetime or epoch seconds (inclusive lower bound).
    @param end_time: datetime or epoch seconds (inclusive upper bound).
    """
    # Normalize datetimes to UTC epoch seconds
    if isinstance(start_time, datetime):
        start_time = calendar.timegm(start_time.utctimetuple())
    if isinstance(end_time, datetime):
        end_time = calendar.timegm(end_time.utctimetuple())

    every_table = self._get_all_tables(dataset_id, project_id)
    app_tables = every_table.get(app_id, {})

    return self._filter_tables_by_time(app_tables, start_time, end_time)
Retrieve a list of tables that are related to the given app id and are inside the range of start and end times .
51,490
def wait_for_job(self, job, interval=5, timeout=60):
    """Poll BigQuery until the given job finishes or ``timeout`` elapses.

    ``job`` may be a job id (str/bytes/int) or a job resource dict with a
    ``jobReference``.  Polls every ``interval`` seconds.  Raises
    BigQueryTimeoutException when the deadline passes without the job
    reaching DONE; job-level errors are surfaced through
    _raise_executing_exception_if_error.  Returns the final job resource.
    """
    if isinstance(job, (six.binary_type, six.text_type, int)):
        job_id = str(job)
    else:
        job_id = str(job['jobReference']['jobId'])

    job_resource = None
    done = False
    started = time()

    # Sleep first, then poll; keep going while within the timeout budget.
    while not done and time() - started <= timeout:
        sleep(interval)
        job_resource = self.bigquery.jobs().get(
            projectId=self.project_id,
            jobId=job_id
        ).execute(num_retries=self.num_retries)
        # Turn any job-level error into an exception immediately.
        self._raise_executing_exception_if_error(job_resource)
        done = job_resource.get('status').get('state') == u'DONE'

    if not done:
        logger.error('BigQuery job %s timeout' % job_id)
        raise BigQueryTimeoutException()

    return job_resource
Wait until the job indicated by job_resource is done or has failed.
51,491
def push_rows(self, dataset, table, rows, insert_id_key=None,
              skip_invalid_rows=None, ignore_unknown_values=None,
              template_suffix=None, project_id=None):
    """Stream ``rows`` (a list of dicts) into ``dataset``.``table``.

    ``insert_id_key`` may be a dotted path into each row whose value is
    used as the de-duplication insertId.  The remaining optional flags
    map directly onto the tabledata().insertAll request body.  Returns
    True/False when ``swallow_results`` is set, otherwise the API
    response (or an insertErrors-shaped dict on HTTP failure).
    """
    project_id = self._get_project_id(project_id)

    payload_rows = []
    for row in rows:
        entry = {"json": row}
        if insert_id_key is not None:
            # Walk the dotted key path; a missing or falsy intermediate
            # value yields None and the insertId is simply omitted.
            value = row
            for key_part in insert_id_key.split('.'):
                value = value.get(key_part) if value else None
            if value is not None:
                entry["insertId"] = value
        payload_rows.append(entry)

    body = {
        "kind": "bigquery#tableDataInsertAllRequest",
        "rows": payload_rows,
    }
    if skip_invalid_rows is not None:
        body['skipInvalidRows'] = skip_invalid_rows
    if ignore_unknown_values is not None:
        body['ignoreUnknownValues'] = ignore_unknown_values
    if template_suffix is not None:
        body['templateSuffix'] = template_suffix

    try:
        response = self.bigquery.tabledata().insertAll(
            projectId=project_id,
            datasetId=dataset,
            tableId=table,
            body=body
        ).execute(num_retries=self.num_retries)
    except HttpError as e:
        logger.exception('Problem with BigQuery insertAll')
        if self.swallow_results:
            return False
        return {'insertErrors': [{'errors': [{
            'reason': 'httperror',
            'message': e
        }]}]}

    # A 200 response can still carry per-row failures.
    if response.get('insertErrors'):
        logger.error('BigQuery insert errors: %s' % response)
        return False if self.swallow_results else response

    return True if self.swallow_results else response
Upload rows to a BigQuery table.
51,492
def get_all_tables(self, dataset_id, project_id=None):
    """Return the table id of every table in ``dataset_id``.

    Entries without a tableReference/tableId are silently skipped.
    """
    listing = self._get_all_tables_for_dataset(dataset_id, project_id)
    names = (entry.get('tableReference', {}).get('tableId')
             for entry in listing.get('tables', []))
    return [name for name in names if name]
Retrieve a list of tables for the dataset.
51,493
def _get_all_tables_for_dataset(self, dataset_id, project_id=None):
    """Fetch the complete (paginated) tables listing for a dataset.

    Follows nextPageToken links, folding every subsequent page's
    ``tables`` into the first response, which is then returned.
    """
    project_id = self._get_project_id(project_id)

    result = self.bigquery.tables().list(
        projectId=project_id,
        datasetId=dataset_id
    ).execute(num_retries=self.num_retries)

    token = result.get('nextPageToken')
    while token:
        page = self.bigquery.tables().list(
            projectId=project_id,
            datasetId=dataset_id,
            pageToken=token
        ).execute(num_retries=self.num_retries)
        token = page.get('nextPageToken')
        result['tables'] += page.get('tables', [])

    return result
Retrieve a list of all tables for the dataset.
51,494
def _parse_table_list_response ( self , list_response ) : tables = defaultdict ( dict ) for table in list_response . get ( 'tables' , [ ] ) : table_ref = table . get ( 'tableReference' ) if not table_ref : continue table_id = table_ref . get ( 'tableId' , '' ) year_month , app_id = self . _parse_table_name ( table_id ) if not year_month : continue table_date = datetime . strptime ( year_month , '%Y-%m' ) unix_seconds = calendar . timegm ( table_date . timetuple ( ) ) tables [ app_id ] . update ( { table_id : unix_seconds } ) tables . default_factory = None return tables
Parse the response received from calling list on tables.
51,495
def _parse_table_name ( self , table_id ) : attributes = table_id . split ( '_' ) year_month = "-" . join ( attributes [ : 2 ] ) app_id = "-" . join ( attributes [ 2 : ] ) if year_month . count ( "-" ) == 1 and all ( [ num . isdigit ( ) for num in year_month . split ( '-' ) ] ) : return year_month , app_id attributes = table_id . split ( '_' ) year_month = "-" . join ( attributes [ - 2 : ] ) app_id = "-" . join ( attributes [ : - 2 ] ) if year_month . count ( "-" ) == 1 and all ( [ num . isdigit ( ) for num in year_month . split ( '-' ) ] ) and len ( year_month ) == 7 : return year_month , app_id return None , None
Parse a table name in the form of appid_YYYY_MM or YYYY_MM_appid and return a tuple consisting of YYYY-MM and the app id.
51,496
def _filter_tables_by_time ( self , tables , start_time , end_time ) : return [ table_name for ( table_name , unix_seconds ) in tables . items ( ) if self . _in_range ( start_time , end_time , unix_seconds ) ]
Filter a table dictionary and return table names based on the range of start and end times in unix seconds.
51,497
def _in_range ( self , start_time , end_time , time ) : ONE_MONTH = 2764800 return start_time <= time <= end_time or time <= start_time <= time + ONE_MONTH or time <= end_time <= time + ONE_MONTH
Indicate whether the given time falls inside the given range.
51,498
def _transform_row ( self , row , schema ) : log = { } for index , col_dict in enumerate ( schema ) : col_name = col_dict [ 'name' ] row_value = row [ 'f' ] [ index ] [ 'v' ] if row_value is None : log [ col_name ] = None continue if col_dict [ 'type' ] == 'RECORD' : row_value = self . _recurse_on_row ( col_dict , row_value ) elif col_dict [ 'type' ] == 'INTEGER' : row_value = int ( row_value ) elif col_dict [ 'type' ] == 'FLOAT' : row_value = float ( row_value ) elif col_dict [ 'type' ] == 'BOOLEAN' : row_value = row_value in ( 'True' , 'true' , 'TRUE' ) elif col_dict [ 'type' ] == 'TIMESTAMP' : row_value = float ( row_value ) log [ col_name ] = row_value return log
Apply the given schema to the given BigQuery data row.
51,499
def _recurse_on_row ( self , col_dict , nested_value ) : row_value = None if col_dict [ 'mode' ] == 'REPEATED' and isinstance ( nested_value , list ) : row_value = [ self . _transform_row ( record [ 'v' ] , col_dict [ 'fields' ] ) for record in nested_value ] else : row_value = self . _transform_row ( nested_value , col_dict [ 'fields' ] ) return row_value
Apply the schema specified by the given dict to the nested value by recursing on it.