idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
def exclude_sources(exclude, section=False):
    """Return a narrower copy of the global ``sources`` list.

    When *section* is False, each entry of *exclude* is simply removed.
    When *section* is True, the result is the tail of ``sources``
    starting just after the excluded source (empty if it was the last).
    """
    excluded = exclude if isinstance(exclude, list) else [exclude]
    remaining = sources.copy()
    for src in excluded:
        if section:
            cut = remaining.index(src) + 1
            if cut == len(sources):
                return []
            remaining = sources[cut:]
        else:
            remaining.remove(src)
    return remaining
def get_lyrics(song, l_sources=None):
    """Search for the lyrics of a single song, one source at a time.

    Stops at the first source that returns a non-empty string, timing
    each attempt.  Returns a Result(song, source, runtimes) where source
    is None when nothing was found, or None outright when the song
    already has lyrics and CONFIG['overwrite'] is off.
    """
    if l_sources is None:
        l_sources = sources
    if song.lyrics and not CONFIG['overwrite']:
        logger.debug('%s already has embedded lyrics', song)
        return None
    runtimes = {}
    source = None
    # NOTE(review): assumes l_sources is non-empty; `lyrics` would be
    # unbound below otherwise — confirm callers never pass [].
    for l_source in l_sources:
        start = time.time()
        try:
            lyrics = l_source(song)
        except (HTTPError, HTTPException, URLError, ConnectionError):
            # Network failures count as "no lyrics" for this source.
            lyrics = ''
        runtimes[l_source] = time.time() - start
        if lyrics != '':
            source = l_source
            break
    if lyrics != '':
        logger.info('++ %s: Found lyrics for %s\n', source.__name__, song)
        song.lyrics = lyrics
    else:
        logger.info("Couldn't find lyrics for %s\n", song)
        source = None
    return Result(song, source, runtimes)
def get_lyrics_threaded(song, l_sources=None):
    """Search for the lyrics of a single song using one thread per source.

    All source threads are started at once; results are drained from the
    shared queue and the first non-empty lyrics wins.  Returns a
    Result(song, source, runtimes), or None when the song already has
    lyrics and CONFIG['overwrite'] is off.
    """
    if l_sources is None:
        l_sources = sources
    if song.lyrics and not CONFIG['overwrite']:
        logger.debug('%s already has embedded lyrics', song)
        return None
    runtimes = {}
    queue = Queue()
    pool = [LyrThread(source, song, queue) for source in l_sources]
    for thread in pool:
        thread.start()
    # Drain at most one result per thread, stopping early on a hit.
    # NOTE(review): assumes the pool is non-empty; `result` would be
    # unbound below otherwise.
    for _ in range(len(pool)):
        result = queue.get()
        runtimes[result['source']] = result['runtime']
        if result['lyrics']:
            break
    if result['lyrics']:
        song.lyrics = result['lyrics']
        source = result['source']
    else:
        source = None
    return Result(song, source, runtimes)
def run(songs):
    """Fetch lyrics for a song or an iterable of songs.

    A single song is handled with get_lyrics_threaded; an iterable goes
    through the multiprocessing path (run_mp), after which stats and the
    total elapsed time are printed.
    """
    if not hasattr(songs, '__iter__'):
        result = get_lyrics_threaded(songs)
        process_result(result)
    else:
        start = time.time()
        stats = run_mp(songs)
        end = time.time()
        if CONFIG['print_stats']:
            stats.print_stats()
        total_time = end - start
        # Bug fix: minutes were computed as (total/3600)/60 and the
        # seconds term reduced to total % 3600.  Use a proper H:M:S split.
        hours, rem = divmod(int(total_time), 3600)
        minutes, seconds = divmod(rem, 60)
        total_time = '%d:%02d:%02d' % (hours, minutes, seconds)
        print(f'Total time: {total_time}')
def run_mp(songs):
    """Fetch lyrics for a list of songs with a process pool.

    Feeds get_lyrics through Pool.imap_unordered, accumulating per-source
    stats.  With CONFIG['debug'] set, song titles are appended to the
    'found'/'notfound' files as results arrive.  Returns the Stats object.
    """
    stats = Stats()
    if CONFIG['debug']:
        good = open('found', 'w')
        bad = open('notfound', 'w')
    logger.debug('Launching a pool of %d processes\n', CONFIG['jobcount'])
    chunksize = math.ceil(len(songs) / os.cpu_count())
    try:
        with Pool(CONFIG['jobcount']) as pool:
            for result in pool.imap_unordered(get_lyrics, songs, chunksize):
                if result is None:
                    # Song was skipped (already had lyrics).
                    continue
                for source, runtime in result.runtimes.items():
                    stats.add_result(source, result.source == source, runtime)
                found = process_result(result)
                if CONFIG['debug']:
                    if found:
                        # NOTE(review): `source` here is whatever key the
                        # runtimes loop ended on, not necessarily
                        # result.source — confirm this is intended.
                        good.write(f'{id_source(source)}: {result.song}\n')
                        good.flush()
                    else:
                        bad.write(str(result.song) + '\n')
                        bad.flush()
    finally:
        # The files only exist when debug mode opened them above.
        if CONFIG['debug']:
            good.close()
            bad.close()
    return stats
def parse_setup(options: Union[List, str]) -> str:
    """Normalise *options* into a single string.

    A string is returned untouched; a list of commands is joined with
    newlines into one block.
    """
    if not isinstance(options, str):
        options = "\n".join(options)
    return options
def create_scheduler_file(scheduler: str, job: Job) -> str:
    """Render a batch-scheduler submission script for *job*.

    The job's scheduler options become the header; the optional 'setup'
    entry is pulled out and substituted separately into the template.

    Raises:
        ValueError: when *scheduler* is neither 'slurm' nor 'pbs'
            (previously this fell through to a NameError).
    """
    logger.debug("Create Scheduler File Function")
    if job.scheduler_options is None:
        scheduler_options: Dict[str, Any] = {}
    else:
        # Copy so popping 'setup' never mutates the job's own options.
        scheduler_options = deepcopy(job.scheduler_options)
    try:
        setup_string = parse_setup(scheduler_options["setup"])
        del scheduler_options["setup"]
    except KeyError:
        setup_string = ""
    header_string = create_header_string(scheduler, **scheduler_options)
    header_string += get_array_string(scheduler, len(job))
    if scheduler.upper() == "SLURM":
        workdir = r"$SLURM_SUBMIT_DIR"
        array_index = r"$SLURM_ARRAY_TASK_ID"
    elif scheduler.upper() == "PBS":
        workdir = r"$PBS_O_WORKDIR"
        array_index = r"$PBS_ARRAY_INDEX"
    else:
        raise ValueError(f"Unsupported scheduler: {scheduler!r}")
    return header_string + SCHEDULER_TEMPLATE.format(
        workdir=workdir,
        command_list=job.as_bash_array(),
        setup=setup_string,
        array_index=array_index,
    )
def range_type_to_dtype(range_type: str) -> Optional[tf.DType]:
    """Map an RDDL range type name to the matching TensorFlow dtype.

    Returns None for unknown range types (the original raised KeyError,
    contradicting the Optional return annotation).
    """
    range2dtype = {
        'real': tf.float32,
        'int': tf.int32,
        'bool': tf.bool,
    }
    return range2dtype.get(range_type)
def python_type_to_dtype(python_type: type) -> Optional[tf.DType]:
    """Map a python scalar type to the matching TensorFlow dtype.

    Returns None when *python_type* is not exactly float, int, or bool.
    """
    # Type objects compare by identity, so a dict lookup is equivalent
    # to the original chain of == comparisons.
    mapping = {float: tf.float32, int: tf.int32, bool: tf.bool}
    return mapping.get(python_type)
def get_dependency_type(_type):
    """Translate a DependencyTypes member into its Slurm keyword.

    Returns None for unrecognised values.
    """
    mapping = {
        DependencyTypes.AFTER: 'after',
        DependencyTypes.AFTER_ANY: 'afterany',
        DependencyTypes.AFTER_CORR: 'aftercorr',
        DependencyTypes.AFTER_NOT_OK: 'afternotok',
        DependencyTypes.AFTER_OK: 'afterok',
    }
    return mapping.get(_type)
def generate(self, job):
    """Generate a Slurm submission script from a *job* object.

    Recognised options are popped off a copy of the job's options and
    turned into ``#SBATCH`` directive lines; entries under '__custom__'
    are appended verbatim.  Returns the full script text (shebang,
    header, directives and commands).
    """
    options = job.get_options().copy()
    job_name = options.pop('name', None)
    job_account = options.pop('account', None)
    job_walltime = options.pop('walltime', None)
    job_mem_per_cpu = options.pop('mem_per_cpu', None)
    job_memory = options.pop('memory', None)
    job_working_directory = options.pop('working_directory', None)
    job_error_path = options.pop('error_path', None)
    job_output_path = options.pop('output_path', None)
    job_dependency = options.pop('depending', None)
    job_shell = options.pop('shell', '/bin/bash')
    job_custom_options = options.pop('__custom__', [])
    directives_lines = []
    if job_name is not None:
        directives_lines.append('--job-name=%s' % job_name)
    if job_account is not None:
        directives_lines.append('--account=%s' % job_account)
    if job_working_directory is not None:
        directives_lines.append('--workdir=%s' % job_working_directory)
    if job_error_path is not None:
        directives_lines.append('--error=%s' % job_error_path)
    if job_output_path is not None:
        directives_lines.append('--output=%s' % job_output_path)
    if job_walltime is not None:
        directives_lines.append('--time=%s' % strfdelta(job_walltime, '%H:%M:%S'))
    if job_mem_per_cpu is not None:
        directives_lines.append('--mem-per-cpu=%d' % job_mem_per_cpu)
    if job_memory is not None:
        directives_lines.append('--mem=%d' % job_memory)
    if job_dependency is not None:
        master = job_dependency['job']
        dependency_type = SlurmPrinter.get_dependency_type(job_dependency['dependency_type'])
        # Accept either a job object or a raw scheduler job id.
        job_id = master.get_id() if isinstance(master, JobInterface) else master
        directives_lines.append('--dependency=%s:%s' % (dependency_type, job_id))
    for custom_option in job_custom_options:
        directives_lines.append(custom_option)
    directives = '\n'.join(['#SBATCH %s' % directive for directive in directives_lines])
    commands = '\n'.join(['\n'.join(command_container.get_commands()) for command_container in job.get_commands()])
    script = '#!%s\n' % job_shell
    script += SlurmPrinter.get_header()
    script += directives
    script += '\n\n'
    script += commands
    return script
def get_info_mpris2(name):
    """Return the currently playing Song from an MPRIS2 player over D-Bus.

    *name* is the player's bus-name suffix (e.g. 'clementine'); the
    Metadata property is read from the standard MediaPlayer2 interface.
    """
    bus_name = 'org.mpris.MediaPlayer2.' + name
    path = '/org/mpris/MediaPlayer2'
    interface = 'org.mpris.MediaPlayer2.Player'
    address = DBusAddress(path, bus_name=bus_name, interface=interface)
    msg = Properties(address).get('Metadata')
    connection = connect_and_authenticate()
    response = connection.send_and_get_reply(msg)
    metadata = dict(response[0][1])
    keys = ['album', 'title', 'artist', 'albumartist']
    info = {}
    # Only xesam:* entries carry the track metadata of interest.
    metadata = {k: v for k, v in metadata.items() if 'xesam:' in k}
    for key, value in metadata.items():
        # NOTE(review): this rebinding shadows the `name` parameter.
        name = key.split(':')[1].lower()
        # D-Bus variants arrive as (signature, value) pairs.
        value = value[1]
        if name not in keys or name in info:
            continue
        if isinstance(value, list):
            value = value[0]
        info[name] = value
    # Prefer the album artist over the track artist when present.
    if 'albumartist' in info:
        info['artist'] = info['albumartist']
        del info['albumartist']
    return Song(**info)
def get_current_clementine():
    """Return the song currently playing in clementine.

    Newer clementine versions implement MPRIS2; fall back to the legacy
    org.mpris.clementine interface when the MPRIS2 query fails.
    """
    try:
        return get_info_mpris2('clementine')
    except DBusErrorResponse:
        bus_name = 'org.mpris.clementine'
        path = '/Player'
        interface = 'org.freedesktop.MediaPlayer'
        return dbus_get_metadata(path, bus_name, interface)
def get_current_cmus():
    """Query cmus via ``cmus-remote -Q`` and build a Song from its tags."""
    wanted = ('album', 'title', 'artist', 'albumartist')
    proc = subprocess.run(
        ['cmus-remote', '-Q'],
        check=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL,
    )
    info = {}
    for raw in proc.stdout.decode().split('\n'):
        fields = raw.split(' ')
        # Only 'tag <key> <value...>' lines carry metadata.
        if fields[0] != 'tag':
            continue
        tag = fields[1]
        if tag in wanted and tag not in info:
            info[tag] = ' '.join(fields[2:])
    # Prefer the album artist over the track artist when present.
    if 'albumartist' in info:
        info['artist'] = info.pop('albumartist')
    return Song(**info)
def from_filename(cls, filename):
    """Alternate constructor: build a song from the path to an mp3 file.

    The file must carry readable ID3 tags (at least artist and title).
    Returns None when the file is missing, is a directory, or cannot be
    parsed by eyed3.
    """
    if not filename:
        logger.error('No filename specified')
        return None
    if not os.path.exists(filename):
        logger.error("Err: File '%s' does not exist", filename)
        return None
    if os.path.isdir(filename):
        logger.error("Err: File '%s' is a directory", filename)
        return None
    try:
        audiofile = eyed3.load(filename)
    except Exception as error:
        # Fix: was a bare print(); report through the module logger like
        # every other failure path in this constructor.
        logger.error('%s %s', type(error), error)
        return None
    if audiofile is None:
        # eyed3 returns None for files it cannot parse.
        return None
    tags = audiofile.tag
    album = tags.album
    title = tags.title
    lyrics = ''.join(lyric.text for lyric in tags.lyrics)
    # Prefer the album artist, falling back to the track artist.
    artist = tags.album_artist or tags.artist
    song = cls(artist, title, album, lyrics)
    song.filename = filename
    return song
def fetch_album_name(self):
    """Fill in ``self.album`` by querying the last.fm track.getInfo API.

    Logs a warning when the request fails or the response lacks the
    nested album structure.
    """
    response = get_lastfm('track.getInfo', artist=self.artist, track=self.title)
    if not response:
        logger.warning('Could not fetch album name for %s', self)
        return
    try:
        self.album = response['track']['album']['title']
    except (KeyError, TypeError):
        # Fix: narrowed from a bare `except Exception` — only a missing
        # or malformed nested structure is expected here.
        logger.warning('Could not fetch album name for %s', self)
    else:
        logger.debug('Found album %s from lastfm', self.album)
52,416 | def _get_closest_week ( self , metric_date ) : days_after_monday = metric_date . isoweekday ( ) - 1 return metric_date - datetime . timedelta ( days = days_after_monday ) | Gets the closest monday to the date provided . |
52,417 | def _get_daily_date_range ( self , metric_date , delta ) : dates = [ metric_date ] start_date = metric_date end_date = metric_date + delta while start_date . month < end_date . month or start_date . year < end_date . year : days_in_month = calendar . monthrange ( start_date . year , start_date . month ) [ 1 ] start_date = start_date + datetime . timedelta ( days = days_in_month - start_date . day + 1 ) dates . append ( start_date ) return dates | Get the range of months that we need to use as keys to scan redis . |
52,418 | def _get_weekly_date_range ( self , metric_date , delta ) : dates = [ metric_date ] end_date = metric_date + delta spanning_years = end_date . year - metric_date . year for i in range ( spanning_years ) : dates . append ( datetime . date ( year = metric_date . year + ( i + 1 ) , month = 1 , day = 1 ) ) return dates | Gets the range of years that we need to use as keys to get metrics from redis . |
def clear_all(self):
    """Delete every key under this backend's prefix from redis."""
    all_keys = self._analytics_backend.keys()
    # keys() returns one iterable per connection; walk them flattened.
    for key in itertools.chain.from_iterable(all_keys):
        with self._analytics_backend.map() as conn:
            if key.startswith(self._prefix):
                conn.delete(key)
def track_count(self, unique_identifier, metric, inc_amt=1, **kwargs):
    """Increment a plain overall counter for *metric*.

    Metrics tracked only this way cannot be queried by day, week or
    month.  Returns the backend's incr result.
    """
    counter_key = self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric)
    return self._analytics_backend.incr(counter_key, inc_amt)
def track_metric(self, unique_identifier, metric, date=None, inc_amt=1, **kwargs):
    """Track metric(s) for unique_identifier(s) on *date* (default today).

    Both *metric* and *unique_identifier* may be single values or
    lists/tuples/generators; every (identifier, metric) pair is
    incremented by *inc_amt* in the daily, weekly and monthly hashes and
    in the overall counter.  Returns the list of backend results.
    """
    # Python 2 style normalisation of scalars into lists.
    metric = [metric] if isinstance(metric, basestring) else metric
    unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
    results = []
    if date is None:
        date = datetime.date.today()
    with self._analytics_backend.map() as conn:
        for uid in unique_identifier:
            hash_key_daily = self._get_daily_metric_key(uid, date)
            closest_monday = self._get_closest_week(date)
            hash_key_weekly = self._get_weekly_metric_key(uid, date)
            for single_metric in metric:
                daily_metric_name = self._get_daily_metric_name(single_metric, date)
                weekly_metric_name = self._get_weekly_metric_name(single_metric, closest_monday)
                monthly_metric_name = self._get_monthly_metric_name(single_metric, date)
                # NOTE(review): monthly counts are stored under the
                # weekly hash key; get_metric_by_month reads them back
                # the same way, so this is consistent if surprising.
                results.append([
                    conn.hincrby(hash_key_daily, daily_metric_name, inc_amt),
                    conn.hincrby(hash_key_weekly, weekly_metric_name, inc_amt),
                    conn.hincrby(hash_key_weekly, monthly_metric_name, inc_amt),
                    conn.incr(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), inc_amt)
                ])
    return results
def get_metric_by_day(self, unique_identifier, metric, from_date, limit=30, **kwargs):
    """Return (series, results): *limit* daily values of *metric* for
    *unique_identifier*, starting at *from_date*.

    An already-open backend connection may be supplied via
    kwargs['connection'] to join an existing pipeline.
    """
    conn = kwargs.get("connection", None)
    date_generator = (from_date + datetime.timedelta(days=i) for i in itertools.count())
    # One redis hash per month covered by the requested window.
    metric_key_date_range = self._get_daily_date_range(from_date, datetime.timedelta(days=limit))
    series = list(itertools.islice(date_generator, limit))
    metric_keys = [self._get_daily_metric_name(metric, daily_date) for daily_date in series]
    metric_func = lambda conn: [conn.hmget(self._get_daily_metric_key(unique_identifier, metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
    if conn is not None:
        results = metric_func(conn)
    else:
        with self._analytics_backend.map() as conn:
            results = metric_func(conn)
    series, results = self._parse_and_process_metrics(series, results)
    return series, results
def get_metric_by_week(self, unique_identifier, metric, from_date, limit=10, **kwargs):
    """Return (series, results): *limit* weekly values of *metric* for
    *unique_identifier*, starting at the Monday on or before *from_date*.

    An already-open backend connection may be supplied via
    kwargs['connection'] to join an existing pipeline.
    """
    conn = kwargs.get("connection", None)
    closest_monday_from_date = self._get_closest_week(from_date)
    # One redis hash per year covered by the requested window.
    metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=limit))
    date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
    series = list(itertools.islice(date_generator, limit))
    metric_keys = [self._get_weekly_metric_name(metric, monday_date) for monday_date in series]
    metric_func = lambda conn: [conn.hmget(self._get_weekly_metric_key(unique_identifier, metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
    if conn is not None:
        results = metric_func(conn)
    else:
        with self._analytics_backend.map() as conn:
            results = metric_func(conn)
    series, results = self._parse_and_process_metrics(series, results)
    return series, results
def get_metric_by_month(self, unique_identifier, metric, from_date, limit=10, **kwargs):
    """Return (series, results): *limit* monthly values of *metric* for
    *unique_identifier*, starting from the 1st of *from_date*'s month.

    An already-open backend connection may be supplied via
    kwargs['connection'] to join an existing pipeline.
    """
    conn = kwargs.get("connection", None)
    first_of_month = datetime.date(year=from_date.year, month=from_date.month, day=1)
    metric_key_date_range = self._get_weekly_date_range(first_of_month, relativedelta(months=limit))
    date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
    series = list(itertools.islice(date_generator, limit))
    metric_keys = [self._get_monthly_metric_name(metric, month_date) for month_date in series]
    # NOTE(review): monthly values are stored under the weekly hash key
    # (mirroring track_metric), hence _get_weekly_metric_key here.
    metric_func = lambda conn: [conn.hmget(self._get_weekly_metric_key(unique_identifier, metric_key_date), metric_keys) for metric_key_date in metric_key_date_range]
    if conn is not None:
        results = metric_func(conn)
    else:
        with self._analytics_backend.map() as conn:
            results = metric_func(conn)
    series, results = self._parse_and_process_metrics(series, results)
    return series, results
def get_count(self, unique_identifier, metric, start_date=None, end_date=None, **kwargs):
    """Return the total count of *metric* for *unique_identifier*.

    With both start_date and end_date given, sums counts within that
    (inclusive) range — whole months in the middle are read monthly and
    the partial ends daily; short ranges are summed day by day.  Without
    a range, the overall counter is read directly (0 when missing).
    """
    result = None
    if start_date and end_date:
        # Normalise ordering and coerce plain dates to datetimes.
        start_date, end_date = (start_date, end_date,) if start_date < end_date else (end_date, start_date,)
        start_date = start_date if hasattr(start_date, 'date') else datetime.datetime.combine(start_date, datetime.time())
        end_date = end_date if hasattr(end_date, 'date') else datetime.datetime.combine(end_date, datetime.time())
        # All first-of-month dates inside the range.
        monthly_metrics_dates = list(rrule.rrule(rrule.MONTHLY, dtstart=start_date, bymonthday=1, until=end_date))
        if len(monthly_metrics_dates) >= 3:
            with self._analytics_backend.map() as conn:
                monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results = self._get_counts(conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date)
            monthly_metric_series, monthly_metric_results = self._parse_and_process_metrics(monthly_metric_series, monthly_metric_results)
            starting_metric_series, starting_metric_results = self._parse_and_process_metrics(starting_metric_series, starting_metric_results)
            ending_metric_series, ending_metric_results = self._parse_and_process_metrics(ending_metric_series, ending_metric_results)
            result = sum(monthly_metric_results.values()) + sum(starting_metric_results.values()) + sum(ending_metric_results.values())
        else:
            # Short range: just sum the individual days.
            diff = end_date - start_date
            metric_results = self.get_metric_by_day(unique_identifier, metric, start_date, limit=diff.days + 1)
            result = sum(metric_results[1].values())
    else:
        try:
            result = int(self._analytics_backend.get(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric,)))
        except TypeError:
            # Missing key: the backend returned None.
            result = 0
    return result
def set_metric_by_day(self, unique_identifier, metric, date, count, sync_agg=True, update_counter=True):
    """Set the daily count of metric(s) to *count* for *date*.

    Useful for resetting a metric to 0 or decrementing it.  With
    update_counter, the overall counter is shifted by the delta between
    the new and old daily count.  With sync_agg, the weekly and monthly
    aggregates are recomputed afterwards.
    """
    # Python 2 style normalisation of scalars into lists.
    metric = [metric] if isinstance(metric, basestring) else metric
    unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
    results = []
    with self._analytics_backend.map() as conn:
        for uid in unique_identifier:
            hash_key_daily = self._get_daily_metric_key(uid, date)
            for single_metric in metric:
                daily_metric_name = self._get_daily_metric_name(single_metric, date)
                if update_counter:
                    # Read current values outside the pipelined `conn`
                    # so the delta is based on committed data.
                    overall_count = self.get_count(uid, single_metric)
                    day, daily_count = self.get_metric_by_day(uid, single_metric, date, 1)[1].popitem()
                    self._analytics_backend.set(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), overall_count + (count - daily_count))
                results.append([conn.hset(hash_key_daily, daily_metric_name, count)])
    if sync_agg:
        self.sync_agg_metric(unique_identifier, metric, date, date)
    return results
def sync_agg_metric(self, unique_identifier, metric, start_date, end_date):
    """Recompute the weekly and monthly counters for *metric* from the
    stored daily counts over [start_date, end_date].

    Intended for use after set_metric_by_day has changed daily values.
    """
    for sync in (self.sync_week_metric, self.sync_month_metric):
        sync(unique_identifier, metric, start_date, end_date)
def sync_week_metric(self, unique_identifier, metric, start_date, end_date):
    """Recalculate the weekly counters for metric(s) from the stored
    daily counts between start_date and end_date.

    Accepts single values or lists for both *metric* and
    *unique_identifier*.
    """
    # Python 2 style normalisation of scalars into lists.
    metric = [metric] if isinstance(metric, basestring) else metric
    unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
    closest_monday_from_date = self._get_closest_week(start_date)
    num_weeks = self._num_weeks(start_date, end_date)
    # Fix: a dead _get_weekly_date_range() call (result never used) was
    # removed here.
    week_date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7))
    weeks_to_update = list(itertools.islice(week_date_generator, num_weeks))
    for uid in unique_identifier:
        for single_metric in metric:
            for week in weeks_to_update:
                # Sum the seven daily counts belonging to this week.
                _, series_results = self.get_metric_by_day(uid, single_metric, from_date=week, limit=7)
                week_counter = sum(value for value in series_results.values())
                hash_key_weekly = self._get_weekly_metric_key(uid, week)
                weekly_metric_name = self._get_weekly_metric_name(single_metric, week)
                with self._analytics_backend.map() as conn:
                    conn.hset(hash_key_weekly, weekly_metric_name, week_counter)
def sync_month_metric(self, unique_identifier, metric, start_date, end_date):
    """Recalculate the monthly counters for metric(s) from the stored
    daily counts between start_date and end_date.

    Accepts single values or lists for both *metric* and
    *unique_identifier*.
    """
    # Python 2 style normalisation of scalars into lists.
    metric = [metric] if isinstance(metric, basestring) else metric
    unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
    num_months = self._num_months(start_date, end_date)
    first_of_month = datetime.date(year=start_date.year, month=start_date.month, day=1)
    # Fix: a dead _get_weekly_date_range() call (result never used) was
    # removed here.
    month_date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count())
    months_to_update = list(itertools.islice(month_date_generator, num_months))
    for uid in unique_identifier:
        for single_metric in metric:
            for month in months_to_update:
                # Sum every daily count in the month.
                _, series_results = self.get_metric_by_day(uid, single_metric, from_date=month, limit=monthrange(month.year, month.month)[1])
                month_counter = sum(value for value in series_results.values())
                # NOTE(review): monthly counters live under the "weekly"
                # hash key, mirroring track_metric/get_metric_by_month.
                hash_key_monthly = self._get_weekly_metric_key(uid, month)
                monthly_metric_name = self._get_monthly_metric_name(single_metric, month)
                with self._analytics_backend.map() as conn:
                    conn.hset(hash_key_monthly, monthly_metric_name, month_counter)
def is_full_mxid(user_string):
    """Return True when *user_string* looks like a full Matrix user id.

    A full mxid has the form ``@localpart:domain`` where the localpart
    is limited to lowercase letters, digits and ``._-=``.
    """
    # Fix: guard the empty string before indexing (was an IndexError).
    if not user_string or user_string[0] != "@":
        return False
    parts = user_string[1:].split(":")
    localpart_chars = ascii_lowercase + digits + "._-="
    if len(parts) != 2:
        return False
    return all(char in localpart_chars for char in parts[0])
def intent(method):
    """Decorator: retry *method* once after handling a MatrixRequestError.

    Any other MatrixError cause is re-raised untouched.
    """
    def wrapper(self, *args, **kwargs):
        try:
            return method(self, *args, **kwargs)
        except exceptions.MatrixError as err:
            original = err.original_exception
            if not isinstance(original, matrix_client.errors.MatrixRequestError):
                raise err
            # Let the object recover (e.g. refresh state), then retry once.
            self._handle_request_exception(err)
            return method(self, *args, **kwargs)
    return wrapper
def get_variables(self) -> Set[str]:
    """Collect the names of all format-string fields used in the
    commands, excluding the reserved 'creates' and 'requires' fields.
    """
    variables = set()
    for cmd in self._cmd:
        # string.Formatter.parse yields tuples of
        # (literal_text, field_name, format_spec, conversion);
        # field_name is None for literal-only chunks.
        for var in self.__formatter.parse(cmd):
            logger.debug("Checking variable: %s", var)
            if var[1] is not None and var[1] not in ["creates", "requires"]:
                variables.add(var[1])
    return variables
def as_bash_array(self) -> str:
    """Render the commands as a multi-line bash array literal."""
    lines = ['( \\']
    # One double-quoted element per command, each line continued.
    lines.extend('"%s" \\' % (command,) for command in self)
    lines.append(')')
    return '\n'.join(lines)
def combine_dictionaries(dicts: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Merge a list of dictionaries into one.

    Earlier dictionaries take precedence over later ones, matching
    ChainMap semantics.
    """
    merged: Dict[str, Any] = {}
    # Updating in reverse order lets the first dict win on conflicts.
    for single in reversed(dicts):
        merged.update(single)
    return merged
def iterator_zip(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """Expand *variables* with the zip operator.

    A list yields one zipped matrix per item; anything else yields a
    single zipped matrix.
    """
    logger.debug("Yielding from zip iterator")
    entries = variables if isinstance(variables, list) else [variables]
    for entry in entries:
        yield list(variable_matrix(entry, parent, "zip"))
def iterator_product(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """Expand *variables* with the product operator.

    Only mappings are accepted; a list raises ValueError.
    """
    logger.debug("Yielding from product iterator")
    if isinstance(variables, list):
        raise ValueError(
            f"Product only takes mappings of values, got {variables} of type {type(variables)}"
        )
    expanded = variable_matrix(variables, parent, "product")
    yield list(expanded)
def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """Append the expansion of each list element into one flat matrix.

    Only lists are accepted; anything else raises ValueError.
    """
    logger.debug("Yielding from append iterator")
    if not isinstance(variables, list):
        raise ValueError(
            f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}"
        )
    # Flatten the per-item matrices into a single list of combinations.
    combined = []
    for item in variables:
        combined.extend(variable_matrix(item, parent, "product"))
    yield combined
def iterator_cycle(variables: VarType, parent: str) -> Iterable[VarMatrix]:
    """Repeat the product expansion of *variables* ``times`` times.

    Raises:
        ValueError: when *variables* is not a dict or lacks a truthy
            'times' entry.
    """
    if not isinstance(variables, dict):
        raise ValueError(
            f"The repeat operator only takes a dict as arguments, got {variables} of type {type(variables)}"
        )
    # Note: a falsy value (including an explicit times=0) is rejected,
    # unchanged from the original behaviour.
    if not variables.get("times"):
        # Fix: dropped pointless f-prefix on a placeholder-free string.
        raise ValueError("times is a required keyword for the repeat iterator.")
    times = int(variables["times"])
    del variables["times"]
    yield list(variable_matrix(variables, parent, "product")) * times
def variable_matrix(variables: VarType, parent: str = None, iterator: str = "product") -> Iterable[Dict[str, YamlValue]]:
    """Expand a variables specification into individual combinations.

    Dicts are combined along each key with the given *iterator*
    ('product' or 'zip'); the special operator keys (zip, product,
    arange, chain/append, cycle/repeat) are dispatched to their iterator
    functions.  Lists are expanded element-wise; scalars yield a single
    {parent: value} mapping.
    """
    _iters: Dict[str, Callable] = {"product": product, "zip": zip}
    _special_keys: Dict[str, Callable[[VarType, Any], Iterable[VarMatrix]]] = {
        "zip": iterator_zip,
        "product": iterator_product,
        "arange": iterator_arange,
        "chain": iterator_chain,
        "append": iterator_chain,
        "cycle": iterator_cycle,
        "repeat": iterator_cycle,
    }
    if isinstance(variables, dict):
        key_vars: List[List[Dict[str, YamlValue]]] = []
        # NOTE(review): operator keys are deleted from the caller's dict
        # as they are consumed — this mutates the input mapping.
        for key, function in _special_keys.items():
            if variables.get(key):
                item = variables[key]
                assert item is not None
                for val in function(item, parent):
                    key_vars.append(val)
                del variables[key]
        for key, value in variables.items():
            key_vars.append(list(variable_matrix(value, key, iterator)))
        logger.debug("key vars: %s", key_vars)
        # Combine one dict per axis into a single flat dict.
        for i in _iters[iterator](*key_vars):
            logger.debug("dicts: %s", i)
            yield combine_dictionaries(i)
    elif isinstance(variables, list):
        for item in variables:
            yield from variable_matrix(item, parent, iterator)
    else:
        # Leaf value: requires a parent key to attach it to.
        assert parent is not None
        yield {parent: variables}
def uniqueify(my_list: Any) -> List[Any]:
    """Drop duplicate entries from *my_list*, keeping first-seen order."""
    if sys.version_info >= (3, 6):
        # dicts preserve insertion order from 3.6, giving an O(n) dedupe.
        return list(dict.fromkeys(my_list))
    observed = set()
    result = []
    for element in my_list:
        if element not in observed:
            observed.add(element)
            result.append(element)
    return result
def process_command(command: CommandInput, matrix: VarMatrix) -> List[Command]:
    """Instantiate one Command per variable combination in *matrix*.

    *command* may be a plain string/list, or a mapping carrying the
    command under 'command' (or 'cmd') plus optional 'creates' and
    'requires' entries.  Duplicates are removed, retaining order.
    """
    assert command is not None
    if isinstance(command, (str, list)):
        # The original handled str and list with identical branches.
        command_list = [Command(command, variables=variables) for variables in matrix]
    else:
        cmd = command.get("command")
        if cmd is None:
            cmd = command.get("cmd")
        creates = str(command.get("creates", ""))
        requires = str(command.get("requires", ""))
        assert isinstance(cmd, (list, str))
        command_list = [Command(cmd, variables, creates, requires) for variables in matrix]
    return uniqueify(command_list)
def read_file(filename: PathLike = "experiment.yml") -> Dict[str, Any]:
    """Read *filename* as YAML and return the parsed structure."""
    logger.debug("Input file: %s", filename)
    with open(filename, "r") as stream:
        return yaml.safe_load(stream)
def run_bash_jobs(jobs: Iterator[Job], directory: PathLike = Path.cwd(), dry_run: bool = False) -> None:
    """Run each job's commands through its configured shell.

    Commands execute sequentially in *directory*; after the first
    failing command the remaining work is abandoned.  With *dry_run*
    the commands are printed instead of executed.

    Raises:
        ProcessLookupError: when a job's shell is not installed.
    """
    # NOTE(review): the Path.cwd() default is evaluated once at import
    # time, not per call — confirm that is intended.
    logger.debug("Running commands in bash shell")
    for job in jobs:
        if shutil.which(job.shell) is None:
            raise ProcessLookupError(f"The shell '{job.shell}' was not found.")
        failed = False
        for command in job:
            for cmd in command:
                logger.info(cmd)
                if dry_run:
                    print(f"{job.shell} -c '{cmd}'")
                else:
                    result = subprocess.run([job.shell, "-c", f"{cmd}"], cwd=str(directory))
                    if result.returncode != 0:
                        failed = True
                        logger.error("Command failed: %s", command)
                        break
            if failed:
                logger.error("A command failed, not continuing further.")
                return
52,444 | def run_scheduler_jobs ( scheduler : str , jobs : Iterator [ Job ] , directory : PathLike = Path . cwd ( ) , basename : str = "experi" , dry_run : bool = False , ) -> None : submit_job = True logger . debug ( "Creating commands in %s files." , scheduler ) if scheduler == "pbs" : submit_executable = "qsub" elif scheduler == "slurm" : submit_executable = "sbatch" else : raise ValueError ( "scheduler can only take values ['pbs', 'slurm']" ) if shutil . which ( submit_executable ) is None : logger . warning ( "The `%s` command is not found." "Skipping job submission and just generating files" , submit_executable , ) submit_job = False directory = Path ( directory ) for fname in directory . glob ( basename + f"*.{scheduler}" ) : print ( "Removing {}" . format ( fname ) ) os . remove ( str ( fname ) ) prev_jobids : List [ str ] = [ ] for index , job in enumerate ( jobs ) : content = create_scheduler_file ( scheduler , job ) logger . debug ( "File contents:\n%s" , content ) fname = Path ( directory / "{}_{:02d}.{}" . format ( basename , index , scheduler ) ) with fname . open ( "w" ) as dst : dst . write ( content ) if submit_job or dry_run : submit_cmd = [ submit_executable ] if prev_jobids : afterok = f"afterok:{':'.join(prev_jobids)}" if scheduler == "pbs" : submit_cmd += [ "-W" , f"depend={afterok}" ] elif scheduler == "slurm" : submit_cmd += [ "--dependency" , afterok ] logger . info ( str ( submit_cmd ) ) try : if dry_run : print ( f"{submit_cmd} {fname.name}" ) prev_jobids . append ( "dry_run" ) else : cmd_res = subprocess . check_output ( submit_cmd + [ fname . name ] , cwd = str ( directory ) ) prev_jobids . append ( cmd_res . decode ( ) . strip ( ) ) except subprocess . CalledProcessError : logger . error ( "Submitting job to the queue failed." ) break | Submit a series of commands to a batch scheduler . |
52,445 | def determine_scheduler ( scheduler : Optional [ str ] , experiment_definition : Dict [ str , YamlValue ] ) -> str : if scheduler is not None : if scheduler in [ "shell" , "pbs" , "slurm" ] : return scheduler raise ValueError ( "Argument scheduler only supports input values of ['shell', 'pbs', 'slurm']" ) if experiment_definition . get ( "pbs" ) : return "pbs" if experiment_definition . get ( "slurm" ) : return "slurm" if experiment_definition . get ( "shell" ) : return "shell" if shutil . which ( "pbs" ) is not None : return "pbs" if shutil . which ( "slurm" ) is not None : return "slurm" return "shell" | Determine the scheduler to use to run the jobs . |
52,446 | def safe_type ( self , data , tree ) : if not isinstance ( data , list ) : name = self . __class__ . __name__ msg = "did not pass validation against callable: %s" % name reason = 'expected a list but got %s' % safe_repr ( data ) raise Invalid ( self . schema , tree , reason = reason , pair = 'value' , msg = msg ) | Make sure that the incoming data complies with the class type we are expecting it to be . In this case classes that inherit from this base class expect data to be of type list . |
52,447 | def get_context ( context ) : new_context = { 'model' : context [ 'model' ] , 'session' : context [ 'session' ] , 'user' : context . get ( 'user' ) , 'ignore_auth' : context . get ( 'ignore_auth' , False ) , 'use_cache' : context . get ( 'use_cache' , False ) , } if 'validate' in context : new_context [ 'validate' ] = context [ 'validate' ] return new_context | An internal context generator . Accepts a CKAN context . |
52,448 | def re_sort ( data ) : keys = sorted ( data . keys ( ) ) new_data = { } for number , key in enumerate ( keys ) : new_data [ number ] = data [ key ] return new_data | A data with keys that are not enumerated sequentially will be re sorted and sequentially ordered . |
52,449 | def ensure ( assertion , message = None ) : message = message or assertion if not assertion : raise AssertionError ( message ) return True | Checks an assertion argument for truth - ness . Will return True or explicitly raise AssertionError . This is to deal with environments using python - O or PYTHONOPTIMIZE = . |
52,450 | def fluent_shape ( self ) -> Sequence [ int ] : return tuple ( self . _shape . as_list ( ) [ 1 : ] if self . _batch else self . _shape . as_list ( ) [ : ] ) | Returns a copy of the fluent shape ignoring batch size if in batch mode . |
52,451 | def broadcast ( cls , shape1 : 'TensorFluentShape' , shape2 : 'TensorFluentShape' ) -> Tuple [ Reshaping , Reshaping ] : reshape_1 , reshape_2 = None , None if not ( shape1 . _batch or shape2 . _batch ) : return reshape_1 , reshape_2 size_1 , size_2 = shape1 . fluent_size , shape2 . fluent_size size_diff = abs ( size_1 - size_2 ) if size_diff == 0 : return reshape_1 , reshape_2 if size_2 > size_1 and not ( size_1 == 0 and not shape1 . _batch ) : reshape_1 = [ 1 ] * size_diff + list ( shape1 . fluent_shape ) if shape1 . _batch : reshape_1 = [ shape1 . batch_size ] + reshape_1 elif size_1 > size_2 and not ( size_2 == 0 and not shape2 . _batch ) : reshape_2 = [ 1 ] * size_diff + list ( shape2 . fluent_shape ) if shape2 . _batch : reshape_2 = [ shape2 . batch_size ] + reshape_2 return reshape_1 , reshape_2 | It broadcasts the fluent shapes if any input is in batch mode . |
52,452 | def run ( bam , chrom , pos1 , pos2 , reffa , chr_reffa , parameters ) : is_chr_query = chrom . startswith ( 'chr' ) if is_chr_query and chr_reffa is None : chr_reffa = reffa bam_header = subprocess . check_output ( "samtools view -H {}" . format ( bam ) , shell = True ) is_chr_bam = bam_header . find ( 'SN:chr' ) != - 1 if is_chr_bam : reffa = chr_reffa if not is_chr_query and is_chr_bam : chrom = 'chr' + chrom if is_chr_query and not is_chr_bam : chrom = re . sub ( r'^chr' , '' , chrom ) posmin = min ( pos1 , pos2 ) posmax = max ( pos1 , pos2 ) cmd = "samtools view -bh {bam} {chrom}:{pos1}-{pos2} " "| samtools mpileup {parameters} -f {reffa} -" . format ( bam = bam , chrom = chrom , pos1 = posmin , pos2 = posmax , reffa = reffa , parameters = parameters ) if pos1 == pos2 : cmd += " | awk '$2 == {pos}'" . format ( pos = pos1 ) else : cmd += " | tail -n +2 | awk '$2 >= {posmin} && $2 <= {posmax}'" . format ( posmin = posmin , posmax = posmax ) sys . stderr . write ( "Running:\n{}\n" . format ( cmd ) ) child = subprocess . Popen ( cmd , shell = True , stdout = subprocess . PIPE ) stdout , stderr = child . communicate ( ) if child . returncode != 0 : if len ( stdout ) == 0 and stderr is None : warnings . warn ( "Command:\n{cmd}\n did not exit with zero exit code. " "Possibly no coverage for sample." . format ( cmd = cmd ) ) else : raise ( Exception ( "Command:\n{cmd}\n did not exit with zero exit code. " "Check command." . format ( cmd = cmd ) ) ) else : return stdout | Run mpileup on given chrom and pos |
52,453 | def parse_iso8601_date ( string ) : match = _RE_ISO8601_DATE . search ( string ) if not match : raise ValueError ( 'Expected ISO 8601 date' ) year = int ( match . group ( 'year' ) ) month = int ( match . group ( 'month' ) ) day = int ( match . group ( 'day' ) ) return date ( year , month , day ) | Parse an ISO 8601 date string |
52,454 | def import_submodules ( package , parent_package = None , exclude_submodules = None ) : exclude_submodules_dot = [ x + '.' for x in exclude_submodules ] if exclude_submodules else exclude_submodules package = importlib . import_module ( package , parent_package ) for _ , name , _ in pkgutil . walk_packages ( package . __path__ , package . __name__ + '.' ) : if exclude_submodules and ( name in exclude_submodules or any ( name . startswith ( x ) for x in exclude_submodules_dot ) ) : continue yield importlib . import_module ( name ) | Generator which imports all submodules of a module recursively including subpackages |
52,455 | def compile_initial_state ( self , batch_size : Optional [ int ] = None ) -> Sequence [ tf . Tensor ] : with self . graph . as_default ( ) : with tf . name_scope ( 'initial_state' ) : self . _initialize_initial_state_fluents ( ) if batch_size is None : return self . initial_state_fluents return self . _compile_batch_fluents ( self . initial_state_fluents , batch_size ) | Returns a tuple of tensors representing the initial state fluents . |
52,456 | def compile_default_action ( self , batch_size : Optional [ int ] = None ) -> Sequence [ tf . Tensor ] : with self . graph . as_default ( ) : with tf . name_scope ( 'default_action' ) : self . _initialize_default_action_fluents ( ) if batch_size is None : return self . default_action_fluents return self . _compile_batch_fluents ( self . default_action_fluents , batch_size ) | Returns a tuple of tensors representing the default action fluents . |
52,457 | def cpfs ( self , state : Sequence [ tf . Tensor ] , action : Sequence [ tf . Tensor ] , noise : Optional [ Noise ] = None ) -> Tuple [ List [ TensorFluent ] , List [ TensorFluent ] ] : scope = self . transition_scope ( state , action ) batch_size = int ( state [ 0 ] . shape [ 0 ] ) interm_fluents , next_state_fluents = self . compile_cpfs ( scope , batch_size , noise ) interms = [ fluent for _ , fluent in interm_fluents ] next_state = [ fluent for _ , fluent in next_state_fluents ] return interms , next_state | Compiles the intermediate and next state fluent CPFs given the current state and action . |
52,458 | def reward ( self , state : Sequence [ tf . Tensor ] , action : Sequence [ tf . Tensor ] , next_state : Sequence [ tf . Tensor ] ) -> tf . Tensor : scope = self . reward_scope ( state , action , next_state ) r = self . compile_reward ( scope ) . tensor with self . graph . as_default ( ) : with tf . name_scope ( 'reward' ) : return tf . expand_dims ( r , - 1 ) | Compiles the reward function given the current state action and next_state . |
52,459 | def compile_cpfs ( self , scope : Dict [ str , TensorFluent ] , batch_size : Optional [ int ] = None , noise : Optional [ Noise ] = None ) -> Tuple [ List [ CPFPair ] , List [ CPFPair ] ] : interm_fluents = self . compile_intermediate_cpfs ( scope , batch_size , noise ) scope . update ( dict ( interm_fluents ) ) next_state_fluents = self . compile_state_cpfs ( scope , batch_size , noise ) return interm_fluents , next_state_fluents | Compiles the intermediate and next state fluent CPFs given the current state and action scope . |
52,460 | def compile_intermediate_cpfs ( self , scope : Dict [ str , TensorFluent ] , batch_size : Optional [ int ] = None , noise : Optional [ Noise ] = None ) -> List [ CPFPair ] : interm_fluents = [ ] with self . graph . as_default ( ) : with tf . name_scope ( 'intermediate_cpfs' ) : for cpf in self . rddl . domain . intermediate_cpfs : cpf_noise = noise . get ( cpf . name , None ) if noise is not None else None name_scope = utils . identifier ( cpf . name ) with tf . name_scope ( name_scope ) : t = self . _compile_expression ( cpf . expr , scope , batch_size , cpf_noise ) interm_fluents . append ( ( cpf . name , t ) ) scope [ cpf . name ] = t return interm_fluents | Compiles the intermediate fluent CPFs given the current state and action scope . |
52,461 | def compile_state_cpfs ( self , scope : Dict [ str , TensorFluent ] , batch_size : Optional [ int ] = None , noise : Optional [ Noise ] = None ) -> List [ CPFPair ] : next_state_fluents = [ ] with self . graph . as_default ( ) : with tf . name_scope ( 'state_cpfs' ) : for cpf in self . rddl . domain . state_cpfs : cpf_noise = noise . get ( cpf . name , None ) if noise is not None else None name_scope = utils . identifier ( cpf . name ) with tf . name_scope ( name_scope ) : t = self . _compile_expression ( cpf . expr , scope , batch_size , cpf_noise ) next_state_fluents . append ( ( cpf . name , t ) ) key = lambda f : self . rddl . domain . next_state_fluent_ordering . index ( f [ 0 ] ) next_state_fluents = sorted ( next_state_fluents , key = key ) return next_state_fluents | Compiles the next state fluent CPFs given the current state and action scope . |
52,462 | def compile_reward ( self , scope : Dict [ str , TensorFluent ] ) -> TensorFluent : reward_expr = self . rddl . domain . reward with self . graph . as_default ( ) : with tf . name_scope ( 'reward' ) : return self . _compile_expression ( reward_expr , scope ) | Compiles the reward function given the fluent scope . |
52,463 | def compile_state_action_constraints ( self , state : Sequence [ tf . Tensor ] , action : Sequence [ tf . Tensor ] ) -> List [ TensorFluent ] : scope = self . transition_scope ( state , action ) constraints = [ ] with self . graph . as_default ( ) : with tf . name_scope ( 'state_action_constraints' ) : for p in self . rddl . domain . constraints : fluent = self . _compile_expression ( p , scope ) constraints . append ( fluent ) return constraints | Compiles the state - action constraints given current state and action fluents . |
52,464 | def compile_action_preconditions ( self , state : Sequence [ tf . Tensor ] , action : Sequence [ tf . Tensor ] ) -> List [ TensorFluent ] : scope = self . action_precondition_scope ( state , action ) preconds = [ ] with self . graph . as_default ( ) : with tf . name_scope ( 'action_preconditions' ) : for p in self . rddl . domain . preconds : fluent = self . _compile_expression ( p , scope ) preconds . append ( fluent ) return preconds | Compiles the action preconditions given current state and action fluents . |
52,465 | def compile_state_invariants ( self , state : Sequence [ tf . Tensor ] ) -> List [ TensorFluent ] : scope = self . state_invariant_scope ( state ) invariants = [ ] with self . graph . as_default ( ) : with tf . name_scope ( 'state_invariants' ) : for p in self . rddl . domain . invariants : fluent = self . _compile_expression ( p , scope ) invariants . append ( fluent ) return invariants | Compiles the state invarints given current state fluents . |
52,466 | def compile_action_preconditions_checking ( self , state : Sequence [ tf . Tensor ] , action : Sequence [ tf . Tensor ] ) -> tf . Tensor : with self . graph . as_default ( ) : with tf . name_scope ( 'action_preconditions_checking' ) : preconds = self . compile_action_preconditions ( state , action ) all_preconds = tf . stack ( [ p . tensor for p in preconds ] , axis = 1 ) checking = tf . reduce_all ( all_preconds , axis = 1 ) return checking | Combines the action preconditions into an applicability checking op . |
52,467 | def compile_action_bound_constraints ( self , state : Sequence [ tf . Tensor ] ) -> Dict [ str , Bounds ] : scope = self . action_precondition_scope ( state ) lower_bounds = self . rddl . domain . action_lower_bound_constraints upper_bounds = self . rddl . domain . action_upper_bound_constraints with self . graph . as_default ( ) : with tf . name_scope ( 'action_bound_constraints' ) : bounds = { } for name in self . rddl . domain . action_fluent_ordering : lower_expr = lower_bounds . get ( name ) lower = None if lower_expr is not None : with tf . name_scope ( 'lower_bound' ) : lower = self . _compile_expression ( lower_expr , scope ) upper_expr = upper_bounds . get ( name ) upper = None if upper_expr is not None : with tf . name_scope ( 'upper_bound' ) : upper = self . _compile_expression ( upper_expr , scope ) bounds [ name ] = ( lower , upper ) return bounds | Compiles all actions bounds for the given state . |
52,468 | def non_fluents_scope ( self ) -> Dict [ str , TensorFluent ] : if self . __dict__ . get ( 'non_fluents' ) is None : self . _initialize_non_fluents ( ) return dict ( self . non_fluents ) | Returns a partial scope with non - fluents . |
52,469 | def state_scope ( self , state_fluents : Sequence [ tf . Tensor ] ) -> Dict [ str , TensorFluent ] : return dict ( zip ( self . rddl . domain . state_fluent_ordering , state_fluents ) ) | Returns a partial scope with current state - fluents . |
52,470 | def action_scope ( self , action_fluents : Sequence [ tf . Tensor ] ) -> Dict [ str , TensorFluent ] : return dict ( zip ( self . rddl . domain . action_fluent_ordering , action_fluents ) ) | Returns a partial scope with current action - fluents . |
52,471 | def next_state_scope ( self , next_state_fluents : Sequence [ tf . Tensor ] ) -> Dict [ str , TensorFluent ] : return dict ( zip ( self . rddl . domain . next_state_fluent_ordering , next_state_fluents ) ) | Returns a partial scope with current next state - fluents . |
52,472 | def transition_scope ( self , state : Sequence [ tf . Tensor ] , action : Sequence [ tf . Tensor ] ) -> Dict [ str , TensorFluent ] : scope = { } scope . update ( self . non_fluents_scope ( ) ) scope . update ( self . state_scope ( state ) ) scope . update ( self . action_scope ( action ) ) return scope | Returns the complete transition fluent scope for the current state and action fluents . |
52,473 | def reward_scope ( self , state : Sequence [ tf . Tensor ] , action : Sequence [ tf . Tensor ] , next_state : Sequence [ tf . Tensor ] ) -> Dict [ str , TensorFluent ] : scope = { } scope . update ( self . non_fluents_scope ( ) ) scope . update ( self . state_scope ( state ) ) scope . update ( self . action_scope ( action ) ) scope . update ( self . next_state_scope ( next_state ) ) return scope | Returns the complete reward fluent scope for the current state action fluents and next_state fluents . |
52,474 | def state_invariant_scope ( self , state : Sequence [ tf . Tensor ] ) : scope = { } scope . update ( self . non_fluents_scope ( ) ) scope . update ( self . state_scope ( state ) ) return scope | Returns the state invariant fluent scope for the current state . |
52,475 | def _initialize_pvariables ( self , pvariables : Dict [ str , PVariable ] , ordering : List [ str ] , initializer : Optional [ InitializerList ] = None ) -> List [ Tuple [ str , TensorFluent ] ] : if initializer is not None : init = dict ( ) for ( ( name , args ) , value ) in initializer : arity = len ( args ) if args is not None else 0 name = '{}/{}' . format ( name , arity ) init [ name ] = init . get ( name , [ ] ) init [ name ] . append ( ( args , value ) ) fluents = [ ] for name in ordering : pvar = pvariables [ name ] shape = self . rddl . _param_types_to_shape ( pvar . param_types ) dtype = utils . range_type_to_dtype ( pvar . range ) fluent = np . full ( shape , pvar . default ) if initializer is not None : for args , val in init . get ( name , [ ] ) : if args is not None : idx = [ ] for ptype , arg in zip ( pvar . param_types , args ) : idx . append ( self . rddl . object_table [ ptype ] [ 'idx' ] [ arg ] ) idx = tuple ( idx ) fluent [ idx ] = val else : fluent = val with self . graph . as_default ( ) : t = tf . constant ( fluent , dtype = dtype , name = utils . identifier ( name ) ) scope = [ None ] * len ( t . shape ) fluent = TensorFluent ( t , scope , batch = False ) fluent_pair = ( name , fluent ) fluents . append ( fluent_pair ) return fluents | Instantiates pvariables given an initialization list and returns a list of TensorFluents in the given ordering . |
52,476 | def _initialize_non_fluents ( self ) : non_fluents = self . rddl . domain . non_fluents initializer = self . rddl . non_fluents . init_non_fluent self . non_fluents = self . _initialize_pvariables ( non_fluents , self . rddl . domain . non_fluent_ordering , initializer ) return self . non_fluents | Returns the non - fluents instantiated . |
52,477 | def _initialize_initial_state_fluents ( self ) : state_fluents = self . rddl . domain . state_fluents initializer = self . rddl . instance . init_state self . initial_state_fluents = self . _initialize_pvariables ( state_fluents , self . rddl . domain . state_fluent_ordering , initializer ) return self . initial_state_fluents | Returns the initial state - fluents instantiated . |
52,478 | def _initialize_default_action_fluents ( self ) : action_fluents = self . rddl . domain . action_fluents self . default_action_fluents = self . _initialize_pvariables ( action_fluents , self . rddl . domain . action_fluent_ordering ) return self . default_action_fluents | Returns the default action - fluents instantiated . |
52,479 | def _compile_batch_fluents ( self , fluents : List [ Tuple [ str , TensorFluent ] ] , batch_size : int ) -> Sequence [ tf . Tensor ] : batch_fluents = [ ] with self . graph . as_default ( ) : for name , fluent in fluents : name_scope = utils . identifier ( name ) with tf . name_scope ( name_scope ) : t = tf . stack ( [ fluent . tensor ] * batch_size ) batch_fluents . append ( t ) return tuple ( batch_fluents ) | Compiles fluents into tensors with given batch_size . |
52,480 | def _compile_expression ( self , expr : Expression , scope : Dict [ str , TensorFluent ] , batch_size : Optional [ int ] = None , noise : Optional [ List [ tf . Tensor ] ] = None ) -> TensorFluent : etype2compiler = { 'constant' : self . _compile_constant_expression , 'pvar' : self . _compile_pvariable_expression , 'randomvar' : self . _compile_random_variable_expression , 'arithmetic' : self . _compile_arithmetic_expression , 'boolean' : self . _compile_boolean_expression , 'relational' : self . _compile_relational_expression , 'func' : self . _compile_function_expression , 'control' : self . _compile_control_flow_expression , 'aggregation' : self . _compile_aggregation_expression } etype = expr . etype if etype [ 0 ] not in etype2compiler : raise ValueError ( 'Expression type unknown: {}' . format ( etype ) ) with self . graph . as_default ( ) : compiler_fn = etype2compiler [ etype [ 0 ] ] return compiler_fn ( expr , scope , batch_size , noise ) | Compile the expression expr into a TensorFluent in the given scope with optional batch size . |
52,481 | def _compile_constant_expression ( self , expr : Expression , scope : Dict [ str , TensorFluent ] , batch_size : Optional [ int ] = None , noise : Optional [ List [ tf . Tensor ] ] = None ) -> TensorFluent : etype = expr . etype args = expr . args dtype = utils . python_type_to_dtype ( etype [ 1 ] ) fluent = TensorFluent . constant ( args , dtype = dtype ) return fluent | Compile a constant expression expr into a TensorFluent in the given scope with optional batch size . |
52,482 | def _compile_pvariable_expression ( self , expr : Expression , scope : Dict [ str , TensorFluent ] , batch_size : Optional [ int ] = None , noise : Optional [ List [ tf . Tensor ] ] = None ) -> TensorFluent : etype = expr . etype args = expr . args name = expr . _pvar_to_name ( args ) if name not in scope : raise ValueError ( 'Variable {} not in scope.' . format ( name ) ) fluent = scope [ name ] scope = args [ 1 ] if args [ 1 ] is not None else [ ] if isinstance ( fluent , TensorFluent ) : fluent = TensorFluent ( fluent . tensor , scope , batch = fluent . batch ) elif isinstance ( fluent , tf . Tensor ) : fluent = TensorFluent ( fluent , scope , batch = self . batch_mode ) else : raise ValueError ( 'Variable in scope must be TensorFluent-like: {}' . format ( fluent ) ) return fluent | Compile a pvariable expression expr into a TensorFluent in the given scope with optional batch size . |
52,483 | def _compile_random_variable_expression ( self , expr : Expression , scope : Dict [ str , TensorFluent ] , batch_size : Optional [ int ] = None , noise : Optional [ List [ tf . Tensor ] ] = None ) -> TensorFluent : etype = expr . etype args = expr . args if etype [ 1 ] == 'KronDelta' : sample = self . _compile_expression ( args [ 0 ] , scope , batch_size , noise ) elif etype [ 1 ] == 'Bernoulli' : mean = self . _compile_expression ( args [ 0 ] , scope , batch_size , noise ) dist , sample = TensorFluent . Bernoulli ( mean , batch_size ) elif etype [ 1 ] == 'Uniform' : low = self . _compile_expression ( args [ 0 ] , scope , batch_size , noise ) high = self . _compile_expression ( args [ 1 ] , scope , batch_size , noise ) dist , sample = TensorFluent . Uniform ( low , high , batch_size ) elif etype [ 1 ] == 'Normal' : if noise is None : mean = self . _compile_expression ( args [ 0 ] , scope , batch_size , noise ) variance = self . _compile_expression ( args [ 1 ] , scope , batch_size , noise ) dist , sample = TensorFluent . Normal ( mean , variance , batch_size ) else : xi = noise . pop ( ) xi = TensorFluent ( xi , scope = [ ] , batch = True ) mean = self . _compile_expression ( args [ 0 ] , scope , batch_size , noise ) variance = self . _compile_expression ( args [ 1 ] , scope , batch_size , noise ) sample = mean + TensorFluent . sqrt ( variance ) * xi elif etype [ 1 ] == 'Laplace' : mean = self . _compile_expression ( args [ 0 ] , scope , batch_size , noise ) variance = self . _compile_expression ( args [ 1 ] , scope , batch_size , noise ) dist , sample = TensorFluent . Laplace ( mean , variance , batch_size ) elif etype [ 1 ] == 'Gamma' : shape = self . _compile_expression ( args [ 0 ] , scope , batch_size , noise ) scale = self . _compile_expression ( args [ 1 ] , scope , batch_size , noise ) dist , sample = TensorFluent . Gamma ( shape , scale , batch_size ) elif etype [ 1 ] == 'Exponential' : mean = self . 
_compile_expression ( args [ 0 ] , scope , batch_size , noise ) dist , sample = TensorFluent . Exponential ( mean , batch_size ) else : raise ValueError ( 'Invalid random variable expression:\n{}.' . format ( expr ) ) return sample | Compile a random variable expression expr into a TensorFluent in the given scope with optional batch size . |
52,484 | def _compile_arithmetic_expression ( self , expr : Expression , scope : Dict [ str , TensorFluent ] , batch_size : Optional [ int ] = None , noise : Optional [ List [ tf . Tensor ] ] = None ) -> TensorFluent : etype = expr . etype args = expr . args if len ( args ) == 1 : etype2op = { '+' : lambda x : x , '-' : lambda x : - x } if etype [ 1 ] not in etype2op : raise ValueError ( 'Invalid binary arithmetic expression:\n{}' . format ( expr ) ) op = etype2op [ etype [ 1 ] ] x = self . _compile_expression ( args [ 0 ] , scope , batch_size , noise ) fluent = op ( x ) else : etype2op = { '+' : lambda x , y : x + y , '-' : lambda x , y : x - y , '*' : lambda x , y : x * y , '/' : lambda x , y : x / y , } if etype [ 1 ] not in etype2op : raise ValueError ( 'Invalid binary arithmetic expression:\n{}' . format ( expr ) ) op = etype2op [ etype [ 1 ] ] x = self . _compile_expression ( args [ 0 ] , scope , batch_size , noise ) y = self . _compile_expression ( args [ 1 ] , scope , batch_size , noise ) fluent = op ( x , y ) return fluent | Compile an arithmetic expression expr into a TensorFluent in the given scope with optional batch size . |
52,485 | def _compile_function_expression ( self , expr : Expression , scope : Dict [ str , TensorFluent ] , batch_size : Optional [ int ] = None , noise : Optional [ List [ tf . Tensor ] ] = None ) -> TensorFluent : etype = expr . etype args = expr . args if len ( args ) == 1 : etype2func = { 'abs' : TensorFluent . abs , 'exp' : TensorFluent . exp , 'log' : TensorFluent . log , 'sqrt' : TensorFluent . sqrt , 'cos' : TensorFluent . cos , 'sin' : TensorFluent . sin , 'tan' : TensorFluent . tan , 'acos' : TensorFluent . acos , 'arccos' : TensorFluent . acos , 'asin' : TensorFluent . asin , 'arcsin' : TensorFluent . asin , 'atan' : TensorFluent . atan , 'arctan' : TensorFluent . atan , 'round' : TensorFluent . round , 'ceil' : TensorFluent . ceil , 'floor' : TensorFluent . floor } if etype [ 1 ] not in etype2func : raise ValueError ( 'Invalid unary function expression:\n{}' . format ( expr ) ) op = etype2func [ etype [ 1 ] ] x = self . _compile_expression ( args [ 0 ] , scope , batch_size , noise ) fluent = op ( x ) else : etype2func = { 'pow' : TensorFluent . pow , 'max' : TensorFluent . max , 'min' : TensorFluent . min } if etype [ 1 ] not in etype2func : raise ValueError ( 'Invalid binary function expression:\n{}' . format ( expr ) ) op = etype2func [ etype [ 1 ] ] x = self . _compile_expression ( args [ 0 ] , scope , batch_size , noise ) y = self . _compile_expression ( args [ 1 ] , scope , batch_size , noise ) fluent = op ( x , y ) return fluent | Compile a function expression expr into a TensorFluent in the given scope with optional batch size . |
52,486 | def _compile_control_flow_expression ( self , expr : Expression , scope : Dict [ str , TensorFluent ] , batch_size : Optional [ int ] = None , noise : Optional [ List [ tf . Tensor ] ] = None ) -> TensorFluent : etype = expr . etype args = expr . args if etype [ 1 ] == 'if' : condition = self . _compile_expression ( args [ 0 ] , scope , batch_size , noise ) true_case = self . _compile_expression ( args [ 1 ] , scope , batch_size , noise ) false_case = self . _compile_expression ( args [ 2 ] , scope , batch_size , noise ) fluent = TensorFluent . if_then_else ( condition , true_case , false_case ) else : raise ValueError ( 'Invalid control flow expression:\n{}' . format ( expr ) ) return fluent | Compile a control flow expression expr into a TensorFluent in the given scope with optional batch size . |
52,487 | def _compile_aggregation_expression ( self , expr : Expression , scope : Dict [ str , TensorFluent ] , batch_size : Optional [ int ] = None , noise : Optional [ List [ tf . Tensor ] ] = None ) -> TensorFluent : etype = expr . etype args = expr . args typed_var_list = args [ : - 1 ] vars_list = [ var for _ , ( var , _ ) in typed_var_list ] expr = args [ - 1 ] x = self . _compile_expression ( expr , scope ) etype2aggr = { 'sum' : x . sum , 'prod' : x . prod , 'avg' : x . avg , 'maximum' : x . maximum , 'minimum' : x . minimum , 'exists' : x . exists , 'forall' : x . forall } if etype [ 1 ] not in etype2aggr : raise ValueError ( 'Invalid aggregation expression {}.' . format ( expr ) ) aggr = etype2aggr [ etype [ 1 ] ] fluent = aggr ( vars_list = vars_list ) return fluent | Compile an aggregation expression expr into a TensorFluent in the given scope with optional batch size . |
52,488 | def variable_names ( self ) : if self . _variable_names is None : if self . _operator is None : if self . _operands is None : self . _variable_names = tuple ( ) else : self . _variable_names = self . _get_variable_names ( self . _operands ) elif self . _operator == 'NOT' : self . _variable_names = self . _operands . variable_names else : v = list ( ) for op in self . _operands : v . extend ( op . variable_names ) self . _variable_names = tuple ( set ( v ) ) return self . _variable_names | Get all variable names required for this query |
52,489 | def user ( self ) : try : return self . _user except AttributeError : self . _user = MatrixUser ( self . mxid , self . Api ( identity = self . mxid ) ) return self . _user | Creates a User object when requested . |
52,490 | def room ( self ) : try : return self . _room except AttributeError : room_id = self . json [ "room_id" ] self . _room = MatrixRoom ( room_id , self . Api ) return self . _room | Creates a Room object when requested . |
52,491 | def join ( self , room_str ) : response = self . user_api . join_room ( room_str ) return self . _mkroom ( response [ "room_id" ] ) | Joins room id or alias even if it must first be created . |
52,492 | def optional ( _object ) : if is_callable ( _object ) : validator = _object @ wraps ( validator ) def decorated ( value ) : if value : return validator ( value ) return return decorated else : def optional ( * args ) : return _object optional . is_optional = True optional . _object = _object return optional | This decorator has a double functionality it can wrap validators and make them optional or it can wrap keys and make that entry optional . |
52,493 | def set_walltime ( self , walltime ) : if not isinstance ( walltime , timedelta ) : raise TypeError ( 'walltime must be an instance of datetime.timedelta. %s given' % type ( walltime ) ) self . _options [ 'walltime' ] = walltime return self | Setting a walltime for the job |
52,494 | def get_url ( url , parser = 'html' ) : url = request . quote ( url , safe = ':/?=&' ) logger . debug ( 'URL: %s' , url ) req = request . Request ( url , headers = { 'User-Agent' : 'foobar' } ) try : response = request . urlopen ( req ) except HTTPError : raise except ( ssl . SSLError , URLError ) : context = ssl . SSLContext ( ssl . PROTOCOL_TLSv1 ) response = request . urlopen ( req , context = context ) response = response . read ( ) if parser == 'html' : return BeautifulSoup ( response , 'html.parser' , from_encoding = 'utf-8' ) elif parser == 'json' : return json . loads ( response ) elif parser == 'raw' : return response . decode ( ) raise ValueError ( 'Unrecognized parser' ) | Requests the specified url and returns a BeautifulSoup object with its contents . |
def get_lastfm(method, lastfm_key='', **kwargs):
    """Request *method* from the last.fm API and return the JSON reply.

    Extra keyword arguments are appended to the query string.  Returns
    ``''`` when no API key is available or the API reports an error.
    """
    if not lastfm_key:
        if 'lastfm_key' not in CONFIG or not CONFIG['lastfm_key']:
            logger.warning('No lastfm key configured')
            return ''
        lastfm_key = CONFIG['lastfm_key']

    url = 'http://ws.audioscrobbler.com/2.0/?method={}&api_key={}&format=json'
    url = url.format(method, lastfm_key)
    for key, value in kwargs.items():
        url += '&{}={}'.format(key, value)

    response = get_url(url, parser='json')
    if 'error' in response:
        logger.error('Error number %d in lastfm query: %s',
                     response['error'], response['message'])
        return ''
    return response
def metrolyrics(song):
    """Return the lyrics found in metrolyrics for *song*.

    Returns an empty string when the lyrics body is not found on the
    page.
    """
    translate = {URLESCAPE: '', ' ': '-'}

    def slug(raw):
        # Lowercase, escape, and collapse runs of dashes for the URL.
        cleaned = normalize(raw.lower(), translate)
        return re.sub(r'\-{2,}', '-', cleaned)

    url = 'http://www.metrolyrics.com/{}-lyrics-{}.html'.format(
        slug(song.title), slug(song.artist))
    soup = get_url(url)
    body = soup.find(id='lyrics-body-text')
    if body is None:
        return ''

    verses = [verse.get_text().strip() for verse in body.find_all('p')]
    return '\n\n'.join(verses).strip()
def darklyrics(song):
    """Return the lyrics found in darklyrics for *song*.

    Returns an empty string when the album name cannot be determined
    (the darklyrics URL is album-based) or no matching section exists.
    """
    if not hasattr(song, 'album') or not song.album:
        song.fetch_album_name()
    if not hasattr(song, 'album') or not song.album:
        return ''

    artist = normalize(song.artist.lower(), URLESCAPES, '')
    album = normalize(song.album.lower(), URLESCAPES, '')
    wanted = song.title.lower()

    url = 'http://www.darklyrics.com/lyrics/{}/{}.html'.format(artist, album)
    soup = get_url(url)

    text = ''
    for header in soup.find_all('h3'):
        # Each <h3> on the album page is one song title.
        if wanted not in str(header.get_text()).lower():
            continue
        node = header.next_sibling
        # Collect bare text nodes until the next song header.
        while node is not None and node.name != 'h3':
            if node.name is None:
                text += str(node)
            node = node.next_sibling
    return text.strip()
def azlyrics(song):
    """Return the lyrics found in azlyrics for *song*.

    The site omits a leading article "a " from artist names, so it is
    stripped before building the URL.
    """
    artist = song.artist.lower()
    if artist.startswith('a '):
        artist = artist[2:]
    artist = normalize(artist, URLESCAPES, '')
    title = normalize(song.title.lower(), URLESCAPES, '')

    url = 'https://www.azlyrics.com/lyrics/{}/{}.html'.format(artist, title)
    soup = get_url(url)
    # The lyrics sit in the last <div> that carries no CSS class.
    body = soup.find_all('div', class_='')[-1]
    return body.get_text().strip()
def metalarchives(song):
    """Return the lyrics found in MetalArchives for *song*.

    Performs a song search first, then fetches lyrics for each
    candidate id until a non-placeholder result is found; returns an
    empty string otherwise.
    """
    artist = normalize(song.artist)
    title = normalize(song.title)

    search_url = 'https://www.metal-archives.com/search/ajax-advanced/searching/songs'
    search_url += f'/?songTitle={title}&bandName={artist}&ExactBandMatch=1'
    data = get_url(search_url, parser='json')
    if not data:
        return ''

    # Extract every candidate lyrics id from the search result cells.
    pattern = re.compile(r'lyricsLink_([0-9]*)')
    matches = {pattern.search(cell) for row in data['aaData'] for cell in row}
    if not matches:
        return ''
    matches.discard(None)

    lyrics_url = 'https://www.metal-archives.com/release/ajax-view-lyrics/id/{}'
    for match in matches:
        page = get_url(lyrics_url.format(match.group(1)), parser='html')
        lyrics = page.get_text().strip()
        if not re.search('lyrics not available', lyrics):
            return lyrics
    return ''
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.